diff --git a/.config/taplo.toml b/.config/taplo.toml index 2c6ccfb2b3444..7cbc1b075125a 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -33,3 +33,10 @@ keys = ["build"] [rule.formatting] reorder_arrays = false + +[[rule]] +include = ["Cargo.toml"] +keys = ["workspace.dependencies"] + +[rule.formatting] +reorder_keys = true diff --git a/.forklift/config-gitlab.toml b/.forklift/config-gitlab.toml new file mode 100644 index 0000000000000..ab3b2729a46d4 --- /dev/null +++ b/.forklift/config-gitlab.toml @@ -0,0 +1,33 @@ +[compression] +type = "zstd" + +[compression.zstd] +compressionLevel = 3 + +[general] +jobNameVariable = "CI_JOB_NAME" +jobsBlackList = [] +logLevel = "warn" +threadsCount = 6 + +[cache] +extraEnv = ["RUNTIME_METADATA_HASH"] + +[metrics] +enabled = true +pushEndpoint = "placeholder" + +[metrics.extraLabels] +environment = "production" +job_name = "$CI_JOB_NAME" +project_name = "$CI_PROJECT_PATH" + +[storage] +type = "s3" + +[storage.s3] +accessKeyId = "placeholder" +bucketName = "placeholder" +concurrency = 10 +endpointUrl = "placeholder" +secretAccessKey = "placeholder" diff --git a/.forklift/config.toml b/.forklift/config.toml index ab3b2729a46d4..6f8eed8882ea3 100644 --- a/.forklift/config.toml +++ b/.forklift/config.toml @@ -23,11 +23,7 @@ job_name = "$CI_JOB_NAME" project_name = "$CI_PROJECT_PATH" [storage] -type = "s3" +type = "gcs" -[storage.s3] -accessKeyId = "placeholder" -bucketName = "placeholder" -concurrency = 10 -endpointUrl = "placeholder" -secretAccessKey = "placeholder" +[storage.gcs] +bucketName = "parity-ci-forklift" diff --git a/.github/actions/set-up-gh/action.yml b/.github/actions/set-up-gh/action.yml new file mode 100644 index 0000000000000..fc16ce0b26334 --- /dev/null +++ b/.github/actions/set-up-gh/action.yml @@ -0,0 +1,36 @@ +name: 'install gh' +description: 'Install the gh cli in a debian based distro and switches to the PR branch.' 
+inputs: + pr-number: + description: "Number of the PR" + required: true + GH_TOKEN: + description: "GitHub token" + required: true +outputs: + branch: + description: 'Branch name for the PR' + value: ${{ steps.branch.outputs.branch }} +runs: + using: "composite" + steps: + - name: Instal gh cli + shell: bash + # Here it would get the script from previous step + run: | + (type -p wget >/dev/null || (apt update && apt-get install wget -y)) + mkdir -p -m 755 /etc/apt/keyrings + wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null + chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null + apt update + apt install gh -y + git config --global --add safe.directory '*' + - run: gh pr checkout ${{ inputs.pr-number }} + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.GH_TOKEN }} + - name: Export branch name + shell: bash + run: echo "branch=$(git rev-parse --abbrev-ref HEAD)" >> "$GITHUB_OUTPUT" + id: branch diff --git a/.github/command-screnshot.png b/.github/command-screnshot.png new file mode 100644 index 0000000000000..1451fabca8b97 Binary files /dev/null and b/.github/command-screnshot.png differ diff --git a/.github/commands-readme.md b/.github/commands-readme.md new file mode 100644 index 0000000000000..20644c048c603 --- /dev/null +++ b/.github/commands-readme.md @@ -0,0 +1,200 @@ +# Running commands + +Command bot has been migrated, it is no longer a comment parser and now it is a GitHub action that works as a [`workflow_dispatch`](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch) event. + +## How to run an action + +To run an action, you need to go to the [_actions tab_](https://github.com/paritytech/polkadot-sdk/actions) and pick the one you desire to run. + +The current available command actions are: + +- [Command FMT](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-fmt.yml) +- [Command Update UI](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-update-ui.yml) +- [Command Sync](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-sync.yml) +- [Command Bench](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench.yml) +- [Command Bench All](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-all.yml) +- [Command Bench Overhead](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-overhead.yml) + +You need to select the action, and click on the dropdown that says: `Run workflow`. It is located in the upper right. + +If this dropdown is not visible, you may not have permission to run the action. Contact IT for help. + +![command screenshot](command-screnshot.png) + +Each command will have the same two required values, but it could have more. + +GitHub's official documentation: [Manually running a workflow](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow) + +### Number of the Pull Request + +The number of the pull request. Required so the action can fetch the correct branch and comment if it fails. + +## Action configurations + +### Bench + +Runs `benchmark pallet` or `benchmark overhead` against your PR and commits back updated weights. + +Posible combinations based on the `benchmark` dropdown. 
+ +- `substrate-pallet`: Pallet Benchmark for Substrate for specific pallet + - Requires `Subcommand` to be `pallet` + - Requires `Runtime` to be `dev` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Target Directory` to be `substrate` +- `polkadot-pallet`: Pallet Benchmark for Polkadot for specific pallet + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Target Directory` to be `polkadot` +- `cumulus-assets`: Pallet Benchmark for Cumulus assets + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `asset-hub-westend` + - `asset-hub-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `assets` + - Requires `Target Directory` to be `cumulus` +- `cumulus-collectives`: Pallet Benchmark for Cumulus collectives + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be `collectives-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `collectives` + - Requires `Target Directory` to be `cumulus` +- `cumulus-coretime`: Pallet Benchmark for Cumulus coretime + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `coretime-rococo` + - `coretime-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `coretime` + - Requires `Target Directory` to be `cumulus` +- `cumulus-bridge-hubs`: Pallet Benchmark for Cumulus bridge-hubs + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `bridge-hub-rococo` + - `bridge-hub-westend` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `bridge-hub` + - Requires `Target Directory` to be `cumulus` +- `cumulus-contracts`: Pallet Benchmark for Cumulus contracts + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one `contracts-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `contracts` + - Requires `Target Directory` to be `cumulus` +- `cumulus-glutton`: Pallet Benchmark for Cumulus glutton + - Requires `Subcommand` to be `pallet` + - Requires `Runtime` to be one of the following: + - `glutton-westend` + - `glutton-westend-dev-1300` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `glutton` + - Requires `Target Directory` to be `cumulus` +- `cumulus-starters`: Pallet Benchmark for Cumulus starters + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `seedling` + - `shell` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `starters` + - Requires `Target Directory` to be `cumulus` +- `cumulus-people`: Pallet Benchmark for Cumulus people + - Requires `Subcommand` to be one 
of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `people-westend` + - `people-rococo` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `people` + - Requires `Target Directory` to be `cumulus` +- `cumulus-testing`: Pallet Benchmark for Cumulus testing + - Requires `Subcommand` to be one of the following: + - `pallet` + - `xcm` + - Requires `Runtime` to be one of the following: + - `penpal` + - `rococo-parachain` + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` + - Requires `Runtime Dir` to be `testing` + - Requires `Target Directory` to be `cumulus` + +### Bench-all + +This is a wrapper to run `bench` for all pallets. + +Possible combinations based on the `benchmark` dropdown. + +- `pallet`: Benchmark for Substrate/Polkadot/Cumulus/Trappist for specific pallet + - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$` +- `substrate`: Pallet + Overhead + Machine Benchmark for Substrate for all pallets + - Requires `Target Directory` to be `substrate` +- `polkadot`: Pallet + Overhead Benchmark for Polkadot + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires `Target Directory` to be `polkadot` +- `cumulus`: Pallet Benchmark for Cumulus + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - `asset-hub-kusama` + - `asset-hub-polkadot` + - `asset-hub-rococo` + - `asset-hub-westend` + - `bridge-hub-kusama` + - `bridge-hub-polkadot` + - `bridge-hub-rococo` + - `bridge-hub-westend` + - `collectives-polkadot` + - `collectives-westend` + - `coretime-rococo` + - `coretime-westend` + - `contracts-rococo` + - `glutton-kusama` + - `glutton-westend` + - `people-rococo` + - `people-westend` + - Requires `Target Directory` to be `cumulus` + +### Bench-overhead + +Runs `benchmark overhead` and commits back the results to the PR. + +Possible combinations based on the `benchmark` dropdown. + +- `default`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Runtime` to be one of the following: + - `rococo` + - `westend` + - Requires `Target directory` to be `polkadot` +- `substrate`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Target directory` to be `substrate` +- `cumulus`: Runs `benchmark overhead` and commits back to PR the updated `extrinsic_weights.rs` files + - Requires `Runtime` to be one of the following: + - `asset-hub-rococo` + - `asset-hub-westend` + - Requires `Target directory` to be `cumulus` + +## How to modify an action + +If you want to modify an action and test it, you can do so by simply pushing your changes and then selecting your branch in the `Use workflow from` option. + +This will use a file from a specified branch. 
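+As an alternative to the web UI, these workflows can also be dispatched from the command line with the `gh` CLI. The snippet below is only a sketch: it assumes `gh` is installed and authenticated, and the PR number, branch and input values are placeholders to replace with your own.
+
+```bash
+# Dispatch "Command Bench" for a Substrate pallet (placeholder values).
+gh workflow run command-bench.yml \
+  --repo paritytech/polkadot-sdk \
+  --ref my-feature-branch \
+  -f pr=1234 \
+  -f benchmark=substrate-pallet \
+  -f subcommand=pallet \
+  -f runtime=dev \
+  -f pallet=pallet_balances \
+  -f target_dir=substrate
+```
+
+The `pallet` input must match `^([a-z_]+)([:]{2}[a-z_]+)?$`, so a single name like `pallet_balances` or a two-part name like `some_pallet::some_module` is accepted.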
diff --git a/.github/env b/.github/env index 162ce8af7c0dd..2e4d5b48100df 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" \ No newline at end of file +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v202407161507" diff --git a/.github/review-bot.yml b/.github/review-bot.yml index ed719cefec8bc..adbc480c6ba1a 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -9,6 +9,7 @@ rules: - ^\.gitlab/.* - ^\.config/nextest.toml - ^\.cargo/.* + - ^\.forklift/.* exclude: - ^\.gitlab/pipeline/zombienet.* type: "or" @@ -33,6 +34,7 @@ rules: - ^docker/.* - ^\.github/.* - ^\.gitlab/.* + - ^\.forklift/.* - ^\.config/nextest.toml - ^\.cargo/.* minApprovals: 2 diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py index 1f8f103e4e157..d5197100ad253 100644 --- a/.github/scripts/check-workspace.py +++ b/.github/scripts/check-workspace.py @@ -135,8 +135,12 @@ def check_deps(deps): if dep_name in all_crates: links.append((name, dep_name)) - if not 'path' in deps[dep]: - broken.append((name, dep_name, "crate must be linked via `path`")) + if name == 'polkadot-sdk': + if not 'path' in deps[dep]: + broken.append((name, dep_name, "crate must use path")) + return + elif not 'workspace' in deps[dep] or not deps[dep]['workspace']: + broken.append((name, dep_name, "crate must use workspace inheritance")) return def check_crate(deps): @@ -154,8 +158,6 @@ def check_crate(deps): check_crate(manifest) - - links.sort() broken.sort() diff --git a/.github/scripts/deny-git-deps.py b/.github/scripts/deny-git-deps.py index 4b831c9347f75..622fc64c48812 100644 --- a/.github/scripts/deny-git-deps.py +++ b/.github/scripts/deny-git-deps.py @@ -19,6 +19,7 @@ root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() workspace = Workspace.from_path(root) +errors = [] def check_dep(dep, used_by): if dep.location != DependencyLocation.GIT: @@ -27,14 +28,23 @@ def check_dep(dep, used_by): if used_by in KNOWN_BAD_GIT_DEPS.get(dep.name, []): print(f'๐Ÿคจ Ignoring git dependency {dep.name} in {used_by}') else: - print(f'๐Ÿšซ Found git dependency {dep.name} in {used_by}') - sys.exit(1) + errors.append(f'๐Ÿšซ Found git dependency {dep.name} in {used_by}') # Check the workspace dependencies that can be inherited: for dep in workspace.dependencies: check_dep(dep, "workspace") + if workspace.crates.find_by_name(dep.name): + if dep.location != DependencyLocation.PATH: + errors.append(f'๐Ÿšซ Workspace must use path to link local dependency {dep.name}') + # And the dependencies of each crate: for crate in workspace.crates: for dep in crate.dependencies: check_dep(dep, crate.name) + +if errors: + print('โŒ Found errors:') + for error in errors: + print(error) + sys.exit(1) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index c31dee06ec54a..5df03f1044d88 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -6,7 +6,7 @@ on: merge_group: env: - IMAGE: docker.io/paritytech/prdoc:v0.0.7 + IMAGE: docker.io/paritytech/prdoc:v0.0.8 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 671673c02c09e..33da5a8ecd591 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -11,13 +11,6 @@ concurrency: group: ${{ 
github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: set-image: # GitHub Actions allows using 'env' in a container context. diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 04c63f4192b29..47f9e5061b4ae 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -5,6 +5,8 @@ on: types: [opened, synchronize, reopened, ready_for_review] paths: - prdoc/*.prdoc +env: + TOOLCHAIN: nightly-2024-03-01 jobs: check-semver: @@ -19,14 +21,14 @@ jobs: with: cache-on-failure: true + - name: install parity-publish + run: cargo install parity-publish@0.6.0 + - name: Rust compilation prerequisites run: | - rustup default nightly-2024-03-01 - rustup target add wasm32-unknown-unknown --toolchain nightly-2024-03-01 - rustup component add rust-src --toolchain nightly-2024-03-01 - - - name: install parity-publish - run: cargo install parity-publish@0.5.1 + rustup default $TOOLCHAIN + rustup target add wasm32-unknown-unknown --toolchain $TOOLCHAIN + rustup component add rust-src --toolchain $TOOLCHAIN - name: extra git setup run: | @@ -39,7 +41,7 @@ jobs: export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 - if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc --toolchain nightly-2024-03-01 -v; then + if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc -v --toolchain $TOOLCHAIN; then cat <> $GITHUB_OUTPUT fmt: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -40,7 +40,7 @@ jobs: run: cargo +nightly fmt --all -- --check check-dependency-rules: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: check dependency rules @@ -49,17 +49,23 @@ jobs: ../.gitlab/ensure-deps.sh check-rust-feature-propagation: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: fetch deps + run: | + # Pull all dependencies eagerly: + time cargo metadata --format-version=1 --locked > /dev/null - name: run zepter - run: zepter run check + run: | + zepter --version + time zepter run check test-rust-features: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -69,7 +75,7 @@ jobs: run: bash .gitlab/rust-features.sh . 
check-toml-format: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} @@ -81,7 +87,7 @@ jobs: echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" check-workspace: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) - name: install python deps @@ -98,7 +104,7 @@ jobs: run: python3 .github/scripts/deny-git-deps.py . check-markdown: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -121,7 +127,7 @@ jobs: markdownlint --config "$CONFIG" --ignore target . check-umbrella: runs-on: ubuntu-latest - timeout-minutes: 10 + timeout-minutes: 20 needs: [set-image] container: image: ${{ needs.set-image.outputs.IMAGE }} diff --git a/.github/workflows/command-bench-all.yml b/.github/workflows/command-bench-all.yml new file mode 100644 index 0000000000000..4128f86fb7c82 --- /dev/null +++ b/.github/workflows/command-bench-all.yml @@ -0,0 +1,99 @@ +name: Command Bench All + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - pallet + - substrate + - polkadot + - cumulus + pallet: + description: Pallet + required: false + type: string + default: pallet_name + target_dir: + description: Target directory + type: choice + options: + - substrate + - polkadot + - cumulus + runtime: + description: Runtime + type: choice + options: + - rococo + - westend + - asset-hub-kusama + - asset-hub-polkadot + - asset-hub-rococo + - asset-hub-westend + - bridge-hub-kusama + - bridge-hub-polkadot + - bridge-hub-rococo + - bridge-hub-westend + - collectives-polkadot + - collectives-westend + - coretime-rococo + - coretime-westend + - contracts-rococo + - glutton-kusama + - glutton-westend + - people-rococo + - people-westend + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench-all: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-weights + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench all + run: | + "./scripts/bench-all.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench-overhead.yml b/.github/workflows/command-bench-overhead.yml new file mode 100644 index 0000000000000..fec8d37bb9ef8 --- /dev/null +++ b/.github/workflows/command-bench-overhead.yml @@ -0,0 +1,78 @@ +name: Command Bench Overhead + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - default + - substrate + - cumulus + runtime: + description: Runtime + type: choice + options: + - rococo + - westend + - asset-hub-rococo + - asset-hub-westend + target_dir: + description: Target directory + type: choice + options: + - polkadot + - substrate + - cumulus + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench-overhead: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench overhead + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --subcommand "overhead" --runtime "${{ inputs.runtime }}" --target_dir "${{ inputs.target_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-bench.yml b/.github/workflows/command-bench.yml new file mode 100644 index 0000000000000..ac879f443755c --- /dev/null +++ b/.github/workflows/command-bench.yml @@ -0,0 +1,124 @@ +name: Command Bench + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + benchmark: + description: Pallet benchmark + type: choice + required: true + options: + - substrate-pallet + - polkadot-pallet + - cumulus-assets + - cumulus-collectives + - cumulus-coretime + - cumulus-bridge-hubs + - cumulus-contracts + - cumulus-glutton + - cumulus-starters + - cumulus-people + - cumulus-testing + subcommand: + description: Subcommand + type: choice + required: true + options: + - pallet + - xcm + runtime: + description: Runtime + type: choice + options: + - dev + - rococo + - westend + - asset-hub-westend + - asset-hub-rococo + - collectives-westend + - coretime-rococo + - coretime-westend + - bridge-hub-rococo + - bridge-hub-westend + - contracts-rococo + - glutton-westend + - glutton-westend-dev-1300 + - seedling + - shell + - people-westend + - people-rococo + - penpal + - rococo-parachain + pallet: + description: Pallet + type: string + default: pallet_name + target_dir: + description: Target directory + type: choice + options: + - substrate + - polkadot + - cumulus + runtime_dir: + description: Runtime directory + type: choice + options: + - people + - collectives + - coretime + - bridge-hubs + - contracts + - glutton + - starters + - testing + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-bench: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run bench + run: | + "./scripts/bench.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" --subcommand "${{ inputs.subcommand }}" --runtime_dir "${{ inputs.runtime_dir }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-fmt.yml b/.github/workflows/command-fmt.yml new file mode 100644 index 0000000000000..586b8c77f2745 --- /dev/null +++ b/.github/workflows/command-fmt.yml @@ -0,0 +1,58 @@ +name: Command FMT + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-fmt: + needs: [set-image] + runs-on: ubuntu-latest + timeout-minutes: 20 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run FMT + run: | + # format toml. + # since paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231204 includes taplo-cli + taplo format --config .config/taplo.toml + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml new file mode 100644 index 0000000000000..2825f4a604605 --- /dev/null +++ b/.github/workflows/command-inform.yml @@ -0,0 +1,21 @@ +name: Inform of new command action + +on: + issue_comment: + types: [created] + +jobs: + comment: + runs-on: ubuntu-latest + if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') + steps: + - name: Inform that the new command exist + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'We are migrating the command bot to be a GitHub Action

Please, see the documentation on how to use it' + }) diff --git a/.github/workflows/command-sync.yml b/.github/workflows/command-sync.yml new file mode 100644 index 0000000000000..c610f4066a873 --- /dev/null +++ b/.github/workflows/command-sync.yml @@ -0,0 +1,71 @@ +name: Command Sync + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + chain: + description: Chain + type: choice + required: true + options: + - westend + - rococo + sync-type: + description: Sync type + type: choice + required: true + options: + - warp + - full + - fast + - fast-unsafe + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-sync: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-warpsync + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run sync + run: | + "./scripts/sync.sh" --chain "${{ inputs.chain }}" --type "${{ inputs.sync-type }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command-update-ui.yml b/.github/workflows/command-update-ui.yml new file mode 100644 index 0000000000000..860177adc8790 --- /dev/null +++ b/.github/workflows/command-update-ui.yml @@ -0,0 +1,59 @@ +name: Command Update UI + +on: + workflow_dispatch: + inputs: + pr: + description: Number of the Pull Request + required: true + rust-version: + description: Version of rust. Example 1.70 + required: false + +jobs: + set-image: + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cmd-update-ui: + needs: [set-image] + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 90 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Run update-ui + run: | + "./scripts/update-ui-tests.sh" "${{ inputs.rust-version }}" + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} --body "

Command failed โŒ

Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - run: git pull --rebase + - uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: cmd-action - ${{ github.workflow }} + branch: ${{ steps.gh.outputs.branch }} + - name: Report succeed + run: gh pr comment ${{ inputs.pr }} --body "

Action completed ๐ŸŽ‰๐ŸŽ‰

Run by @${{ github.actor }} for ${{ github.workflow }} completed ๐ŸŽ‰. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index 9b5b89e344756..33cf931692004 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -20,7 +20,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.5.1 + run: cargo install parity-publish@0.6.0 - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index 9643361d9d318..08c50638267ba 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.5.1 + run: cargo install parity-publish@0.6.0 - name: parity-publish claim env: diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index f39eb4c1716eb..20492f2d3a910 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -31,7 +31,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - binary: [ frame-omni-bencher, chain-spec-builder ] + # Tuples of [package, binary-name] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 @@ -41,21 +42,16 @@ jobs: sudo apt update sudo apt install -y protobuf-compiler - - name: Build ${{ matrix.binary }} binary + - name: Build ${{ matrix.binary[1] }} binary run: | - if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then - cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }} - target/production/${{ matrix.binary }} -h - else - cargo build --locked --profile=production -p ${{ matrix.binary }} - target/production/${{ matrix.binary }} --version - fi + cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} + target/production/${{ matrix.binary[1] }} --version - - name: Upload ${{ matrix.binary }} binary + - name: Upload ${{ matrix.binary[1] }} binary uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - name: ${{ matrix.binary }} - path: target/production/${{ matrix.binary }} + name: ${{ matrix.binary[1] }} + path: target/production/${{ matrix.binary[1] }} publish-release-draft: diff --git a/.github/workflows/release-clobber-stable.yml b/.github/workflows/release-clobber-stable.yml new file mode 100644 index 0000000000000..643c14daa15b1 --- /dev/null +++ b/.github/workflows/release-clobber-stable.yml @@ -0,0 +1,70 @@ +name: Clobber Stable + +# This action implements the +# [Clobbering](https://github.com/paritytech/polkadot-sdk/blob/master/docs/RELEASE.md#clobbering) +# process from the release process. It pushes a new commit to the `stable` branch with all the +# current content of the `audited` tag. It does not use a merge commit, but rather 'clobbers' the +# branch with a single commit that contains all the changes. It has a naming scheme of `Clobber with +# audited ($COMMIT)`. 
+# Currently, the script is only triggered manually, but can be easily changed to a schedule. + +on: + workflow_dispatch: + +permissions: + contents: write + +jobs: + clobber-stable: + runs-on: ubuntu-latest + timeout-minutes: 5 + env: + STABLE: stable + UNSTABLE: master + AUDITED: audited + steps: + - name: Checkout + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Prechecks + run: | + # Properly fetch + git fetch --prune --unshallow origin tag $AUDITED + git fetch origin $STABLE + + # Sanity checks + git checkout -q tags/$AUDITED || (echo "Could not find the '$AUDITED' tag." && exit 1) + COMMIT=$(git rev-parse tags/$AUDITED) + #$(git branch --contains $COMMIT | grep -q $UNSTABLE) || (echo "The '$AUDITED' tag is not on the '$UNSTABLE' branch." && exit 1) + + git config --global user.email "admin@parity.io" + git config --global user.name "Parity Release Team" + + - name: Prepare commit + run: | + git checkout --quiet origin/$STABLE + + # Delete all tracked files in the working directory + git ls-files -z | xargs -0 rm -f + + # Find and delete any empty directories + find . -type d -empty -delete + + git add . 1>/dev/null 2>/dev/null + git commit -qm "Delete all files" + + # Grab the files from the commit + git checkout --quiet tags/$AUDITED -- . + + # Stage, commit, and push the working directory which now matches 'audited' 1:1 + git status + COMMIT=$(git rev-parse --short=10 tags/$AUDITED) + git add . 1>/dev/null 2>/dev/null + git commit --allow-empty --amend -qm "Clobber with $AUDITED ($COMMIT)" + + - name: Push stable branch + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git log -3 + git push --verbose origin HEAD:$STABLE diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 95b1846b98e0c..69a4bdbdda9ae 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -6,8 +6,6 @@ env: on: push: - tags: - - "*" branches: - release-v[0-9]+.[0-9]+.[0-9]+* - release-cumulus-v[0-9]+* diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 5fdfabc437fe7..55addf11de06d 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -12,15 +12,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -31,7 +23,7 @@ jobs: # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. 
needs: changes - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: ubuntu-latest outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -40,12 +32,12 @@ jobs: uses: actions/checkout@v4 - id: set_image run: cat .github/env >> $GITHUB_OUTPUT - + test-linux-stable-int: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: @@ -60,13 +52,13 @@ jobs: uses: actions/checkout@v4 - name: script run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored - + # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 293acadc4e6a8..a413d33061593 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,15 +11,7 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true -env: - FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} - FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} - FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} - FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} - FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} - jobs: - changes: permissions: pull-requests: read @@ -40,9 +32,9 @@ jobs: quick-benchmarks: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: @@ -55,13 +47,13 @@ jobs: uses: actions/checkout@v4 - name: script run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet - + # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} continue-on-error: true # this rarely triggers in practice @@ -81,12 +73,12 @@ jobs: # - if [[ "$CI_JOB_STATUS" == "failed" ]]; then # printf "The x86_64 syscalls used by the worker binaries have changed. 
Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; # fi - + cargo-check-all-benches: needs: [set-image, changes] - if: ${{ needs.changes.outputs.rust }} + if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 + timeout-minutes: 60 container: image: ${{ needs.set-image.outputs.IMAGE }} env: diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73a8c52c448f7..7f2babc6bd472 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -120,7 +120,8 @@ default: .forklift-cache: before_script: - mkdir ~/.forklift - - cp .forklift/config.toml ~/.forklift/config.toml + - cp .forklift/config-gitlab.toml ~/.forklift/config.toml + - cat .forklift/config-gitlab.toml > .forklift/config.toml - > if [ "$FORKLIFT_BYPASS" != "true" ]; then echo "FORKLIFT_BYPASS not set"; diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index a7f321505bacf..6e2b53fae6198 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -149,3 +149,27 @@ zombienet-cumulus-0007-full_node_warp_sync: --local-dir="${LOCAL_DIR}" --concurrency=1 --test="0007-full_node_warp_sync.zndsl" + +zombienet-cumulus-0008-elastic_authoring: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0008-elastic_authoring.zndsl" + +zombienet-cumulus-0009-elastic_pov_recovery: + extends: + - .zombienet-cumulus-common + - .zombienet-refs + - .zombienet-before-script + - .zombienet-after-script + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}" + --concurrency=1 + --test="0009-elastic_pov_recovery.zndsl" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index b158cbe0b5aa3..90251082077ce 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -162,6 +162,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: - .zombienet-polkadot-common variables: FORCED_INFRA_INSTANCE: "spot-iops" + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -170,6 +173,9 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" @@ -199,6 +205,17 @@ zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility: --local-dir="${LOCAL_DIR}/functional" --test="0014-chunk-fetching-network-compatibility.zndsl" +zombienet-polkadot-functional-0015-coretime-shared-core: + extends: + - .zombienet-polkadot-common + before_script: + - !reference [.zombienet-polkadot-common, before_script] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - 
/home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0015-coretime-shared-core.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index fba768c653c66..ad75224fefdc4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -83,7 +83,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.10", + "getrandom", "once_cell", "version_check", ] @@ -95,7 +95,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.10", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -130,7 +130,7 @@ dependencies = [ "hex-literal", "itoa", "proptest", - "rand 0.8.5", + "rand", "ruint", "serde", "tiny-keccak", @@ -632,7 +632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", ] [[package]] @@ -642,7 +642,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand 0.8.5", + "rand", "rayon", ] @@ -655,7 +655,7 @@ dependencies = [ "ark-serialize 0.4.2", "ark-std 0.4.0", "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", "sha3", ] @@ -692,8 +692,24 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", + "asn1-rs-derive 0.4.0", + "asn1-rs-impl 0.1.0", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +dependencies = [ + "asn1-rs-derive 0.5.0", + "asn1-rs-impl 0.2.0", "displaydoc", "nom", "num-traits", @@ -711,7 +727,19 @@ dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", + "synstructure 0.13.1", ] [[package]] @@ -725,6 +753,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "assert_cmd" version = "2.0.12" @@ -766,7 +805,6 @@ name = "asset-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-hub-rococo-runtime", "asset-test-utils", "cumulus-pallet-parachain-system", "emulated-integration-tests-common", @@ -780,9 +818,7 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "penpal-runtime", "polkadot-runtime-common", - "rococo-runtime", 
"rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", @@ -823,6 +859,7 @@ dependencies = [ "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-aura", "pallet-authorship", "pallet-balances", @@ -859,7 +896,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -870,7 +906,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -893,7 +929,6 @@ name = "asset-hub-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-hub-westend-runtime", "asset-test-utils", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", @@ -911,16 +946,14 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "penpal-runtime", "polkadot-runtime-common", "sp-core", "sp-keyring", "sp-runtime", "staging-xcm", "staging-xcm-executor", - "westend-runtime", "westend-system-emulated-network", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -956,6 +989,7 @@ dependencies = [ "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-aura", "pallet-authorship", "pallet-balances", @@ -991,7 +1025,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -1002,7 +1035,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -1027,7 +1060,6 @@ dependencies = [ "parity-scale-codec", "sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", @@ -1050,7 +1082,6 @@ dependencies = [ "scale-info", "sp-api", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -1074,7 +1105,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] @@ -1084,11 +1115,11 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ - "async-lock", + "async-lock 2.8.0", "async-task", "concurrent-queue", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "slab", ] @@ -1098,10 +1129,10 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -1112,10 +1143,10 @@ checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "blocking", - "futures-lite", + "futures-lite 1.13.0", "once_cell", ] @@ -1125,27 +1156,57 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 
2.8.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.23", "slab", "socket2 0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +dependencies = [ + "async-lock 3.4.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.4.0", + "rustix 0.38.21", + "slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ - "event-listener", + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.2.0", + "event-listener-strategy", + "pin-project-lite 0.2.12", ] [[package]] @@ -1154,10 +1215,10 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" dependencies = [ - "async-io", + "async-io 1.13.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -1166,13 +1227,13 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "autocfg", "blocking", "cfg-if", - "event-listener", - "futures-lite", + "event-listener 2.5.3", + "futures-lite 1.13.0", "rustix 0.37.23", "signal-hook", "windows-sys 0.48.0", @@ -1187,13 +1248,13 @@ dependencies = [ "async-attributes", "async-channel", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -1235,9 +1296,9 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -1269,6 +1330,17 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.9", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -1304,9 +1376,9 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.10", + "getrandom", "instant", - "rand 0.8.5", + "rand", ] [[package]] @@ -1338,8 +1410,8 @@ dependencies = [ 
"dleq_vrf", "fflonk", "merlin", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand_chacha", + "rand_core", "ring 0.1.0", "sha2 0.10.8", "sp-ark-bls12-381", @@ -1371,6 +1443,12 @@ version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -1493,9 +1571,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -1601,11 +1679,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.8.0", "async-task", "atomic-waker", "fastrand 1.9.0", - "futures-lite", + "futures-lite 1.13.0", "log", ] @@ -1902,7 +1980,7 @@ dependencies = [ "bp-parachains", "bp-polkadot-core", "bp-runtime", - "ed25519-dalek 2.1.1", + "ed25519-dalek", "finality-grandpa", "parity-scale-codec", "sp-application-crypto", @@ -1954,7 +2032,6 @@ dependencies = [ "snowbridge-core", "sp-core", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", ] @@ -1975,8 +2052,6 @@ dependencies = [ name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ - "asset-hub-rococo-runtime", - "bridge-hub-rococo-runtime", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", @@ -2101,8 +2176,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2137,7 +2211,6 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-builder", @@ -2161,10 +2234,10 @@ dependencies = [ name = "bridge-hub-westend-integration-tests" version = "1.0.0" dependencies = [ - "bridge-hub-westend-runtime", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "hex-literal", "pallet-asset-conversion", "pallet-assets", "pallet-balances", @@ -2261,9 +2334,8 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", - "tuplex", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2281,7 +2353,6 @@ dependencies = [ "bp-xcm-bridge-hub-router", "frame-support", "frame-system", - "hash-db", "log", "pallet-balances", "pallet-bridge-grandpa", @@ -2292,8 +2363,6 @@ dependencies = [ "pallet-utility", "parity-scale-codec", "scale-info", - "sp-api", - "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", @@ -2450,6 +2519,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -2535,7 +2610,6 @@ dependencies = [ "sp-genesis-builder", "sp-keyring", "sp-runtime", - "sp-std 14.0.0", 
"staging-chain-spec-builder", "substrate-wasm-builder", ] @@ -2591,7 +2665,7 @@ dependencies = [ "multibase", "multihash 0.17.0", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -2604,7 +2678,7 @@ dependencies = [ "multibase", "multihash 0.18.1", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -2768,7 +2842,7 @@ checksum = "a90d114103adbc625300f346d4d09dfb4ab1c4a8df6868435dd903392ecf4354" dependencies = [ "libc", "once_cell", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -2800,8 +2874,6 @@ name = "collectives-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-hub-westend-runtime", - "collectives-westend-runtime", "cumulus-pallet-parachain-system", "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", @@ -2820,7 +2892,6 @@ dependencies = [ "staging-xcm", "staging-xcm-executor", "testnet-parachains-constants", - "westend-runtime", "westend-runtime-constants", "westend-system-emulated-network", ] @@ -2888,7 +2959,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -2899,7 +2969,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -2987,7 +3057,7 @@ dependencies = [ "fflonk", "getrandom_or_panic", "merlin", - "rand_chacha 0.3.1", + "rand_chacha", ] [[package]] @@ -3063,7 +3133,7 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" dependencies = [ - "getrandom 0.2.10", + "getrandom", "once_cell", "proc-macro-hack", "tiny-keccak", @@ -3145,7 +3215,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -3155,7 +3224,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3166,9 +3235,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -3176,9 +3245,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core2" @@ -3204,6 +3273,7 @@ dependencies = [ "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -3242,7 +3312,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -3252,7 +3321,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3270,6 +3339,7 @@ 
dependencies = [ "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -3306,7 +3376,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -3317,7 +3386,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -3573,7 +3642,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", "zeroize", ] @@ -3585,7 +3654,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", + "rand_core", "typenum", ] @@ -3647,7 +3716,7 @@ dependencies = [ "cumulus-test-runtime", "futures", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -3678,6 +3747,7 @@ dependencies = [ "cumulus-relay-chain-interface", "futures", "parity-scale-codec", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", @@ -3688,6 +3758,7 @@ dependencies = [ "sc-consensus-babe", "sc-consensus-slots", "sc-telemetry", + "sc-utils", "schnellru", "sp-api", "sp-application-crypto", @@ -3702,6 +3773,7 @@ dependencies = [ "sp-state-machine", "sp-timestamp", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -3761,7 +3833,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-consensus", "sp-api", "sp-block-builder", @@ -3786,7 +3858,7 @@ dependencies = [ "futures", "futures-timer", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-parachain-primitives", @@ -3822,13 +3894,11 @@ dependencies = [ "cumulus-test-relay-sproof-builder", "parity-scale-codec", "sc-client-api", - "scale-info", "sp-api", "sp-crypto-hashing", "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-trie", "tracing", @@ -3852,7 +3922,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "portpicker", - "rand 0.8.5", + "rand", "rstest", "sc-cli", "sc-client-api", @@ -3920,7 +3990,6 @@ dependencies = [ "sp-application-crypto", "sp-consensus-aura", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -3937,7 +4006,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm", ] @@ -3969,7 +4037,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rand 0.8.5", + "rand", "sc-client-api", "scale-info", "sp-consensus-slots", @@ -4011,7 +4079,6 @@ dependencies = [ "pallet-session", "parity-scale-codec", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -4026,7 +4093,6 @@ dependencies = [ "polkadot-primitives", "scale-info", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -4040,7 +4106,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", ] @@ -4065,7 +4130,6 @@ dependencies = [ "sp-core", 
"sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -4082,7 +4146,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", ] @@ -4096,7 +4159,6 @@ dependencies = [ "sp-api", "sp-consensus-aura", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -4110,7 +4172,6 @@ dependencies = [ "scale-info", "sp-api", "sp-runtime", - "sp-std 14.0.0", "sp-trie", "staging-xcm", ] @@ -4127,7 +4188,6 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-std 14.0.0", "sp-trie", ] @@ -4158,7 +4218,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-trie", ] @@ -4170,7 +4229,6 @@ dependencies = [ "futures", "parity-scale-codec", "sp-inherents", - "sp-std 14.0.0", "sp-timestamp", ] @@ -4187,7 +4245,6 @@ dependencies = [ "polkadot-runtime-parachains", "sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -4249,15 +4306,8 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", - "parking_lot 0.12.1", - "polkadot-availability-recovery", - "polkadot-collator-protocol", "polkadot-core-primitives", "polkadot-network-bridge", - "polkadot-node-collation-generation", - "polkadot-node-core-chain-api", - "polkadot-node-core-prospective-parachains", - "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", "polkadot-overseer", @@ -4294,7 +4344,7 @@ dependencies = [ "parity-scale-codec", "pin-project", "polkadot-overseer", - "rand 0.8.5", + "rand", "sc-client-api", "sc-rpc-api", "sc-service", @@ -4364,7 +4414,6 @@ dependencies = [ "polkadot-primitives", "sp-runtime", "sp-state-machine", - "sp-std 14.0.0", "sp-trie", ] @@ -4403,7 +4452,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -4450,8 +4498,7 @@ dependencies = [ "polkadot-service", "polkadot-test-service", "portpicker", - "rand 0.8.5", - "rococo-parachain-runtime", + "rand", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", @@ -4476,7 +4523,6 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-aura", - "sp-consensus-grandpa", "sp-core", "sp-io", "sp-keyring", @@ -4503,7 +4549,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "socket2 0.5.6", + "socket2 0.5.7", "windows-sys 0.52.0", ] @@ -4525,29 +4571,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.5.0", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle 2.5.0", "zeroize", @@ -4572,7 +4604,7 @@ checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" dependencies = [ "byteorder", "digest 0.9.0", - "rand_core 0.6.4", + "rand_core", "subtle-ng", "zeroize", ] @@ -4685,7 +4717,21 @@ version = "8.2.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs 0.6.1", "displaydoc", "nom", "num-bigint", @@ -4710,17 +4756,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive-syn-parse" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" -dependencies = [ - "proc-macro2 1.0.82", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "derive-syn-parse" version = "0.2.0" @@ -4904,7 +4939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "once_cell", "proc-macro2 1.0.82", "quote 1.0.35", @@ -4977,19 +5012,10 @@ dependencies = [ "elliptic-curve", "rfc6979", "serdect", - "signature 2.1.0", + "signature", "spki", ] -[[package]] -name = "ed25519" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature 1.6.4", -] - [[package]] name = "ed25519" version = "2.2.2" @@ -4997,21 +5023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ "pkcs8", - "signature 2.1.0", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519 1.5.3", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", + "signature", ] [[package]] @@ -5020,9 +5032,9 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.2", - "ed25519 2.2.2", - "rand_core 0.6.4", + "curve25519-dalek", + "ed25519", + "rand_core", "serde", "sha2 0.10.8", "subtle 2.5.0", @@ -5035,11 +5047,11 @@ version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ - "curve25519-dalek 4.1.2", - "ed25519 2.2.2", + "curve25519-dalek", + "ed25519", "hashbrown 0.14.3", "hex", - "rand_core 0.6.4", + "rand_core", "sha2 0.10.8", "zeroize", ] @@ -5063,7 +5075,7 @@ dependencies = [ "generic-array 0.14.7", "group", "pkcs8", - "rand_core 0.6.4", + "rand_core", "sec1", "serdect", "subtle 2.5.0", @@ -5192,19 +5204,6 @@ dependencies = [ "regex", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.10.1" @@ -5345,6 +5344,27 @@ version = "2.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.12", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite 0.2.12", +] + [[package]] name = "exit-future" version = "0.2.0" @@ -5356,12 +5376,14 @@ dependencies = [ [[package]] name = "expander" -version = "2.0.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" +checksum = "e2c470c71d91ecbd179935b24170459e926382eaaa86b590b78814e180d8a8e2" dependencies = [ "blake2 0.10.6", + "file-guard", "fs-err", + "prettyplease 0.2.12", "proc-macro2 1.0.82", "quote 1.0.35", "syn 2.0.61", @@ -5471,7 +5493,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ -5494,6 +5516,16 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +[[package]] +name = "file-guard" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ef72acf95ec3d7dbf61275be556299490a245f017cf084bd23b4f68cf9407c" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "file-per-thread-logger" version = "0.1.6" @@ -5528,8 +5560,8 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", ] @@ -5544,7 +5576,7 @@ dependencies = [ "futures", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "relay-utils", ] @@ -5567,7 +5599,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -5585,7 +5617,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", - "libz-sys", "miniz_oxide", ] @@ -5683,7 +5714,6 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-runtime-interface 24.0.0", - "sp-std 14.0.0", "sp-storage 19.0.0", "static_assertions", ] @@ -5707,7 +5737,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "rand 0.8.5", + "rand", "rand_pcg", "sc-block-builder", "sc-chain-spec", @@ -5748,7 +5778,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -5775,14 +5804,13 @@ dependencies = [ "frame-support", "frame-system", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-arithmetic", "sp-core", "sp-io", "sp-npos-elections", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -5795,7 +5823,7 @@ dependencies = [ "frame-support", "honggfuzz", "parity-scale-codec", - "rand 0.8.5", + 
"rand", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -5820,7 +5848,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-version", ] @@ -5945,12 +5972,12 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse 0.2.0", + "derive-syn-parse", "expander", "frame-support-procedural-tools", "itertools 0.11.0", "macro_magic", - "proc-macro-warning", + "proc-macro-warning 1.0.0", "proc-macro2 1.0.82", "quote 1.0.35", "regex", @@ -6000,7 +6027,6 @@ dependencies = [ "sp-metadata-ir", "sp-runtime", "sp-state-machine", - "sp-std 14.0.0", "sp-version", "static_assertions", "trybuild", @@ -6075,7 +6101,6 @@ dependencies = [ "sp-externalities 0.25.0", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-version", ] @@ -6096,7 +6121,6 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -6152,6 +6176,16 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +dependencies = [ + "futures-timer", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.30" @@ -6201,6 +6235,16 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.12", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -6214,13 +6258,12 @@ dependencies = [ [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki", + "rustls 0.21.7", ] [[package]] @@ -6311,17 +6354,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.10" @@ -6330,7 +6362,7 @@ checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -6339,8 +6371,8 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" dependencies = [ - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", ] [[package]] @@ -6426,7 +6458,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -6450,9 +6481,9 @@ dependencies = [ "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quanta", - "rand 0.8.5", + "rand", "smallvec", ] @@ -6463,7 +6494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ 
-6478,7 +6509,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.9", + "indexmap 2.2.3", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", "indexmap 2.2.3", "slab", "tokio", @@ -6686,6 +6736,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -6693,15 +6754,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", + "http 0.2.9", "pin-project-lite 0.2.12", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite 0.2.12", +] [[package]] name = "httparse" @@ -6723,44 +6801,103 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", "pin-project-lite 0.2.12", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", "tower-service", "tracing", "want", ] +[[package]] +name = "hyper" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite 0.2.12", + "smallvec", + "tokio", + "want", +] + [[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.29", "log", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "log", + "rustls 0.23.10", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", + "pin-project-lite 0.2.12", + "socket2 0.5.7", + "tokio", + "tower", + "tower-service", + "tracing", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -6807,21 +6944,21 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 2.3.3", "core-foundation", "fnv", "futures", @@ -6831,7 +6968,26 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows 0.34.0", + "windows 0.51.1", +] + +[[package]] +name = "igd-next" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 0.2.9", + "hyper 0.14.29", + "log", + "rand", + "tokio", + "url", + "xmltree", ] [[package]] @@ -7004,7 +7160,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -7048,13 +7204,13 @@ dependencies = [ "curl", "curl-sys", "encoding_rs", - "event-listener", - "futures-lite", - "http", + "event-listener 2.5.3", + "futures-lite 1.13.0", + "http 0.2.9", "log", "mime", "once_cell", - "polling", + "polling 2.8.0", "slab", "sluice", "tracing", @@ -7087,6 +7243,26 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.26" @@ -7124,9 +7300,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" +checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -7140,20 +7316,22 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" +checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" dependencies = [ + "base64 0.22.1", "futures-util", - "http", + "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls-native-certs 0.7.0", + "rustls 0.23.10", "rustls-pki-types", - "soketto", + "rustls-platform-verifier", + "soketto 0.8.0", "thiserror", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls 0.26.0", "tokio-util", "tracing", "url", @@ -7161,20 +7339,23 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" dependencies = [ "anyhow", "async-trait", "beef", + "bytes", "futures-timer", "futures-util", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "rustc-hash", "serde", "serde_json", @@ -7186,15 +7367,20 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", - "hyper", - "hyper-rustls", + "base64 0.22.1", + "http-body 1.0.0", + "hyper 1.3.1", + "hyper-rustls 0.27.2", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", + "rustls 0.23.10", + "rustls-platform-verifier", "serde", "serde_json", "thiserror", @@ -7206,11 +7392,11 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" +checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro-crate 3.1.0", "proc-macro2 1.0.82", "quote 1.0.35", @@ -7219,20 +7405,24 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" +checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" dependencies = [ + "anyhow", "futures-util", - "http", - "hyper", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "jsonrpsee-core", "jsonrpsee-types", "pin-project", "route-recognizer", "serde", "serde_json", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", "tokio-stream", @@ -7243,12 +7433,12 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" +checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" dependencies = [ - "anyhow", "beef", + "http 1.1.0", "serde", "serde_json", "thiserror", @@ -7256,11 +7446,11 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.5" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" +checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ - "http", + "http 1.1.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", @@ -7349,7 +7539,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -7360,7 +7550,7 @@ checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" dependencies = [ "kvdb", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "rocksdb", "smallvec", @@ -7469,14 +7659,15 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.51.4" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f35eae38201a993ece6bdc823292d6abd1bffed1c4d0f4a3517d2bd8e1d917fe" +checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464" dependencies = [ "bytes", + "either", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -7493,18 +7684,21 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", + "libp2p-upnp", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multiaddr", + "multiaddr 0.18.1", "pin-project", + "rw-stream-sink", + "thiserror", ] [[package]] name = "libp2p-allow-block-list" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510daa05efbc25184458db837f6f9a5143888f1caa742426d92e1833ddd38a50" +checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ "libp2p-core", "libp2p-identity", @@ -7514,9 +7708,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4caa33f1d26ed664c4fe2cca81a08c8e07d4c1c04f2f4ac7655c2dd85467fda0" +checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ "libp2p-core", "libp2p-identity", @@ -7526,9 +7720,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.2" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" +checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" dependencies = [ "either", "fnv", @@ -7537,50 +7731,53 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr", - "multihash 0.17.0", + "multiaddr 0.18.1", + "multihash 0.19.1", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "quick-protobuf", - "rand 0.8.5", + "rand", "rw-stream-sink", "smallvec", "thiserror", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-dns" -version = "0.39.0" +version = "0.40.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "146ff7034daae62077c415c2376b8057368042df6ab95f5432ad5e88568b1554" +checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" dependencies = [ + "async-trait", "futures", "libp2p-core", + "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "smallvec", - "trust-dns-resolver 0.22.0", + "trust-dns-resolver", ] [[package]] name = "libp2p-identify" -version = "0.42.2" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5455f472243e63b9c497ff320ded0314254a9eb751799a39c283c6f20b793f3c" +checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" dependencies = [ "asynchronous-codec", "either", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", "libp2p-swarm", "log", - "lru 0.10.1", + "lru 0.12.3", "quick-protobuf", "quick-protobuf-codec", "smallvec", @@ -7590,27 +7787,27 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ - "bs58 0.4.0", - "ed25519-dalek 2.1.1", - "log", - "multiaddr", - "multihash 0.17.0", + "bs58 0.5.0", + "ed25519-dalek", + "hkdf", + "multihash 0.19.1", "quick-protobuf", - "rand 0.8.5", + "rand", "sha2 0.10.8", "thiserror", + "tracing", "zeroize", ] [[package]] name = "libp2p-kad" -version = "0.43.3" +version = "0.44.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39d5ef876a2b2323d63c258e63c2f8e36f205fe5a11f0b3095d59635650790ff" +checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" dependencies = [ "arrayvec 0.7.4", "asynchronous-codec", @@ -7625,20 +7822,21 @@ dependencies = [ "libp2p-swarm", "log", "quick-protobuf", - "rand 0.8.5", + "quick-protobuf-codec", + "rand", "sha2 0.10.8", "smallvec", "thiserror", "uint", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", ] [[package]] name = "libp2p-mdns" -version = "0.43.1" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19983e1f949f979a928f2c603de1cf180cc0dc23e4ac93a62651ccb18341460b" +checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" dependencies = [ "data-encoding", "futures", @@ -7647,9 +7845,9 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand 0.8.5", + "rand", "smallvec", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", "trust-dns-proto 0.22.0", "void", @@ -7657,63 +7855,69 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.12.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42ec91e227d7d0dafa4ce88b333cdf5f277253873ab087555c92798db2ddd46" +checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ + "instant", "libp2p-core", "libp2p-identify", + "libp2p-identity", "libp2p-kad", "libp2p-ping", "libp2p-swarm", + "once_cell", "prometheus-client", ] [[package]] name = "libp2p-noise" -version = "0.42.2" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3673da89d29936bc6435bafc638e2f184180d554ce844db65915113f86ec5e" +checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" dependencies = [ "bytes", - "curve25519-dalek 
3.2.0", + "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", "log", + "multiaddr 0.18.1", + "multihash 0.19.1", "once_cell", "quick-protobuf", - "rand 0.8.5", + "rand", "sha2 0.10.8", "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.42.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e57759c19c28a73ef1eb3585ca410cefb72c1a709fcf6de1612a378e4219202" +checksum = "e702d75cd0827dfa15f8fd92d15b9932abe38d10d21f47c50438c71dd1b5dae3" dependencies = [ "either", "futures", "futures-timer", "instant", "libp2p-core", + "libp2p-identity", "libp2p-swarm", "log", - "rand 0.8.5", + "rand", "void", ] [[package]] name = "libp2p-quic" -version = "0.7.0-alpha.3" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b26abd81cd2398382a1edfe739b539775be8a90fa6914f39b2ab49571ec735" +checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" dependencies = [ "bytes", "futures", @@ -7723,19 +7927,21 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "log", - "parking_lot 0.12.1", - "quinn-proto", - "rand 0.8.5", - "rustls 0.20.8", + "parking_lot 0.12.3", + "quinn 0.10.2", + "rand", + "ring 0.16.20", + "rustls 0.21.7", + "socket2 0.5.7", "thiserror", "tokio", ] [[package]] name = "libp2p-request-response" -version = "0.24.1" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffdb374267d42dc5ed5bc53f6e601d4a64ac5964779c6e40bb9e4f14c1e30d5" +checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" dependencies = [ "async-trait", "futures", @@ -7743,15 +7949,17 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand 0.8.5", + "log", + "rand", "smallvec", + "void", ] [[package]] name = "libp2p-swarm" -version = "0.42.2" +version = "0.43.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903b3d592d7694e56204d211f29d31bc004be99386644ba8731fc3e3ef27b296" +checksum = "580189e0074af847df90e75ef54f3f30059aedda37ea5a1659e8b9fca05c0141" dependencies = [ "either", "fnv", @@ -7762,7 +7970,9 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", - "rand 0.8.5", + "multistream-select", + "once_cell", + "rand", "smallvec", "tokio", "void", @@ -7770,36 +7980,39 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" +checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck 0.4.1", + "proc-macro-warning 0.4.2", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 1.0.109", + "syn 2.0.61", ] [[package]] name = "libp2p-tcp" -version = "0.39.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d33698596d7722d85d3ab0c86c2c322254fce1241e91208e3679b4eb3026cf" +checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", + "libp2p-identity", "log", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" +checksum = 
"8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" dependencies = [ "futures", "futures-rustls", @@ -7807,51 +8020,68 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.21.7", + "rustls-webpki 0.101.4", "thiserror", - "webpki", - "x509-parser 0.14.0", + "x509-parser 0.15.1", "yasna", ] +[[package]] +name = "libp2p-upnp" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "log", + "tokio", + "void", +] + [[package]] name = "libp2p-wasm-ext" -version = "0.39.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77dff9d32353a5887adb86c8afc1de1a94d9e8c3bc6df8b2201d7cdf5c848f43" +checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" dependencies = [ "futures", "js-sys", "libp2p-core", - "parity-send-wrapper", + "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] [[package]] name = "libp2p-websocket" -version = "0.41.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111273f7b3d3510524c752e8b7a5314b7f7a1fee7e68161c01a7d72cbb06db9f" +checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" dependencies = [ "either", "futures", "futures-rustls", "libp2p-core", + "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quicksink", "rw-stream-sink", - "soketto", + "soketto 0.7.1", "url", - "webpki-roots 0.22.6", + "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.43.1" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd21d950662700a385d4c6d68e2f5f54d778e97068cdd718522222ef513bda" +checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", "libp2p-core", @@ -7888,7 +8118,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.5", + "rand", "serde", "sha2 0.9.9", "typenum", @@ -8018,40 +8248,40 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.5.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f02542ae3a94b4c4ffa37dc56388c923e286afa3bf65452e3984b50b2a2f316" +checksum = "0f46c51c205264b834ceed95c8b195026e700494bc3991aaba3b4ea9e20626d9" dependencies = [ "async-trait", "bs58 0.4.0", "bytes", "cid 0.10.1", - "ed25519-dalek 1.0.1", + "ed25519-dalek", "futures", "futures-timer", "hex-literal", "indexmap 2.2.3", "libc", "mockall 0.12.1", - "multiaddr", + "multiaddr 0.17.1", "multihash 0.17.0", "network-interface", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "prost 0.11.9", + "prost 0.12.6", "prost-build 0.11.9", - "quinn", - "rand 0.8.5", + "quinn 0.9.4", + "rand", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.20.9", "serde", "sha2 0.10.8", "simple-dns", "smallvec", "snow", - "socket2 0.5.6", + "socket2 0.5.7", "static_assertions", "str0m", "thiserror", @@ -8060,13 +8290,13 @@ dependencies = [ "tokio-tungstenite", "tokio-util", "tracing", - "trust-dns-resolver 0.23.2", + "trust-dns-resolver", "uint", - "unsigned-varint", + "unsigned-varint 0.8.0", "url", "webpki", - "x25519-dalek 2.0.0", - "x509-parser 0.15.1", + "x25519-dalek", + "x509-parser 0.16.0", "yasna", "zeroize", ] @@ -8102,18 +8332,18 
@@ dependencies = [ [[package]] name = "lru" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" -dependencies = [ - "hashbrown 0.13.2", -] +checksum = "eedb2bdbad7e0634f83989bf596f497b070130daaa398ab22d84c39e266deec5" [[package]] name = "lru" -version = "0.11.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eedb2bdbad7e0634f83989bf596f497b070130daaa398ab22d84c39e266deec5" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] [[package]] name = "lru-cache" @@ -8164,9 +8394,9 @@ dependencies = [ [[package]] name = "macro_magic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d" +checksum = "cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" dependencies = [ "macro_magic_core", "macro_magic_macros", @@ -8176,12 +8406,12 @@ dependencies = [ [[package]] name = "macro_magic_core" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" +checksum = "1687dc887e42f352865a393acae7cf79d98fab6351cde1f58e9e057da89bf150" dependencies = [ "const-random", - "derive-syn-parse 0.1.5", + "derive-syn-parse", "macro_magic_core_macros", "proc-macro2 1.0.82", "quote 1.0.35", @@ -8190,9 +8420,9 @@ dependencies = [ [[package]] name = "macro_magic_core_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" +checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -8201,9 +8431,9 @@ dependencies = [ [[package]] name = "macro_magic_macros" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" +checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.35", @@ -8338,7 +8568,7 @@ checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" dependencies = [ "byteorder", "keccak", - "rand_core 0.6.4", + "rand_core", "zeroize", ] @@ -8349,13 +8579,12 @@ dependencies = [ "async-std", "async-trait", "bp-messages", - "env_logger 0.11.3", "finality-relay", "futures", "hex", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "relay-utils", "sp-arithmetic", ] @@ -8367,7 +8596,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69672161530e8aeca1d1400fbf3f1a1747ff60ea604265a4e906c2442df20532" dependencies = [ "futures", - "rand 0.8.5", + "rand", "thrift", ] @@ -8466,7 +8695,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -8481,14 +8710,14 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "c2-chacha", - "curve25519-dalek 4.1.2", + "curve25519-dalek", "either", "hashlink", "lioness", "log", - "parking_lot 0.12.1", - "rand 0.8.5", - "rand_chacha 
0.3.1", + "parking_lot 0.12.3", + "rand", + "rand_chacha", "rand_distr", "subtle 2.5.0", "thiserror", @@ -8502,7 +8731,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-offchain", @@ -8608,7 +8837,26 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.7.2", + "url", +] + +[[package]] +name = "multiaddr" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash 0.19.1", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.2", "url", ] @@ -8634,10 +8882,10 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.8", "sha3", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8651,10 +8899,10 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.8", "sha3", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8664,27 +8912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", - "unsigned-varint", -] - -[[package]] -name = "multihash-codetable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d815ecb3c8238d00647f8630ede7060a642c9f704761cd6082cb4028af6935" -dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "core2", - "digest 0.10.7", - "multihash-derive 0.9.0", - "ripemd", - "serde", - "sha1", - "sha2 0.10.8", - "sha3", - "strobe-rs", + "unsigned-varint 0.7.2", ] [[package]] @@ -8698,32 +8926,7 @@ dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", - "synstructure", -] - -[[package]] -name = "multihash-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" -dependencies = [ - "core2", - "multihash 0.19.1", - "multihash-derive-impl", -] - -[[package]] -name = "multihash-derive-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro-error", - "proc-macro2 1.0.82", - "quote 1.0.35", - "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -8734,16 +8937,16 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" dependencies = [ "bytes", "futures", "log", "pin-project", "smallvec", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -8780,7 +8983,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" dependencies = [ "clap 3.2.25", - "rand 0.8.5", + "rand", ] [[package]] @@ -8896,7 +9099,7 @@ version = "0.28.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cfg-if", "cfg_aliases", "libc", @@ -8932,7 +9135,7 @@ dependencies = [ "node-primitives", "node-testing", "parity-db", - "rand 0.8.5", + "rand", "sc-basic-authorship", "sc-client-api", "sc-transaction-pool", @@ -9265,7 +9468,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", +] + +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs 0.6.1", ] [[package]] @@ -9298,7 +9510,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -9354,9 +9566,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestra" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92829eef0328a3d1cd22a02c0e51deb92a5362df3e7d21a4e9bdc38934694e66" +checksum = "41f6bbacc8c189a3f2e45e0fd0436e5d97f194db888e721bdbc3973e7dbed4c2" dependencies = [ "async-trait", "dyn-clonable", @@ -9371,9 +9583,9 @@ dependencies = [ [[package]] name = "orchestra-proc-macro" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1344346d5af32c95bbddea91b18a88cc83eac394192d20ef2fc4c40a74332355" +checksum = "f7b1d40dd8f367db3c65bec8d3dd47d4a604ee8874480738f93191bddab4e0e0" dependencies = [ "expander", "indexmap 2.2.3", @@ -9430,7 +9642,6 @@ dependencies = [ "sp-crypto-hashing", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9451,7 +9662,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9472,7 +9682,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9490,7 +9699,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-storage 19.0.0", ] @@ -9507,7 +9715,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9528,25 +9735,42 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-storage 19.0.0", ] [[package]] name = "pallet-assets" version = "29.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", +] + +[[package]] +name = "pallet-assets-freezer" +version = "0.1.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-assets", "pallet-balances", "parity-scale-codec", "scale-info", "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9561,7 +9785,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9579,7 +9802,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9596,7 +9818,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] 
[[package]] @@ -9611,7 +9832,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9639,7 +9859,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -9659,7 +9878,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -9670,7 +9888,7 @@ dependencies = [ "frame-election-provider-support", "honggfuzz", "pallet-bags-list", - "rand 0.8.5", + "rand", ] [[package]] @@ -9707,7 +9925,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9735,7 +9952,6 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine", - "sp-std 14.0.0", ] [[package]] @@ -9760,7 +9976,6 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std 14.0.0", ] [[package]] @@ -9778,7 +9993,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9795,7 +10009,7 @@ dependencies = [ "pallet-beefy-mmr", "pallet-mmr", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-consensus-beefy", @@ -9812,7 +10026,6 @@ dependencies = [ "bp-header-chain", "bp-runtime", "bp-test-utils", - "finality-grandpa", "frame-benchmarking", "frame-support", "frame-system", @@ -9824,13 +10037,13 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std 14.0.0", - "sp-trie", ] [[package]] name = "pallet-bridge-messages" version = "0.7.0" dependencies = [ + "bp-header-chain", "bp-messages", "bp-runtime", "bp-test-utils", @@ -9838,13 +10051,15 @@ dependencies = [ "frame-support", "frame-system", "log", - "num-traits", "pallet-balances", + "pallet-bridge-grandpa", "parity-scale-codec", "scale-info", + "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", + "sp-trie", ] [[package]] @@ -9867,7 +10082,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std 14.0.0", - "sp-trie", ] [[package]] @@ -9908,7 +10122,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -9928,7 +10141,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9945,14 +10157,13 @@ dependencies = [ "pallet-session", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-consensus-aura", "sp-core", "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -9969,7 +10180,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -9984,7 +10194,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10014,7 +10223,7 @@ dependencies = [ "parity-scale-codec", "paste", "pretty_assertions", - "rand 0.8.5", + "rand", "rand_pcg", "scale-info", "serde", @@ -10077,7 +10286,6 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-builder", @@ -10121,7 +10329,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10139,7 +10346,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10153,7 +10359,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10174,7 +10379,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] @@ -10196,7 +10400,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10212,7 +10415,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 
14.0.0", ] [[package]] @@ -10231,7 +10433,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "scale-info", "sp-core", "sp-io", @@ -10254,15 +10456,14 @@ dependencies = [ "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", "sp-arithmetic", "sp-core", "sp-io", "sp-npos-elections", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "strum 0.26.2", ] @@ -10277,7 +10478,6 @@ dependencies = [ "parity-scale-codec", "sp-npos-elections", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10296,7 +10496,6 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] @@ -10315,7 +10514,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10341,7 +10539,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10372,7 +10569,6 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10391,7 +10587,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-version", ] @@ -10407,7 +10602,6 @@ dependencies = [ "scale-info", "sp-core", "sp-io", - "sp-std 14.0.0", ] [[package]] @@ -10423,7 +10617,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10461,7 +10654,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] @@ -10479,9 +10671,9 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10511,7 +10703,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -10530,7 +10721,6 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10550,7 +10740,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -10567,7 +10756,6 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10582,7 +10770,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10599,7 +10786,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10615,7 +10801,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10628,7 +10813,7 @@ dependencies = [ "frame-system", "log", "parity-scale-codec", - "rand 0.8.5", + "rand", "rand_distr", "scale-info", "serde", @@ -10637,7 +10822,6 @@ dependencies = [ "sp-crypto-hashing", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-weights", ] @@ -10661,7 +10845,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-version", ] @@ -10691,7 +10874,6 @@ dependencies = [ "sp-io", "sp-mixnet", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10711,7 +10893,6 @@ dependencies = [ "sp-io", "sp-mmr-primitives", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10727,7 +10908,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10765,7 +10945,6 @@ dependencies = [ "sp-io", "sp-keystore", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10775,7 +10954,6 @@ dependencies = [ "pallet-nfts", "parity-scale-codec", "sp-api", - "sp-std 
14.0.0", ] [[package]] @@ -10792,7 +10970,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10807,7 +10984,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10824,7 +11000,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -10850,7 +11025,6 @@ dependencies = [ "sp-runtime", "sp-runtime-interface 24.0.0", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -10862,7 +11036,7 @@ dependencies = [ "honggfuzz", "log", "pallet-nomination-pools", - "rand 0.8.5", + "rand", "sp-io", "sp-runtime", "sp-tracing 16.0.0", @@ -10875,7 +11049,6 @@ dependencies = [ "pallet-nomination-pools", "parity-scale-codec", "sp-api", - "sp-std 14.0.0", ] [[package]] @@ -10942,7 +11115,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -10969,7 +11141,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -10986,7 +11157,6 @@ dependencies = [ "sp-io", "sp-metadata-ir", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11031,7 +11201,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11048,7 +11217,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11065,7 +11233,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11083,7 +11250,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11099,7 +11265,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11121,7 +11286,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11137,7 +11301,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11172,7 +11335,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11192,7 +11354,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11210,7 +11371,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11229,7 +11389,6 @@ dependencies = [ "sp-crypto-hashing", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11247,7 +11406,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-weights", "substrate-test-utils", ] @@ -11264,7 +11422,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11284,7 +11441,6 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine", - "sp-std 14.0.0", "sp-trie", ] @@ -11302,13 +11458,12 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-session", - "sp-std 14.0.0", ] [[package]] @@ -11320,7 +11475,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11334,14 +11488,13 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "rand_chacha 0.3.1", + "rand_chacha", "scale-info", "sp-arithmetic", "sp-core", "sp-crypto-hashing", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11360,7 +11513,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "rand_chacha 0.3.1", + "rand_chacha", "scale-info", "serde", "sp-application-crypto", @@ -11369,7 +11522,6 @@ dependencies = [ 
"sp-npos-elections", "sp-runtime", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] @@ -11413,13 +11565,12 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "scale-info", "serde", "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "substrate-state-trie-migration-rpc", "thousands", @@ -11442,7 +11593,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-statement-store", - "sp-std 14.0.0", ] [[package]] @@ -11458,7 +11608,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11490,7 +11639,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-timestamp", ] @@ -11511,7 +11659,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-storage 19.0.0", ] @@ -11529,7 +11676,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11575,7 +11721,6 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-transaction-storage-proof", ] @@ -11596,7 +11741,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11615,7 +11759,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11651,7 +11794,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11668,7 +11810,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11686,7 +11827,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11708,11 +11848,10 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -11732,7 +11871,6 @@ dependencies = [ "scale-info", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-builder", @@ -11893,7 +12031,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -11926,7 +12063,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "staging-parachain-info", "staging-xcm", "staging-xcm-executor", @@ -11971,7 +12107,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-parachain-info", "staging-xcm", @@ -11986,8 +12121,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", "serde", "unicode-normalization", ] @@ -12012,8 +12147,8 @@ dependencies = [ "log", "lz4", "memmap2 0.5.10", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "siphasher", "snap", ] @@ -12045,12 +12180,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-send-wrapper" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" - [[package]] name = "parity-util-mem" version = "0.12.0" @@ -12063,7 +12192,7 @@ dependencies = [ "impl-trait-for-tuples", "lru 0.8.1", "parity-util-mem-derive", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "primitive-types", "smallvec", "winapi", @@ -12077,7 +12206,7 @@ 
checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2 1.0.82", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -12105,9 +12234,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core 0.9.8", @@ -12153,7 +12282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle 2.5.0", ] @@ -12251,7 +12380,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -12260,7 +12388,7 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12288,9 +12416,7 @@ dependencies = [ "pallet-message-queue", "parachains-common", "parity-scale-codec", - "people-rococo-runtime", "polkadot-runtime-common", - "rococo-runtime", "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", @@ -12351,7 +12477,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -12361,7 +12486,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12389,12 +12514,10 @@ dependencies = [ "pallet-message-queue", "parachains-common", "parity-scale-codec", - "people-westend-runtime", "polkadot-runtime-common", "sp-runtime", "staging-xcm", "staging-xcm-executor", - "westend-runtime", "westend-runtime-constants", "westend-system-emulated-network", ] @@ -12451,7 +12574,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -12462,7 +12584,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -12654,9 +12776,9 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "schnorrkel 0.11.4", "sp-authority-discovery", "sp-core", @@ -12680,8 +12802,8 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sp-application-crypto", "sp-authority-discovery", "sp-core", @@ -12709,7 +12831,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", + "rand", "rstest", "sc-network", "schnellru", @@ -12741,7 +12863,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", + "rand", "rstest", "sc-network", "schnellru", @@ -12832,7 +12954,6 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -12892,15 +13013,15 @@ dependencies = [ "futures", 
"futures-timer", "lazy_static", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", "quickcheck", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sc-network", "sc-network-common", "sp-application-crypto", @@ -12926,7 +13047,7 @@ dependencies = [ "futures", "futures-timer", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -12982,7 +13103,7 @@ dependencies = [ "log", "merlin", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -12992,9 +13113,9 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "sc-keystore", "schnellru", "schnorrkel 0.11.4", @@ -13023,7 +13144,7 @@ dependencies = [ "kvdb-memorydb", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-erasure-coding", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -13139,7 +13260,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -13210,6 +13331,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -13269,7 +13391,7 @@ dependencies = [ "polkadot-parachain-primitives", "polkadot-primitives", "procfs", - "rand 0.8.5", + "rand", "rococo-runtime", "rusty-fork", "sc-sysinfo", @@ -13401,7 +13523,7 @@ dependencies = [ "log", "mick-jaeger", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-primitives", "sc-network", @@ -13419,7 +13541,9 @@ dependencies = [ "bs58 0.5.0", "futures", "futures-timer", - "hyper", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "log", "parity-scale-codec", "polkadot-primitives", @@ -13452,8 +13576,8 @@ dependencies = [ "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "sc-authority-discovery", "sc-network", "sc-network-types", @@ -13501,7 +13625,7 @@ version = "1.0.0" dependencies = [ "async-trait", "futures", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -13564,7 +13688,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "polkadot-erasure-coding", "polkadot-node-jaeger", @@ -13578,7 +13702,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "prioritized-metered-channel", - "rand 0.8.5", + "rand", "sc-client-api", "schnellru", "sp-application-crypto", @@ -13599,7 +13723,7 @@ dependencies = [ "futures", "futures-timer", "orchestra", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -13642,6 +13766,7 @@ dependencies = [ "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-relay-chain-interface", + "docify", "frame-benchmarking", 
"frame-benchmarking-cli", "frame-support", @@ -13724,7 +13849,6 @@ dependencies = [ "serde", "sp-core", "sp-runtime", - "sp-std 14.0.0", "sp-weights", ] @@ -13751,7 +13875,6 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-staking", - "sp-std 14.0.0", ] [[package]] @@ -13759,7 +13882,7 @@ name = "polkadot-primitives-test-helpers" version = "1.0.0" dependencies = [ "polkadot-primitives", - "rand 0.8.5", + "rand", "sp-application-crypto", "sp-core", "sp-keyring", @@ -13849,7 +13972,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", @@ -13864,7 +13986,6 @@ dependencies = [ "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", - "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -13900,10 +14021,9 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rstest", - "rustc-hex", "sc-keystore", "scale-info", "serde", @@ -13920,7 +14040,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-executor", @@ -14018,6 +14137,7 @@ dependencies = [ "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", + "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", @@ -14309,8 +14429,8 @@ dependencies = [ "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-emulator", - "xcm-fee-payment-runtime-api", "xcm-procedural", + "xcm-runtime-apis", "xcm-simulator", ] @@ -14330,6 +14450,9 @@ dependencies = [ "frame-support", "frame-system", "kitchensink-runtime", + "minimal-template-runtime", + "pallet-asset-conversion-tx-payment", + "pallet-asset-tx-payment", "pallet-assets", "pallet-aura", "pallet-authorship", @@ -14348,10 +14471,12 @@ dependencies = [ "pallet-proxy", "pallet-referenda", "pallet-scheduler", + "pallet-skip-feeless-payment", "pallet-timestamp", "pallet-transaction-payment", "pallet-uniques", "pallet-utility", + "parachain-template-runtime", "parity-scale-codec", "polkadot-sdk", "polkadot-sdk-frame", @@ -14371,6 +14496,7 @@ dependencies = [ "sc-service", "scale-info", "simple-mermaid 0.1.1", + "solochain-template-runtime", "sp-api", "sp-arithmetic", "sp-core", @@ -14379,6 +14505,9 @@ dependencies = [ "sp-keyring", "sp-offchain", "sp-runtime", + "sp-runtime-interface 24.0.0", + "sp-std 14.0.0", + "sp-tracing 16.0.0", "sp-version", "staging-chain-spec-builder", "staging-node-cli", @@ -14416,7 +14545,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -14449,7 +14577,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", @@ -14547,7 +14675,7 @@ dependencies = [ "tracing-gum", "westend-runtime", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -14571,7 +14699,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", - "rand_chacha 0.3.1", + "rand_chacha", "sc-keystore", "sc-network", "sp-application-crypto", @@ -14639,9 +14767,9 @@ dependencies = [ "prometheus", "pyroscope", "pyroscope_pprofrs", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + 
"rand", + "rand_chacha", + "rand_core", "rand_distr", "sc-keystore", "sc-network", @@ -14660,7 +14788,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", - "strum 0.24.1", + "strum 0.26.2", "substrate-prometheus-endpoint", "tokio", "tracing-gum", @@ -14720,7 +14848,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", - "rand 0.8.5", + "rand", "sp-core", "sp-keystore", "substrate-build-script-utils", @@ -14776,7 +14904,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", "sp-transaction-pool", "sp-trie", "sp-version", @@ -14808,7 +14935,7 @@ dependencies = [ "polkadot-runtime-parachains", "polkadot-service", "polkadot-test-runtime", - "rand 0.8.5", + "rand", "sc-authority-discovery", "sc-chain-spec", "sc-cli", @@ -14949,6 +15076,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "polling" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite 0.2.12", + "rustix 0.38.21", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -14984,7 +15125,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be97d76faf1bfab666e1375477b23fde79eccf0276e9b63b92a39d676a889ba9" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -15000,7 +15141,7 @@ dependencies = [ "log", "nix 0.26.2", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "smallvec", "symbolic-demangle", "tempfile", @@ -15165,6 +15306,17 @@ version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +[[package]] +name = "proc-macro-warning" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "proc-macro-warning" version = "1.0.0" @@ -15200,7 +15352,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "chrono", "flate2", "hex", @@ -15215,7 +15367,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "chrono", "hex", ] @@ -15230,19 +15382,19 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "thiserror", ] [[package]] name = "prometheus-client" -version = "0.19.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6fa99d535dd930d1249e6c79cb3c2915f9172a540fe2b02a4c8f9ca954721e" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "prometheus-client-derive-encode", ] @@ -15277,11 +15429,11 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.6.0", "lazy_static", "num-traits", - "rand 0.8.5", - 
"rand_chacha 0.3.1", + "rand", + "rand_chacha", "rand_xorshift", "regex-syntax 0.8.2", "rusty-fork", @@ -15301,12 +15453,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ "bytes", - "prost-derive 0.12.4", + "prost-derive 0.12.6", ] [[package]] @@ -15345,7 +15497,7 @@ dependencies = [ "once_cell", "petgraph", "prettyplease 0.2.12", - "prost 0.12.4", + "prost 0.12.6", "prost-types 0.12.4", "regex", "syn 2.0.61", @@ -15367,9 +15519,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", @@ -15393,7 +15545,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ - "prost 0.12.4", + "prost 0.12.6", ] [[package]] @@ -15446,7 +15598,7 @@ dependencies = [ "mach2", "once_cell", "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "web-sys", "winapi", ] @@ -15468,15 +15620,26 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1693116345026436eb2f10b677806169c1a1260c1c60eaaffe3fb5a29ae23d8b" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", - "unsigned-varint", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "quick_cache" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5253a3a0d56548d5b0be25414171dc780cc6870727746d05bd2bde352eee96c5" +dependencies = [ + "ahash 0.8.11", + "hashbrown 0.13.2", + "parking_lot 0.12.3", ] [[package]] @@ -15487,7 +15650,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand 0.8.5", + "rand", ] [[package]] @@ -15509,27 +15672,45 @@ checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" dependencies = [ "bytes", "pin-project-lite 0.2.12", - "quinn-proto", - "quinn-udp", + "quinn-proto 0.9.6", + "quinn-udp 0.3.2", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "thiserror", "tokio", "tracing", "webpki", ] +[[package]] +name = "quinn" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite 0.2.12", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", + "rustc-hash", + "rustls 0.21.7", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "quinn-proto" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" +checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" dependencies = [ "bytes", - "rand 0.8.5", + "rand", "ring 0.16.20", "rustc-hash", - "rustls 
0.20.8", + "rustls 0.20.9", "slab", "thiserror", "tinyvec", @@ -15537,6 +15718,23 @@ dependencies = [ "webpki", ] +[[package]] +name = "quinn-proto" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +dependencies = [ + "bytes", + "rand", + "ring 0.16.20", + "rustc-hash", + "rustls 0.21.7", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + [[package]] name = "quinn-udp" version = "0.3.2" @@ -15544,12 +15742,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" dependencies = [ "libc", - "quinn-proto", + "quinn-proto 0.9.6", "socket2 0.4.9", "tracing", "windows-sys 0.42.0", ] +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.7", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quote" version = "0.6.13" @@ -15574,19 +15785,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -15594,18 +15792,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -15615,16 +15803,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -15633,7 +15812,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom", ] [[package]] @@ -15643,16 +15822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "rand", ] [[package]] @@ -15661,7 +15831,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -15670,7 +15840,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -15773,7 +15943,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.10", + "getrandom", "redox_syscall 0.2.16", "thiserror", ] @@ -15903,23 +16073,22 @@ dependencies = [ "bp-runtime", "finality-relay", "frame-support", - "frame-system", "futures", "jsonrpsee", "log", "num-traits", - "pallet-balances", - "pallet-bridge-messages", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "parity-scale-codec", - "rand 0.8.5", + "quick_cache", + "rand", "relay-utils", "sc-chain-spec", "sc-rpc-api", "sc-transaction-pool-api", "scale-info", + "serde_json", "sp-consensus-grandpa", "sp-core", "sp-rpc", @@ -15948,7 +16117,7 @@ dependencies = [ "jsonpath_lib", "log", "num-traits", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde_json", "sp-runtime", "substrate-prometheus-endpoint", @@ -15984,11 +16153,11 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-rustls", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.29", + "hyper-rustls 0.24.2", "ipnet", "js-sys", "log", @@ -15996,7 +16165,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.12", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-pemfile 1.0.3", "serde", "serde_json", @@ -16070,22 +16239,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "getrandom 0.2.10", + "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.48.0", ] -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -16169,7 +16329,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -16271,7 +16430,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-tracing 16.0.0", "sp-transaction-pool", @@ -16284,7 +16442,7 @@ dependencies = [ "substrate-wasm-builder", "tiny-keccak", "tokio", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -16415,7 +16573,7 @@ dependencies = [ "parity-scale-codec", "primitive-types", "proptest", - "rand 0.8.5", + "rand", "rlp", "ruint-macro", "serde", @@ -16517,7 +16675,7 @@ version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.10", @@ -16526,11 +16684,10 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ - "log", "ring 0.16.20", "sct", "webpki", @@ -16538,9 +16695,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring 0.16.20", @@ -16550,14 +16707,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", + "once_cell", "ring 0.17.7", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.4", "subtle 2.5.0", "zeroize", ] @@ -16608,9 +16766,36 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.2.0" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + +[[package]] +name = "rustls-platform-verifier" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.10", + "rustls-native-certs 0.7.0", + "rustls-platform-verifier-android", + "rustls-webpki 0.102.4", + "security-framework", + "security-framework-sys", + "webpki-roots 0.26.3", + "winapi", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a716eb65e3158e90e17cd93d855216e27bde02745ab842f2cab4a39dba1bacf" +checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" @@ -16624,9 +16809,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.7", "rustls-pki-types", @@ -16664,9 +16849,9 @@ dependencies = [ [[package]] name = "rw-stream-sink" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" dependencies = [ "futures", "pin-project", @@ -16727,13 +16912,12 @@ dependencies = [ "libp2p", "linked_hash_set", "log", - "multihash 0.17.0", - "multihash-codetable", + "multihash 0.19.1", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "quickcheck", - "rand 0.8.5", + "rand", "sc-client-api", "sc-network", "sc-network-types", @@ -16757,7 +16941,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -16848,7 +17032,7 @@ dependencies = [ "names", "parity-bip39", 
"parity-scale-codec", - "rand 0.8.5", + "rand", "regex", "rpassword", "sc-client-api", @@ -16883,7 +17067,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -16919,9 +17103,9 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quickcheck", - "rand 0.8.5", + "rand", "sc-client-api", "sc-state-db", "schnellru", @@ -16943,10 +17127,9 @@ version = "0.33.0" dependencies = [ "async-trait", "futures", - "futures-timer", "log", "mockall 0.11.4", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-network-types", "sc-utils", @@ -16970,7 +17153,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17012,7 +17195,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17081,7 +17264,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17122,7 +17305,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-consensus-beefy", "sc-rpc", "serde", @@ -17163,8 +17346,8 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -17269,7 +17452,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-consensus", "sp-api", @@ -17317,7 +17500,7 @@ dependencies = [ "env_logger 0.11.3", "num_cpus", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "regex", "sc-executor-common", @@ -17379,7 +17562,7 @@ dependencies = [ "libc", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "rustix 0.36.15", "sc-allocator", @@ -17414,7 +17597,7 @@ name = "sc-keystore" version = "25.0.0" dependencies = [ "array-bytes", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde_json", "sp-application-crypto", "sp-core", @@ -17435,9 +17618,9 @@ dependencies = [ "futures-timer", "log", "mixnet", - "multiaddr", + "multiaddr 0.18.1", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-network", "sc-network-types", @@ -17475,12 +17658,12 @@ dependencies = [ "multistream-select", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "partial_sort", "pin-project", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", - "rand 0.8.5", + "rand", "sc-block-builder", "sc-client-api", "sc-network-common", @@ -17509,7 +17692,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", "wasm-timer", "zeroize", @@ -17541,7 +17724,6 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p", "log", "parity-scale-codec", "quickcheck", @@ -17566,7 +17748,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "sc-client-api", "sc-network", @@ -17584,7 +17766,6 @@ dependencies = [ "array-bytes", "async-channel", "futures", - "libp2p", "log", "parity-scale-codec", "sc-network", @@ 
-17611,7 +17792,7 @@ dependencies = [ "log", "mockall 0.11.4", "parity-scale-codec", - "prost 0.12.4", + "prost 0.12.6", "prost-build 0.12.4", "quickcheck", "sc-block-builder", @@ -17647,8 +17828,8 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -17675,7 +17856,6 @@ version = "0.33.0" dependencies = [ "array-bytes", "futures", - "libp2p", "log", "parity-scale-codec", "sc-network", @@ -17693,13 +17873,14 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58 0.5.0", - "ed25519-dalek 2.1.1", + "ed25519-dalek", "libp2p-identity", "litep2p", - "multiaddr", - "multihash 0.17.0", + "log", + "multiaddr 0.18.1", + "multihash 0.19.1", "quickcheck", - "rand 0.8.5", + "rand", "thiserror", "zeroize", ] @@ -17714,16 +17895,15 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper", - "hyper-rustls", + "hyper 0.14.29", + "hyper-rustls 0.24.2", "lazy_static", - "libp2p", "log", "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -17765,7 +17945,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", "sc-block-builder", "sc-chain-spec", @@ -17823,11 +18003,13 @@ dependencies = [ "forwarded-header-value", "futures", "governor", - "http", - "hyper", + "http 1.1.0", + "http-body-util", + "hyper 1.3.1", "ip_network", "jsonrpsee", "log", + "serde", "serde_json", "substrate-prometheus-endpoint", "tokio", @@ -17847,9 +18029,9 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", - "rand 0.8.5", + "rand", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -17886,7 +18068,6 @@ dependencies = [ "sp-io", "sp-runtime", "sp-runtime-interface 24.0.0", - "sp-std 14.0.0", "substrate-wasm-builder", ] @@ -17902,9 +18083,9 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "sc-chain-spec", "sc-client-api", "sc-client-db", @@ -17965,7 +18146,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -17997,7 +18178,7 @@ version = "0.30.0" dependencies = [ "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sp-core", ] @@ -18008,7 +18189,7 @@ dependencies = [ "env_logger 0.11.3", "log", "parity-db", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-client-api", "sc-keystore", "sp-api", @@ -18059,7 +18240,7 @@ dependencies = [ "futures", "libc", "log", - "rand 0.8.5", + "rand", "rand_pcg", "regex", "sc-telemetry", @@ -18080,9 +18261,9 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", + "rand", "sc-network", "sc-utils", "serde", @@ -18103,7 +18284,7 @@ dependencies = [ "libc", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "rustc-hash", "sc-client-api", @@ -18144,7 +18325,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", @@ -18190,7 +18371,7 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 
0.12.1", + "parking_lot 0.12.3", "prometheus", "sp-arithmetic", "tokio-test", @@ -18208,9 +18389,9 @@ dependencies = [ [[package]] name = "scale-decode" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12ebca36cec2a3f983c46295b282b35e5f8496346fb859a8776dad5389e5389" +checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" dependencies = [ "derive_more", "parity-scale-codec", @@ -18305,7 +18486,7 @@ dependencies = [ "arrayvec 0.7.4", "curve25519-dalek-ng", "merlin", - "rand_core 0.6.4", + "rand_core", "sha2 0.9.9", "subtle-ng", "zeroize", @@ -18320,10 +18501,10 @@ dependencies = [ "aead", "arrayref", "arrayvec 0.7.4", - "curve25519-dalek 4.1.2", + "curve25519-dalek", "getrandom_or_panic", "merlin", - "rand_core 0.6.4", + "rand_core", "serde_bytes", "sha2 0.10.8", "subtle 2.5.0", @@ -18368,7 +18549,7 @@ dependencies = [ "crc", "fxhash", "log", - "rand 0.8.5", + "rand", "slab", "thiserror", ] @@ -18426,22 +18607,23 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", + "num-bigint", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -18475,7 +18657,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -18533,6 +18714,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "separator" version = "0.4.1" @@ -18663,7 +18850,7 @@ dependencies = [ "futures", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serial_test_derive", ] @@ -18793,7 +18980,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-transaction-pool", "sp-version", "staging-parachain-info", @@ -18828,12 +19014,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" - [[package]] name = "signature" version = "2.1.0" @@ -18841,7 +19021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -18863,7 +19043,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", ] [[package]] @@ -18906,7 +19086,6 @@ dependencies = [ "parity-scale-codec", "paste", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ 
-18944,12 +19123,12 @@ dependencies = [ "async-channel", "async-executor", "async-fs", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-net", "async-process", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -18968,7 +19147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0bb30cf57b7b5f6109ce17c3164445e2d6f270af2cb48f6e4d31c2967c9a9f5" dependencies = [ "arrayvec 0.7.4", - "async-lock", + "async-lock 2.8.0", "atomic-take", "base64 0.21.2", "bip39", @@ -18979,9 +19158,9 @@ dependencies = [ "derive_more", "ed25519-zebra", "either", - "event-listener", + "event-listener 2.5.3", "fnv", - "futures-lite", + "futures-lite 1.13.0", "futures-util", "hashbrown 0.14.3", "hex", @@ -18997,8 +19176,8 @@ dependencies = [ "pbkdf2", "pin-project", "poly1305", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "ruzstd", "schnorrkel 0.10.2", "serde", @@ -19008,10 +19187,10 @@ dependencies = [ "siphasher", "slab", "smallvec", - "soketto", + "soketto 0.7.1", "twox-hash", "wasmi 0.31.2", - "x25519-dalek 2.0.0", + "x25519-dalek", "zeroize", ] @@ -19022,15 +19201,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "256b5bad1d6b49045e95fe87492ce73d5af81545d8b4d8318a872d2007024c33" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.8.0", "base64 0.21.2", "blake2-rfc", "derive_more", "either", - "event-listener", + "event-listener 2.5.3", "fnv", "futures-channel", - "futures-lite", + "futures-lite 1.13.0", "futures-util", "hashbrown 0.14.3", "hex", @@ -19038,10 +19217,10 @@ dependencies = [ "log", "lru 0.11.0", "no-std-net", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "serde", "serde_json", "siphasher", @@ -19066,8 +19245,8 @@ dependencies = [ "aes-gcm", "blake2 0.10.6", "chacha20poly1305", - "curve25519-dalek 4.1.2", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.8", @@ -19139,7 +19318,7 @@ dependencies = [ "hex-literal", "parity-bytes", "parity-scale-codec", - "rand 0.8.5", + "rand", "rlp", "scale-info", "serde", @@ -19160,7 +19339,7 @@ dependencies = [ "hex", "lazy_static", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "snowbridge-amcl", "zeroize", @@ -19204,7 +19383,7 @@ dependencies = [ "log", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "serde_json", @@ -19328,7 +19507,6 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "rustc-hex", "scale-info", "snowbridge-core", "sp-core", @@ -19407,9 +19585,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -19423,15 +19601,29 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "flate2", "futures", - "http", "httparse", "log", - "rand 0.8.5", + "rand", "sha-1 0.9.8", ] +[[package]] +name = "soketto" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures", + "http 
1.1.0", + "httparse", + "log", + "rand", + "sha1", +] + [[package]] name = "solochain-template-node" version = "0.0.0" @@ -19505,7 +19697,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-transaction-pool", "sp-version", @@ -19516,6 +19707,7 @@ dependencies = [ name = "sp-api" version = "26.0.0" dependencies = [ + "docify", "hash-db", "log", "parity-scale-codec", @@ -19527,7 +19719,6 @@ dependencies = [ "sp-runtime", "sp-runtime-interface 24.0.0", "sp-state-machine", - "sp-std 14.0.0", "sp-test-primitives", "sp-trie", "sp-version", @@ -19580,7 +19771,6 @@ dependencies = [ "serde", "sp-core", "sp-io", - "sp-std 14.0.0", ] [[package]] @@ -19604,11 +19794,10 @@ dependencies = [ "num-traits", "parity-scale-codec", "primitive-types", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-crypto-hashing", - "sp-std 14.0.0", "static_assertions", ] @@ -19668,10 +19857,11 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "schnellru", "sp-api", "sp-consensus", + "sp-core", "sp-database", "sp-runtime", "sp-state-machine", @@ -19821,10 +20011,10 @@ dependencies = [ "merlin", "parity-bip39", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "primitive-types", - "rand 0.8.5", + "rand", "regex", "scale-info", "schnorrkel 0.11.4", @@ -19938,7 +20128,7 @@ name = "sp-database" version = "10.0.0" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -20009,7 +20199,8 @@ name = "sp-io" version = "30.0.0" dependencies = [ "bytes", - "ed25519-dalek 2.1.1", + "docify", + "ed25519-dalek", "libsecp256k1", "log", "parity-scale-codec", @@ -20022,7 +20213,6 @@ dependencies = [ "sp-keystore", "sp-runtime-interface 24.0.0", "sp-state-machine", - "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-trie", "tracing", @@ -20043,9 +20233,9 @@ name = "sp-keystore" version = "0.34.0" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", - "rand_chacha 0.3.1", + "parking_lot 0.12.3", + "rand", + "rand_chacha", "sp-core", "sp-externalities 0.25.0", ] @@ -20099,7 +20289,7 @@ name = "sp-npos-elections" version = "26.0.0" dependencies = [ "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "serde", "sp-arithmetic", @@ -20114,7 +20304,7 @@ version = "2.0.0-alpha.5" dependencies = [ "clap 4.5.3", "honggfuzz", - "rand 0.8.5", + "rand", "sp-npos-elections", "sp-runtime", ] @@ -20159,7 +20349,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "paste", - "rand 0.8.5", + "rand", "scale-info", "serde", "serde_json", @@ -20174,6 +20364,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-weights", "substrate-test-runtime-client", + "tracing", "zstd 0.12.4", ] @@ -20267,7 +20458,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime-interface 24.0.0", - "sp-std 14.0.0", "substrate-wasm-builder", ] @@ -20316,9 +20506,9 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_assertions", - "rand 0.8.5", + "rand", "smallvec", "sp-core", "sp-externalities 0.25.0", @@ -20335,11 +20525,11 @@ name = "sp-statement-store" version = "10.0.0" dependencies = [ "aes-gcm", - "curve25519-dalek 4.1.2", - "ed25519-dalek 2.1.1", + "curve25519-dalek", + "ed25519-dalek", "hkdf", "parity-scale-codec", - "rand 0.8.5", + "rand", "scale-info", "sha2 0.10.8", "sp-api", @@ -20350,7 +20540,7 @@ dependencies = [ "sp-runtime", "sp-runtime-interface 24.0.0", "thiserror", - "x25519-dalek 2.0.0", + 
"x25519-dalek", ] [[package]] @@ -20464,8 +20654,8 @@ dependencies = [ "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "scale-info", "schnellru", "sp-core", @@ -20652,13 +20842,13 @@ dependencies = [ "parity-scale-codec", "platforms", "polkadot-sdk", - "rand 0.8.5", + "rand", "regex", "sc-service-test", "scale-info", "serde", "serde_json", - "soketto", + "soketto 0.7.1", "staging-node-inspect", "substrate-cli-test-utils", "tempfile", @@ -20695,7 +20885,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -20747,7 +20936,6 @@ dependencies = [ "sp-arithmetic", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-weights", "staging-xcm", "staging-xcm-executor", @@ -20761,16 +20949,15 @@ dependencies = [ "frame-benchmarking", "frame-support", "impl-trait-for-tuples", - "log", "parity-scale-codec", "scale-info", "sp-arithmetic", "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", "sp-weights", "staging-xcm", + "tracing", ] [[package]] @@ -20838,19 +21025,6 @@ dependencies = [ "serde", ] -[[package]] -name = "strobe-rs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabb238a1cccccfa4c4fb703670c0d157e1256c1ba695abf1b93bd2bb14bab2d" -dependencies = [ - "bitflags 1.3.2", - "byteorder", - "keccak", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "strsim" version = "0.8.0" @@ -21043,7 +21217,9 @@ dependencies = [ name = "substrate-prometheus-endpoint" version = "0.17.0" dependencies = [ - "hyper", + "http-body-util", + "hyper 1.3.1", + "hyper-util", "log", "prometheus", "thiserror", @@ -21063,9 +21239,7 @@ dependencies = [ "bp-polkadot-core", "bp-relayers", "bp-runtime", - "bridge-runtime-common", "equivocation-detector", - "finality-grandpa", "finality-relay", "frame-support", "frame-system", @@ -21085,9 +21259,11 @@ dependencies = [ "rbtag", "relay-substrate-client", "relay-utils", + "scale-info", "sp-consensus-grandpa", "sp-core", "sp-runtime", + "sp-trie", "structopt", "strum 0.26.2", "thiserror", @@ -21161,7 +21337,6 @@ dependencies = [ "frame-system", "frame-system-rpc-runtime-api", "futures", - "hex-literal", "log", "pallet-babe", "pallet-balances", @@ -21199,6 +21374,7 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "substrate-wasm-builder", + "tracing", "trie-db", ] @@ -21225,7 +21401,7 @@ version = "2.0.0" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "sc-transaction-pool", "sc-transaction-pool-api", "sp-blockchain", @@ -21437,6 +21613,17 @@ dependencies = [ "unicode-xid 0.2.4", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "sysinfo" version = "0.30.5" @@ -21564,7 +21751,6 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives", "sp-io", - "sp-std 14.0.0", "substrate-wasm-builder", "tiny-keccak", ] @@ -21612,7 +21798,6 @@ dependencies = [ "parity-scale-codec", "polkadot-parachain-primitives", "sp-io", - "sp-std 14.0.0", "substrate-wasm-builder", "tiny-keccak", ] @@ -21877,10 +22062,10 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", 
"tokio-macros", "windows-sys 0.48.0", ] @@ -21903,7 +22088,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", - "rand 0.8.5", + "rand", "tokio", ] @@ -21913,17 +22098,17 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.6", + "rustls 0.21.7", "tokio", ] [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.2", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -21961,7 +22146,7 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls 0.21.6", + "rustls 0.21.7", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", @@ -22046,6 +22231,7 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite 0.2.12", + "tokio", "tower-layer", "tower-service", "tracing", @@ -22053,17 +22239,15 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite 0.2.12", "tower-layer", "tower-service", @@ -22210,7 +22394,7 @@ dependencies = [ "matchers 0.1.0", "nu-ansi-term", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "regex", "sharded-slab", "smallvec", @@ -22283,7 +22467,7 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "rand 0.8.5", + "rand", "smallvec", "socket2 0.4.9", "thiserror", @@ -22309,7 +22493,7 @@ dependencies = [ "idna 0.4.0", "ipnet", "once_cell", - "rand 0.8.5", + "rand", "smallvec", "thiserror", "tinyvec", @@ -22318,26 +22502,6 @@ dependencies = [ "url", ] -[[package]] -name = "trust-dns-resolver" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto 0.22.0", -] - [[package]] name = "trust-dns-resolver" version = "0.23.2" @@ -22349,8 +22513,8 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "rand", "resolv-conf", "smallvec", "thiserror", @@ -22396,11 +22560,11 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.9", "httparse", "log", - "rand 0.8.5", - "rustls 0.21.6", + "rand", + "rustls 0.21.7", "sha1", "thiserror", "url", @@ -22421,7 +22585,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand", "static_assertions", ] @@ -22526,6 +22690,15 @@ dependencies = 
[ "bytes", "futures-io", "futures-util", +] + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +dependencies = [ + "bytes", "tokio-util", ] @@ -22651,9 +22824,9 @@ dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "rand", + "rand_chacha", + "rand_core", "sha2 0.10.8", "sha3", "thiserror", @@ -22694,12 +22867,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -23136,7 +23303,7 @@ dependencies = [ "memfd", "memoffset 0.8.0", "paste", - "rand 0.8.5", + "rand", "rustix 0.36.15", "wasmtime-asm-macros", "wasmtime-environ", @@ -23199,18 +23366,18 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.6" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki", -] +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "westend-emulated-chain" @@ -23229,7 +23396,7 @@ dependencies = [ "staging-xcm", "westend-runtime", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -23306,7 +23473,6 @@ dependencies = [ "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rustc-hex", "scale-info", "serde", "serde_derive", @@ -23330,7 +23496,6 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std 14.0.0", "sp-storage 19.0.0", "sp-tracing 16.0.0", "sp-transaction-pool", @@ -23342,7 +23507,7 @@ dependencies = [ "tiny-keccak", "tokio", "westend-runtime-constants", - "xcm-fee-payment-runtime-api", + "xcm-runtime-apis", ] [[package]] @@ -23433,23 +23598,20 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", + "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.48.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -23459,10 +23621,19 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", "windows-targets 0.52.0", ] +[[package]] +name = "windows-core" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -23577,12 +23748,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" -[[package]] -name = "windows_aarch64_msvc" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -23601,12 +23766,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" -[[package]] -name = "windows_i686_gnu" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -23625,12 +23784,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" -[[package]] -name = "windows_i686_msvc" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -23649,12 +23802,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" -[[package]] -name = "windows_x86_64_gnu" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -23691,12 +23838,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" -[[package]] -name = "windows_x86_64_msvc" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -23743,42 +23884,30 @@ dependencies = [ "tap", ] -[[package]] -name = "x25519-dalek" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" -dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", - "zeroize", -] - [[package]] name = "x25519-dalek" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.1.2", - "rand_core 0.6.4", + "curve25519-dalek", + "rand_core", "serde", "zeroize", ] [[package]] name = "x509-parser" -version = "0.14.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs", - "base64 0.13.1", + "asn1-rs 0.5.2", "data-encoding", - "der-parser", + "der-parser 8.2.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.6.1", "rusticata-macros", "thiserror", "time", @@ -23786,16 +23915,16 @@ dependencies = [ [[package]] name = "x509-parser" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs", + "asn1-rs 0.6.1", "data-encoding", - "der-parser", + "der-parser 9.0.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.7.0", "rusticata-macros", "thiserror", "time", @@ -23893,13 +24022,26 @@ dependencies = [ ] [[package]] -name = "xcm-fee-payment-runtime-api" +name = "xcm-procedural" +version = "7.0.0" +dependencies = [ + "Inflector", + "proc-macro2 1.0.82", + "quote 1.0.35", + "staging-xcm", + "syn 2.0.61", + "trybuild", +] + +[[package]] +name = "xcm-runtime-apis" version = "0.1.0" dependencies = [ - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-executive", "frame-support", "frame-system", + "hex-literal", "log", "pallet-assets", "pallet-balances", @@ -23908,26 +24050,12 @@ dependencies = [ "scale-info", "sp-api", "sp-io", - "sp-runtime", - "sp-std 14.0.0", "sp-weights", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", ] -[[package]] -name = "xcm-procedural" -version = "7.0.0" -dependencies = [ - "Inflector", - "proc-macro2 1.0.82", - "quote 1.0.35", - "staging-xcm", - "syn 2.0.61", - "trybuild", -] - [[package]] name = "xcm-simulator" version = "7.0.0" @@ -24004,17 +24132,33 @@ dependencies = [ "xcm-simulator", ] +[[package]] +name = "xml-rs" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", - "rand 0.8.5", + "parking_lot 0.12.3", + "pin-project", + "rand", "static_assertions", ] @@ -24055,9 +24199,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 2b2a1cdc17d5c..0999d63040130 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,7 +226,7 @@ members = [ "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", "polkadot/xcm/xcm-executor/integration-tests", - "polkadot/xcm/xcm-fee-payment-runtime-api", + "polkadot/xcm/xcm-runtime-apis", "polkadot/xcm/xcm-simulator", 
"polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", @@ -306,6 +306,7 @@ members = [ "substrate/frame/asset-conversion/ops", "substrate/frame/asset-rate", "substrate/frame/assets", + "substrate/frame/assets-freezer", "substrate/frame/atomic-swap", "substrate/frame/aura", "substrate/frame/authority-discovery", @@ -564,19 +565,787 @@ extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic [workspace.dependencies] +Inflector = { version = "0.11.4" } +aes-gcm = { version = "0.10" } +ahash = { version = "0.8.2" } +alloy-primitives = { version = "0.4.2", default-features = false } +alloy-sol-types = { version = "0.4.2", default-features = false } +always-assert = { version = "0.1" } +ansi_term = { version = "0.12.1" } +anyhow = { version = "1.0.81" } +aquamarine = { version = "0.5.0" } +arbitrary = { version = "1.3.2" } +ark-bls12-377 = { version = "0.4.0", default-features = false } +ark-bls12-377-ext = { version = "0.4.1", default-features = false } +ark-bls12-381 = { version = "0.4.0", default-features = false } +ark-bls12-381-ext = { version = "0.4.1", default-features = false } +ark-bw6-761 = { version = "0.4.0", default-features = false } +ark-bw6-761-ext = { version = "0.4.1", default-features = false } +ark-ec = { version = "0.4.2", default-features = false } +ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false } +ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false } +ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false } +ark-scale = { version = "0.0.12", default-features = false } +array-bytes = { version = "6.2.2", default-features = false } +arrayvec = { version = "0.7.4" } +assert_cmd = { version = "2.0.10" } +assert_matches = { version = "1.5.0" } +asset-hub-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo" } +asset-hub-rococo-runtime = { path = "cumulus/parachains/runtimes/assets/asset-hub-rococo", default-features = false } +asset-hub-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend" } +asset-hub-westend-runtime = { path = "cumulus/parachains/runtimes/assets/asset-hub-westend" } +asset-test-utils = { path = "cumulus/parachains/runtimes/assets/test-utils", default-features = false } +assets-common = { path = "cumulus/parachains/runtimes/assets/common", default-features = false } +async-channel = { version = "1.8.0" } +async-std = { version = "1.9.0" } +async-trait = { version = "0.1.79" } +asynchronous-codec = { version = "0.6" } +backoff = { version = "0.4" } +backtrace = { version = "0.3.64" } +binary-merkle-tree = { path = "substrate/utils/binary-merkle-tree", default-features = false } +bincode = { version = "1.3.3" } +bip39 = { version = "2.0.0" } +bitflags = { version = "1.3.2" } +bitvec = { version = "1.0.1", default-features = false } +blake2 = { version = "0.10.4", default-features = false } +blake2b_simd = { version = "1.0.1", default-features = false } +blake3 = { version = "1.5" } +bounded-collections = { version = "0.2.0", default-features = false } +bounded-vec = { version = "0.7" } +bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = 
"bridges/chains/chain-asset-hub-westend", default-features = false } +bp-beefy = { path = "bridges/primitives/beefy", default-features = false } +bp-bridge-hub-cumulus = { path = "bridges/chains/chain-bridge-hub-cumulus", default-features = false } +bp-bridge-hub-kusama = { default-features = false, path = "bridges/chains/chain-bridge-hub-kusama" } +bp-bridge-hub-polkadot = { path = "bridges/chains/chain-bridge-hub-polkadot", default-features = false } +bp-bridge-hub-rococo = { path = "bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "bridges/chains/chain-bridge-hub-westend", default-features = false } +bp-header-chain = { path = "bridges/primitives/header-chain", default-features = false } +bp-kusama = { default-features = false, path = "bridges/chains/chain-kusama" } +bp-messages = { path = "bridges/primitives/messages", default-features = false } +bp-parachains = { path = "bridges/primitives/parachains", default-features = false } +bp-polkadot = { default-features = false, path = "bridges/chains/chain-polkadot" } +bp-polkadot-bulletin = { path = "bridges/chains/chain-polkadot-bulletin", default-features = false } +bp-polkadot-core = { path = "bridges/primitives/polkadot-core", default-features = false } +bp-relayers = { path = "bridges/primitives/relayers", default-features = false } +bp-rococo = { path = "bridges/chains/chain-rococo", default-features = false } +bp-runtime = { path = "bridges/primitives/runtime", default-features = false } +bp-test-utils = { path = "bridges/primitives/test-utils", default-features = false } +bp-westend = { path = "bridges/chains/chain-westend", default-features = false } +bp-xcm-bridge-hub = { path = "bridges/primitives/xcm-bridge-hub", default-features = false } +bp-xcm-bridge-hub-router = { path = "bridges/primitives/xcm-bridge-hub-router", default-features = false } +bridge-hub-common = { path = "cumulus/parachains/runtimes/bridge-hubs/common", default-features = false } +bridge-hub-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo" } +bridge-hub-rococo-runtime = { path = "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +bridge-hub-test-utils = { path = "cumulus/parachains/runtimes/bridge-hubs/test-utils", default-features = false } +bridge-hub-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend" } +bridge-hub-westend-runtime = { path = "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +bridge-runtime-common = { path = "bridges/bin/runtime-common", default-features = false } +bs58 = { version = "0.5.0", default-features = false } +build-helper = { version = "0.1.1" } +byte-slice-cast = { version = "1.2.1", default-features = false } +byteorder = { version = "1.3.2", default-features = false } +bytes = { version = "1.4.0", default-features = false } +cargo_metadata = { version = "0.15.4" } +cfg-expr = { version = "0.15.5" } +cfg-if = { version = "1.0" } +chain-spec-builder = { path = "substrate/bin/utils/chain-spec-builder", default-features = false, package = "staging-chain-spec-builder" } +chain-spec-guide-runtime = { path = "docs/sdk/src/reference_docs/chain_spec_runtime" } +chrono = { version = "0.4.31" } +cid = { version = "0.9.0" } +clap = { version = "4.5.3" } +clap-num = { version = "1.0.2" } +clap_complete = { version = "4.0.2" } +coarsetime = { version = "0.1.22" } 
+codec = { version = "3.6.12", default-features = false, package = "parity-scale-codec" } +collectives-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend" } +collectives-westend-runtime = { path = "cumulus/parachains/runtimes/collectives/collectives-westend" } +color-eyre = { version = "0.6.1", default-features = false } +color-print = { version = "0.3.4" } +colored = { version = "2.0.4" } +comfy-table = { version = "7.1.0", default-features = false } +console = { version = "0.15.8" } +contracts-rococo-runtime = { path = "cumulus/parachains/runtimes/contracts/contracts-rococo" } +coretime-rococo-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-rococo" } +coretime-westend-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-westend" } +cpu-time = { version = "1.0.0" } +criterion = { version = "0.5.1", default-features = false } +cumulus-client-cli = { path = "cumulus/client/cli", default-features = false } +cumulus-client-collator = { path = "cumulus/client/collator", default-features = false } +cumulus-client-consensus-aura = { path = "cumulus/client/consensus/aura", default-features = false } +cumulus-client-consensus-common = { path = "cumulus/client/consensus/common", default-features = false } +cumulus-client-consensus-proposer = { path = "cumulus/client/consensus/proposer", default-features = false } +cumulus-client-consensus-relay-chain = { path = "cumulus/client/consensus/relay-chain", default-features = false } +cumulus-client-network = { path = "cumulus/client/network", default-features = false } +cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } +cumulus-client-pov-recovery = { path = "cumulus/client/pov-recovery", default-features = false } +cumulus-client-service = { path = "cumulus/client/service", default-features = false } +cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } +cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } +cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } +cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } +cumulus-pallet-xcm = { path = "cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "cumulus/pallets/xcmp-queue", default-features = false } +cumulus-ping = { path = "cumulus/parachains/pallets/ping", default-features = false } +cumulus-primitives-aura = { path = "cumulus/primitives/aura", default-features = false } +cumulus-primitives-core = { path = "cumulus/primitives/core", default-features = false } +cumulus-primitives-parachain-inherent = { path = "cumulus/primitives/parachain-inherent", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "cumulus/primitives/proof-size-hostfunction", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "cumulus/primitives/storage-weight-reclaim", default-features = false } +cumulus-primitives-timestamp = { path = "cumulus/primitives/timestamp", default-features = false } +cumulus-primitives-utility = { path = "cumulus/primitives/utility", 
default-features = false } +cumulus-relay-chain-inprocess-interface = { path = "cumulus/client/relay-chain-inprocess-interface", default-features = false } +cumulus-relay-chain-interface = { path = "cumulus/client/relay-chain-interface", default-features = false } +cumulus-relay-chain-minimal-node = { path = "cumulus/client/relay-chain-minimal-node", default-features = false } +cumulus-relay-chain-rpc-interface = { path = "cumulus/client/relay-chain-rpc-interface", default-features = false } +cumulus-test-client = { path = "cumulus/test/client" } +cumulus-test-relay-sproof-builder = { path = "cumulus/test/relay-sproof-builder", default-features = false } +cumulus-test-runtime = { path = "cumulus/test/runtime" } +cumulus-test-service = { path = "cumulus/test/service" } +curve25519-dalek = { version = "4.1.3" } +derivative = { version = "2.2.0", default-features = false } +derive-syn-parse = { version = "0.2.0" } +derive_more = { version = "0.99.17", default-features = false } +digest = { version = "0.10.3", default-features = false } +directories = { version = "5.0.1" } +dlmalloc = { version = "0.2.4" } +docify = { version = "0.2.8" } +dyn-clonable = { version = "0.9.0" } +dyn-clone = { version = "1.0.16" } +ed25519-dalek = { version = "2.1", default-features = false } +ed25519-zebra = { version = "4.0.3", default-features = false } +either = { version = "1.8.1", default-features = false } +emulated-integration-tests-common = { path = "cumulus/parachains/integration-tests/emulated/common", default-features = false } +enumflags2 = { version = "0.7.7" } +enumn = { version = "0.1.12" } +env_logger = { version = "0.11.3" } +environmental = { version = "1.1.4", default-features = false } +equivocation-detector = { path = "bridges/relays/equivocation" } +ethabi = { version = "1.0.0", default-features = false, package = "ethabi-decode" } +ethbloom = { version = "0.13.0", default-features = false } +ethereum-types = { version = "0.14.1", default-features = false } +exit-future = { version = "0.2.0" } +expander = { version = "2.0.0" } +fatality = { version = "0.1.1" } +fdlimit = { version = "0.3.0" } +femme = { version = "2.2.1" } +filetime = { version = "0.2.16" } +finality-grandpa = { version = "0.16.2", default-features = false } +finality-relay = { path = "bridges/relays/finality" } +flate2 = { version = "1.0" } +fnv = { version = "1.0.6" } +fork-tree = { path = "substrate/utils/fork-tree", default-features = false } +forwarded-header-value = { version = "0.1.1" } +fraction = { version = "0.13.1" } +frame = { path = "substrate/frame", default-features = false, package = "polkadot-sdk-frame" } +frame-benchmarking = { path = "substrate/frame/benchmarking", default-features = false } +frame-benchmarking-cli = { path = "substrate/utils/frame/benchmarking-cli", default-features = false } +frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/frame/benchmarking/pov" } +frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } +frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } +frame-executive = { path = "substrate/frame/executive", default-features = false } +frame-metadata = { version = "16.0.0", default-features = false } +frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } +frame-support = { path = "substrate/frame/support", default-features = false } +frame-support-procedural 
= { path = "substrate/frame/support/procedural", default-features = false } +frame-support-procedural-tools = { path = "substrate/frame/support/procedural/tools", default-features = false } +frame-support-procedural-tools-derive = { path = "substrate/frame/support/procedural/tools/derive", default-features = false } +frame-support-test = { path = "substrate/frame/support/test" } +frame-system = { path = "substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "substrate/frame/system/benchmarking", default-features = false } +frame-system-rpc-runtime-api = { path = "substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "substrate/frame/try-runtime", default-features = false } +fs4 = { version = "0.7.0" } +fs_extra = { version = "1.3.0" } +futures = { version = "0.3.30" } +futures-channel = { version = "0.3.23" } +futures-timer = { version = "3.0.2" } +futures-util = { version = "0.3.30", default-features = false } +generate-bags = { path = "substrate/utils/frame/generate-bags", default-features = false } +gethostname = { version = "0.2.3" } +glob = { version = "0.3" } +glutton-westend-runtime = { path = "cumulus/parachains/runtimes/glutton/glutton-westend" } +governor = { version = "0.6.0" } +gum = { path = "polkadot/node/gum", default-features = false, package = "tracing-gum" } +gum-proc-macro = { path = "polkadot/node/gum/proc-macro", default-features = false, package = "tracing-gum-proc-macro" } +handlebars = { version = "5.1.0" } +hash-db = { version = "0.16.0", default-features = false } +hash256-std-hasher = { version = "0.15.2", default-features = false } +hex = { version = "0.4.3", default-features = false } +hex-literal = { version = "0.4.1", default-features = false } +hkdf = { version = "0.12.0" } +hmac = { version = "0.12.1" } +honggfuzz = { version = "0.5.55" } +http = { version = "1.1" } +http-body = { version = "1", default-features = false } +http-body-util = { version = "0.1.2", default-features = false } +hyper = { version = "1.3.1", default-features = false } +hyper-rustls = { version = "0.24.2" } +hyper-util = { version = "0.1.5", default-features = false } +# TODO: remove hyper v0.14 https://github.com/paritytech/polkadot-sdk/issues/4896 +hyperv14 = { package = "hyper", version = "0.14.29", default-features = false } +impl-serde = { version = "0.4.0", default-features = false } +impl-trait-for-tuples = { version = "0.2.2" } +indexmap = { version = "2.0.0" } +indicatif = { version = "0.17.7" } +integer-sqrt = { version = "0.1.2" } +ip_network = { version = "0.4.1" } +is-terminal = { version = "0.4.9" } +is_executable = { version = "1.0.1" } +isahc = { version = "1.2" } +itertools = { version = "0.11" } +jsonpath_lib = { version = "0.3" } +jsonrpsee = { version = "0.23.2" } +jsonrpsee-core = { version = "0.23.2" } +k256 = { version = "0.13.3", default-features = false } +kitchensink-runtime = { path = "substrate/bin/node/runtime" } +kvdb = { version = "0.13.0" } +kvdb-memorydb = { version = "0.13.0" } +kvdb-rocksdb = { version = "0.19.0" } +kvdb-shared-tests = { version = "0.11.0" } +landlock = { version = "0.3.0" } +lazy_static = { version = "1.4.0" } +libc = { version = "0.2.153" } +libfuzzer-sys = { version = "0.4" } +libp2p = { version = "0.52.4" } +libp2p-identity = { version = "0.2.3" } +libsecp256k1 = { version = "0.7.0", default-features = false } +linked-hash-map = { version = "0.5.4" } +linked_hash_set = { version = "0.1.4" } +linregress = { version = "0.5.1" } +lite-json = { version = 
"0.2.0", default-features = false } +litep2p = { version = "0.6.2" } +log = { version = "0.4.21", default-features = false } +macro_magic = { version = "0.5.1" } +maplit = { version = "1.0.2" } +memmap2 = { version = "0.9.3" } +memory-db = { version = "0.32.0", default-features = false } +merkleized-metadata = { version = "0.1.0" } +merlin = { version = "3.0", default-features = false } +messages-relay = { path = "bridges/relays/messages" } +metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } +mick-jaeger = { version = "0.1.8" } +milagro-bls = { version = "1.5.4", default-features = false, package = "snowbridge-milagro-bls" } +minimal-template-node = { path = "templates/minimal/node" } +minimal-template-runtime = { path = "templates/minimal/runtime" } +mixnet = { version = "0.7.0" } +mmr-gadget = { path = "substrate/client/merkle-mountain-range", default-features = false } +mmr-lib = { version = "0.5.2", package = "ckb-merkle-mountain-range" } +mmr-rpc = { path = "substrate/client/merkle-mountain-range/rpc", default-features = false } +mockall = { version = "0.11.3" } +multiaddr = { version = "0.18.1" } +multihash = { version = "0.19.1", default-features = false } +multihash-codetable = { version = "0.1.1" } +multistream-select = { version = "0.13.0" } +names = { version = "0.14.0", default-features = false } +nix = { version = "0.28.0" } +node-cli = { path = "substrate/bin/node/cli", package = "staging-node-cli" } +node-inspect = { path = "substrate/bin/node/inspect", default-features = false, package = "staging-node-inspect" } +node-primitives = { path = "substrate/bin/node/primitives", default-features = false } +node-rpc = { path = "substrate/bin/node/rpc" } +node-testing = { path = "substrate/bin/node/testing" } +nohash-hasher = { version = "0.2.0" } +novelpoly = { version = "2.0.0", package = "reed-solomon-novelpoly" } +num-bigint = { version = "0.4.3" } +num-format = { version = "0.4.3" } +num-rational = { version = "0.4.1" } +num-traits = { version = "0.2.17", default-features = false } +num_cpus = { version = "1.13.1" } +once_cell = { version = "1.19.0" } +orchestra = { version = "0.4.0", default-features = false } +pallet-alliance = { path = "substrate/frame/alliance", default-features = false } +pallet-asset-conversion = { path = "substrate/frame/asset-conversion", default-features = false } +pallet-asset-conversion-ops = { path = "substrate/frame/asset-conversion/ops", default-features = false } +pallet-asset-conversion-tx-payment = { path = "substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-asset-rate = { path = "substrate/frame/asset-rate", default-features = false } +pallet-asset-tx-payment = { path = "substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-assets = { path = "substrate/frame/assets", default-features = false } +pallet-assets-freezer = { path = "substrate/frame/assets-freezer", default-features = false } +pallet-atomic-swap = { default-features = false, path = "substrate/frame/atomic-swap" } +pallet-aura = { path = "substrate/frame/aura", default-features = false } +pallet-authority-discovery = { path = "substrate/frame/authority-discovery", default-features = false } +pallet-authorship = { path = "substrate/frame/authorship", default-features = false } +pallet-babe = { path = "substrate/frame/babe", default-features = false } +pallet-bags-list = { path = "substrate/frame/bags-list", default-features = false } 
+pallet-bags-list-remote-tests = { path = "substrate/frame/bags-list/remote-tests" } +pallet-balances = { path = "substrate/frame/balances", default-features = false } +pallet-beefy = { path = "substrate/frame/beefy", default-features = false } +pallet-beefy-mmr = { path = "substrate/frame/beefy-mmr", default-features = false } +pallet-bounties = { path = "substrate/frame/bounties", default-features = false } +pallet-bridge-grandpa = { path = "bridges/modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "bridges/modules/messages", default-features = false } +pallet-bridge-parachains = { path = "bridges/modules/parachains", default-features = false } +pallet-bridge-relayers = { path = "bridges/modules/relayers", default-features = false } +pallet-broker = { path = "substrate/frame/broker", default-features = false } +pallet-child-bounties = { path = "substrate/frame/child-bounties", default-features = false } +pallet-collator-selection = { path = "cumulus/pallets/collator-selection", default-features = false } +pallet-collective = { path = "substrate/frame/collective", default-features = false } +pallet-collective-content = { path = "cumulus/parachains/pallets/collective-content", default-features = false } +pallet-contracts = { path = "substrate/frame/contracts", default-features = false } +pallet-contracts-fixtures = { path = "substrate/frame/contracts/fixtures" } +pallet-contracts-mock-network = { default-features = false, path = "substrate/frame/contracts/mock-network" } +pallet-contracts-proc-macro = { path = "substrate/frame/contracts/proc-macro", default-features = false } +pallet-contracts-uapi = { path = "substrate/frame/contracts/uapi", default-features = false } +pallet-conviction-voting = { path = "substrate/frame/conviction-voting", default-features = false } +pallet-core-fellowship = { path = "substrate/frame/core-fellowship", default-features = false } +pallet-default-config-example = { path = "substrate/frame/examples/default-config", default-features = false } +pallet-delegated-staking = { path = "substrate/frame/delegated-staking", default-features = false } +pallet-democracy = { path = "substrate/frame/democracy", default-features = false } +pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features = false } +pallet-election-provider-multi-phase = { path = "substrate/frame/election-provider-multi-phase", default-features = false } +pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false } +pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false } +pallet-example-basic = { path = "substrate/frame/examples/basic", default-features = false } +pallet-example-frame-crate = { path = "substrate/frame/examples/frame-crate", default-features = false } +pallet-example-kitchensink = { path = "substrate/frame/examples/kitchensink", default-features = false } +pallet-example-mbm = { path = "substrate/frame/examples/multi-block-migrations", default-features = false } +pallet-example-offchain-worker = { path = "substrate/frame/examples/offchain-worker", default-features = false } +pallet-example-single-block-migrations = { path = "substrate/frame/examples/single-block-migrations", default-features = false } +pallet-example-split = { path = "substrate/frame/examples/split", default-features = false } +pallet-example-tasks = { path = "substrate/frame/examples/tasks", default-features = false } +pallet-examples = { 
path = "substrate/frame/examples" } +pallet-fast-unstake = { path = "substrate/frame/fast-unstake", default-features = false } +pallet-glutton = { path = "substrate/frame/glutton", default-features = false } +pallet-grandpa = { path = "substrate/frame/grandpa", default-features = false } +pallet-identity = { path = "substrate/frame/identity", default-features = false } +pallet-im-online = { path = "substrate/frame/im-online", default-features = false } +pallet-indices = { path = "substrate/frame/indices", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "substrate/frame/insecure-randomness-collective-flip", default-features = false } +pallet-lottery = { default-features = false, path = "substrate/frame/lottery" } +pallet-membership = { path = "substrate/frame/membership", default-features = false } +pallet-message-queue = { path = "substrate/frame/message-queue", default-features = false } +pallet-migrations = { path = "substrate/frame/migrations", default-features = false } +pallet-minimal-template = { path = "templates/minimal/pallets/template", default-features = false } +pallet-mixnet = { default-features = false, path = "substrate/frame/mixnet" } +pallet-mmr = { path = "substrate/frame/merkle-mountain-range", default-features = false } +pallet-multisig = { path = "substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = "substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "substrate/frame/nfts/runtime-api", default-features = false } +pallet-nis = { path = "substrate/frame/nis", default-features = false } +pallet-node-authorization = { default-features = false, path = "substrate/frame/node-authorization" } +pallet-nomination-pools = { path = "substrate/frame/nomination-pools", default-features = false } +pallet-nomination-pools-benchmarking = { path = "substrate/frame/nomination-pools/benchmarking", default-features = false } +pallet-nomination-pools-runtime-api = { path = "substrate/frame/nomination-pools/runtime-api", default-features = false } +pallet-offences = { path = "substrate/frame/offences", default-features = false } +pallet-offences-benchmarking = { path = "substrate/frame/offences/benchmarking", default-features = false } +pallet-paged-list = { path = "substrate/frame/paged-list", default-features = false } +pallet-parachain-template = { path = "templates/parachain/pallets/template", default-features = false } +pallet-parameters = { path = "substrate/frame/parameters", default-features = false } +pallet-preimage = { path = "substrate/frame/preimage", default-features = false } +pallet-proxy = { path = "substrate/frame/proxy", default-features = false } +pallet-ranked-collective = { path = "substrate/frame/ranked-collective", default-features = false } +pallet-recovery = { path = "substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "substrate/frame/referenda", default-features = false } +pallet-remark = { default-features = false, path = "substrate/frame/remark" } +pallet-root-offences = { default-features = false, path = "substrate/frame/root-offences" } +pallet-root-testing = { path = "substrate/frame/root-testing", default-features = false } +pallet-safe-mode = { default-features = false, path = "substrate/frame/safe-mode" } +pallet-salary = { path = "substrate/frame/salary", default-features = false } +pallet-scheduler = { path = 
"substrate/frame/scheduler", default-features = false } +pallet-scored-pool = { default-features = false, path = "substrate/frame/scored-pool" } +pallet-session = { path = "substrate/frame/session", default-features = false } +pallet-session-benchmarking = { path = "substrate/frame/session/benchmarking", default-features = false } +pallet-skip-feeless-payment = { path = "substrate/frame/transaction-payment/skip-feeless-payment", default-features = false } +pallet-society = { path = "substrate/frame/society", default-features = false } +pallet-staking = { path = "substrate/frame/staking", default-features = false } +pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false } +pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false } +pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false } +pallet-state-trie-migration = { path = "substrate/frame/state-trie-migration", default-features = false } +pallet-statement = { default-features = false, path = "substrate/frame/statement" } +pallet-sudo = { path = "substrate/frame/sudo", default-features = false } +pallet-template = { path = "templates/solochain/pallets/template", default-features = false } +pallet-timestamp = { path = "substrate/frame/timestamp", default-features = false } +pallet-tips = { path = "substrate/frame/tips", default-features = false } +pallet-transaction-payment = { path = "substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc = { path = "substrate/frame/transaction-payment/rpc", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-transaction-storage = { default-features = false, path = "substrate/frame/transaction-storage" } +pallet-treasury = { path = "substrate/frame/treasury", default-features = false } +pallet-tx-pause = { default-features = false, path = "substrate/frame/tx-pause" } +pallet-uniques = { path = "substrate/frame/uniques", default-features = false } +pallet-utility = { path = "substrate/frame/utility", default-features = false } +pallet-vesting = { path = "substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = "substrate/frame/whitelist", default-features = false } +pallet-xcm = { path = "polkadot/xcm/pallet-xcm", default-features = false } +pallet-xcm-benchmarks = { path = "polkadot/xcm/pallet-xcm-benchmarks", default-features = false } +pallet-xcm-bridge-hub = { path = "bridges/modules/xcm-bridge-hub", default-features = false } +pallet-xcm-bridge-hub-router = { path = "bridges/modules/xcm-bridge-hub-router", default-features = false } +parachain-info = { path = "cumulus/parachains/pallets/parachain-info", default-features = false, package = "staging-parachain-info" } +parachain-template-runtime = { path = "templates/parachain/runtime" } +parachains-common = { path = "cumulus/parachains/common", default-features = false } +parachains-relay = { path = "bridges/relays/parachains" } +parachains-runtimes-test-utils = { path = "cumulus/parachains/runtimes/test-utils", default-features = false } +parity-bytes = { version = "0.1.2", default-features = false } +parity-db = { version = "0.4.12" } +parity-util-mem = { version = "0.12.0" } +parity-wasm = { version = "0.45.0" } +parking_lot = { version = "0.12.1", default-features = false } +partial_sort = { version = "0.2.0" } +paste = { version 
= "1.0.14", default-features = false } +pbkdf2 = { version = "0.12.2", default-features = false } +penpal-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal" } +penpal-runtime = { path = "cumulus/parachains/runtimes/testing/penpal" } +people-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo" } +people-rococo-runtime = { path = "cumulus/parachains/runtimes/people/people-rococo" } +people-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend" } +people-westend-runtime = { path = "cumulus/parachains/runtimes/people/people-westend" } +pin-project = { version = "1.1.3" } +platforms = { version = "3.0" } +polkadot-approval-distribution = { path = "polkadot/node/network/approval-distribution", default-features = false } +polkadot-availability-bitfield-distribution = { path = "polkadot/node/network/bitfield-distribution", default-features = false } +polkadot-availability-distribution = { path = "polkadot/node/network/availability-distribution", default-features = false } +polkadot-availability-recovery = { path = "polkadot/node/network/availability-recovery", default-features = false } +polkadot-cli = { path = "polkadot/cli", default-features = false } +polkadot-collator-protocol = { path = "polkadot/node/network/collator-protocol", default-features = false } +polkadot-core-primitives = { path = "polkadot/core-primitives", default-features = false } +polkadot-dispute-distribution = { path = "polkadot/node/network/dispute-distribution", default-features = false } +polkadot-erasure-coding = { path = "polkadot/erasure-coding", default-features = false } +polkadot-gossip-support = { path = "polkadot/node/network/gossip-support", default-features = false } +polkadot-network-bridge = { path = "polkadot/node/network/bridge", default-features = false } +polkadot-node-collation-generation = { path = "polkadot/node/collation-generation", default-features = false } +polkadot-node-core-approval-voting = { path = "polkadot/node/core/approval-voting", default-features = false } +polkadot-node-core-av-store = { path = "polkadot/node/core/av-store", default-features = false } +polkadot-node-core-backing = { path = "polkadot/node/core/backing", default-features = false } +polkadot-node-core-bitfield-signing = { path = "polkadot/node/core/bitfield-signing", default-features = false } +polkadot-node-core-candidate-validation = { path = "polkadot/node/core/candidate-validation", default-features = false } +polkadot-node-core-chain-api = { path = "polkadot/node/core/chain-api", default-features = false } +polkadot-node-core-chain-selection = { path = "polkadot/node/core/chain-selection", default-features = false } +polkadot-node-core-dispute-coordinator = { path = "polkadot/node/core/dispute-coordinator", default-features = false } +polkadot-node-core-parachains-inherent = { path = "polkadot/node/core/parachains-inherent", default-features = false } +polkadot-node-core-prospective-parachains = { path = "polkadot/node/core/prospective-parachains", default-features = false } +polkadot-node-core-provisioner = { path = "polkadot/node/core/provisioner", default-features = false } +polkadot-node-core-pvf = { path = "polkadot/node/core/pvf", default-features = false } +polkadot-node-core-pvf-checker = { path = "polkadot/node/core/pvf-checker", default-features = false } +polkadot-node-core-pvf-common = { path = 
"polkadot/node/core/pvf/common", default-features = false } +polkadot-node-core-pvf-execute-worker = { path = "polkadot/node/core/pvf/execute-worker", default-features = false } +polkadot-node-core-pvf-prepare-worker = { path = "polkadot/node/core/pvf/prepare-worker", default-features = false } +polkadot-node-core-runtime-api = { path = "polkadot/node/core/runtime-api", default-features = false } +polkadot-node-jaeger = { path = "polkadot/node/jaeger", default-features = false } +polkadot-node-metrics = { path = "polkadot/node/metrics", default-features = false } +polkadot-node-network-protocol = { path = "polkadot/node/network/protocol", default-features = false } +polkadot-node-primitives = { path = "polkadot/node/primitives", default-features = false } +polkadot-node-subsystem = { path = "polkadot/node/subsystem", default-features = false } +polkadot-node-subsystem-test-helpers = { path = "polkadot/node/subsystem-test-helpers" } +polkadot-node-subsystem-types = { path = "polkadot/node/subsystem-types", default-features = false } +polkadot-node-subsystem-util = { path = "polkadot/node/subsystem-util", default-features = false } +polkadot-overseer = { path = "polkadot/node/overseer", default-features = false } +polkadot-parachain-primitives = { path = "polkadot/parachain", default-features = false } +polkadot-primitives = { path = "polkadot/primitives", default-features = false } +polkadot-primitives-test-helpers = { path = "polkadot/primitives/test-helpers" } +polkadot-rpc = { path = "polkadot/rpc", default-features = false } +polkadot-runtime-common = { path = "polkadot/runtime/common", default-features = false } +polkadot-runtime-metrics = { path = "polkadot/runtime/metrics", default-features = false } +polkadot-runtime-parachains = { path = "polkadot/runtime/parachains", default-features = false } +polkadot-sdk = { path = "umbrella", default-features = false } +polkadot-sdk-docs = { path = "docs/sdk" } +polkadot-service = { path = "polkadot/node/service", default-features = false } +polkadot-statement-distribution = { path = "polkadot/node/network/statement-distribution", default-features = false } +polkadot-statement-table = { path = "polkadot/statement-table", default-features = false } +polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } +polkadot-test-client = { path = "polkadot/node/test/client" } +polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } +polkadot-test-service = { path = "polkadot/node/test/service" } polkavm = "0.9.3" -polkavm-linker = "0.9.2" polkavm-derive = "0.9.1" -log = { version = "0.4.21", default-features = false } +polkavm-linker = "0.9.2" +portpicker = { version = "0.1.1" } +pretty_assertions = { version = "1.3.0" } +primitive-types = { version = "0.12.1", default-features = false } +proc-macro-crate = { version = "3.0.0" } +proc-macro-warning = { version = "1.0.0", default-features = false } +proc-macro2 = { version = "1.0.64" } +procfs = { version = "0.16.0" } +prometheus = { version = "0.13.0", default-features = false } +prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } +prometheus-parse = { version = "0.2.2" } +prost = { version = "0.12.4" } +prost-build = { version = "0.12.4" } +pyroscope = { version = "0.5.7" } +pyroscope_pprofrs = { version = "0.2.7" } +quick_cache = { version = "0.3" } +quickcheck = { version = "1.0.3", default-features = false } quote = { version = "1.0.33" } +rand = { version = "0.8.5", default-features = false } 
+rand_chacha = { version = "0.3.1", default-features = false } +rand_core = { version = "0.6.2" } +rand_distr = { version = "0.4.3" } +rand_pcg = { version = "0.3.1" } +rayon = { version = "1.5.1" } +rbtag = { version = "0.3" } +ref-cast = { version = "1.0.0" } +regex = { version = "1.10.2" } +relay-substrate-client = { path = "bridges/relays/client-substrate" } +relay-utils = { path = "bridges/relays/utils" } +remote-externalities = { path = "substrate/utils/frame/remote-externalities", default-features = false, package = "frame-remote-externalities" } +reqwest = { version = "0.11", default-features = false } +rlp = { version = "0.5.2", default-features = false } +rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/rococo" } +rococo-parachain-runtime = { path = "cumulus/parachains/runtimes/testing/rococo-parachain" } +rococo-runtime = { path = "polkadot/runtime/rococo" } +rococo-runtime-constants = { path = "polkadot/runtime/rococo/constants", default-features = false } +rococo-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/rococo-system" } +rococo-westend-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system" } +rpassword = { version = "7.0.0" } +rstest = { version = "0.18.2" } +rustc-hash = { version = "1.1.0" } +rustc-hex = { version = "2.1.0", default-features = false } +rustix = { version = "0.36.7", default-features = false } +rustversion = { version = "1.0.6" } +rusty-fork = { version = "0.3.0", default-features = false } +safe-mix = { version = "1.0", default-features = false } +sc-allocator = { path = "substrate/client/allocator", default-features = false } +sc-authority-discovery = { path = "substrate/client/authority-discovery", default-features = false } +sc-basic-authorship = { path = "substrate/client/basic-authorship", default-features = false } +sc-block-builder = { path = "substrate/client/block-builder", default-features = false } +sc-chain-spec = { path = "substrate/client/chain-spec", default-features = false } +sc-chain-spec-derive = { path = "substrate/client/chain-spec/derive", default-features = false } +sc-cli = { path = "substrate/client/cli", default-features = false } +sc-client-api = { path = "substrate/client/api", default-features = false } +sc-client-db = { path = "substrate/client/db", default-features = false } +sc-consensus = { path = "substrate/client/consensus/common", default-features = false } +sc-consensus-aura = { path = "substrate/client/consensus/aura", default-features = false } +sc-consensus-babe = { path = "substrate/client/consensus/babe", default-features = false } +sc-consensus-babe-rpc = { path = "substrate/client/consensus/babe/rpc", default-features = false } +sc-consensus-beefy = { path = "substrate/client/consensus/beefy", default-features = false } +sc-consensus-beefy-rpc = { path = "substrate/client/consensus/beefy/rpc", default-features = false } +sc-consensus-epochs = { path = "substrate/client/consensus/epochs", default-features = false } +sc-consensus-grandpa = { path = "substrate/client/consensus/grandpa", default-features = false } +sc-consensus-grandpa-rpc = { path = "substrate/client/consensus/grandpa/rpc", default-features = false } +sc-consensus-manual-seal = { path = "substrate/client/consensus/manual-seal", default-features = false } +sc-consensus-pow = { path = "substrate/client/consensus/pow", default-features = false } +sc-consensus-slots = { path = 
"substrate/client/consensus/slots", default-features = false } +sc-executor = { path = "substrate/client/executor", default-features = false } +sc-executor-common = { path = "substrate/client/executor/common", default-features = false } +sc-executor-polkavm = { path = "substrate/client/executor/polkavm", default-features = false } +sc-executor-wasmtime = { path = "substrate/client/executor/wasmtime", default-features = false } +sc-informant = { path = "substrate/client/informant", default-features = false } +sc-keystore = { path = "substrate/client/keystore", default-features = false } +sc-mixnet = { path = "substrate/client/mixnet", default-features = false } +sc-network = { path = "substrate/client/network", default-features = false } +sc-network-common = { path = "substrate/client/network/common", default-features = false } +sc-network-gossip = { path = "substrate/client/network-gossip", default-features = false } +sc-network-light = { path = "substrate/client/network/light", default-features = false } +sc-network-statement = { default-features = false, path = "substrate/client/network/statement" } +sc-network-sync = { path = "substrate/client/network/sync", default-features = false } +sc-network-test = { path = "substrate/client/network/test" } +sc-network-transactions = { path = "substrate/client/network/transactions", default-features = false } +sc-network-types = { path = "substrate/client/network/types", default-features = false } +sc-offchain = { path = "substrate/client/offchain", default-features = false } +sc-proposer-metrics = { path = "substrate/client/proposer-metrics", default-features = false } +sc-rpc = { path = "substrate/client/rpc", default-features = false } +sc-rpc-api = { path = "substrate/client/rpc-api", default-features = false } +sc-rpc-server = { path = "substrate/client/rpc-servers", default-features = false } +sc-rpc-spec-v2 = { path = "substrate/client/rpc-spec-v2", default-features = false } +sc-runtime-test = { path = "substrate/client/executor/runtime-test" } +sc-service = { path = "substrate/client/service", default-features = false } +sc-service-test = { path = "substrate/client/service/test" } +sc-state-db = { path = "substrate/client/state-db", default-features = false } +sc-statement-store = { default-features = false, path = "substrate/client/statement-store" } +sc-storage-monitor = { path = "substrate/client/storage-monitor", default-features = false } +sc-sync-state-rpc = { path = "substrate/client/sync-state-rpc", default-features = false } +sc-sysinfo = { path = "substrate/client/sysinfo", default-features = false } +sc-telemetry = { path = "substrate/client/telemetry", default-features = false } +sc-tracing = { path = "substrate/client/tracing", default-features = false } +sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default-features = false } +sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } +sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } +sc-utils = { path = "substrate/client/utils", default-features = false } +scale-info = { version = "2.11.1", default-features = false } +schemars = { version = "0.8.13", default-features = false } +schnellru = { version = "0.2.1" } +schnorrkel = { version = "0.11.4", default-features = false } +seccompiler = { version = "0.4.0" } +secp256k1 = { version = "0.28.0", default-features = false } +secrecy = { version = "0.8.0", default-features = false } +seedling-runtime = { path = 
"cumulus/parachains/runtimes/starters/seedling" } +separator = { version = "0.4.1" } serde = { version = "1.0.197", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } serde_json = { version = "1.0.114", default-features = false } serde_yaml = { version = "0.9" } +serial_test = { version = "2.0.0" } +sha1 = { version = "0.10.6" } +sha2 = { version = "0.10.7", default-features = false } +sha3 = { version = "0.10.0", default-features = false } +shell-runtime = { path = "cumulus/parachains/runtimes/starters/shell" } +slot-range-helper = { path = "polkadot/runtime/common/slot_range_helper", default-features = false } +slotmap = { version = "1.0" } +smallvec = { version = "1.11.0", default-features = false } +smoldot = { version = "0.11.0", default-features = false } +smoldot-light = { version = "0.9.0", default-features = false } +snowbridge-beacon-primitives = { path = "bridges/snowbridge/primitives/beacon", default-features = false } +snowbridge-core = { path = "bridges/snowbridge/primitives/core", default-features = false } +snowbridge-ethereum = { path = "bridges/snowbridge/primitives/ethereum", default-features = false } +snowbridge-outbound-queue-merkle-tree = { path = "bridges/snowbridge/pallets/outbound-queue/merkle-tree", default-features = false } +snowbridge-outbound-queue-runtime-api = { path = "bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } +snowbridge-pallet-ethereum-client = { path = "bridges/snowbridge/pallets/ethereum-client", default-features = false } +snowbridge-pallet-ethereum-client-fixtures = { path = "bridges/snowbridge/pallets/ethereum-client/fixtures", default-features = false } +snowbridge-pallet-inbound-queue = { path = "bridges/snowbridge/pallets/inbound-queue", default-features = false } +snowbridge-pallet-inbound-queue-fixtures = { path = "bridges/snowbridge/pallets/inbound-queue/fixtures", default-features = false } +snowbridge-pallet-outbound-queue = { path = "bridges/snowbridge/pallets/outbound-queue", default-features = false } +snowbridge-pallet-system = { path = "bridges/snowbridge/pallets/system", default-features = false } +snowbridge-router-primitives = { path = "bridges/snowbridge/primitives/router", default-features = false } +snowbridge-runtime-common = { path = "bridges/snowbridge/runtime/runtime-common", default-features = false } +snowbridge-runtime-test-common = { path = "bridges/snowbridge/runtime/test-common", default-features = false } +snowbridge-system-runtime-api = { path = "bridges/snowbridge/pallets/system/runtime-api", default-features = false } +soketto = { version = "0.7.1" } +solochain-template-runtime = { path = "templates/solochain/runtime" } +sp-api = { path = "substrate/primitives/api", default-features = false } +sp-api-proc-macro = { path = "substrate/primitives/api/proc-macro", default-features = false } +sp-application-crypto = { path = "substrate/primitives/application-crypto", default-features = false } +sp-arithmetic = { path = "substrate/primitives/arithmetic", default-features = false } +sp-authority-discovery = { path = "substrate/primitives/authority-discovery", default-features = false } +sp-block-builder = { path = "substrate/primitives/block-builder", default-features = false } +sp-blockchain = { path = "substrate/primitives/blockchain", default-features = false } +sp-consensus = { path = "substrate/primitives/consensus/common", default-features = false } +sp-consensus-aura = { path = "substrate/primitives/consensus/aura", 
default-features = false } +sp-consensus-babe = { path = "substrate/primitives/consensus/babe", default-features = false } +sp-consensus-beefy = { path = "substrate/primitives/consensus/beefy", default-features = false } +sp-consensus-grandpa = { path = "substrate/primitives/consensus/grandpa", default-features = false } +sp-consensus-pow = { path = "substrate/primitives/consensus/pow", default-features = false } +sp-consensus-sassafras = { path = "substrate/primitives/consensus/sassafras", default-features = false } +sp-consensus-slots = { path = "substrate/primitives/consensus/slots", default-features = false } +sp-core = { path = "substrate/primitives/core", default-features = false } +sp-core-hashing = { default-features = false, path = "substrate/deprecated/hashing" } +sp-core-hashing-proc-macro = { default-features = false, path = "substrate/deprecated/hashing/proc-macro" } +sp-crypto-ec-utils = { default-features = false, path = "substrate/primitives/crypto/ec-utils" } +sp-crypto-hashing = { path = "substrate/primitives/crypto/hashing", default-features = false } +sp-crypto-hashing-proc-macro = { path = "substrate/primitives/crypto/hashing/proc-macro", default-features = false } +sp-database = { path = "substrate/primitives/database", default-features = false } +sp-debug-derive = { path = "substrate/primitives/debug-derive", default-features = false } +sp-externalities = { path = "substrate/primitives/externalities", default-features = false } +sp-genesis-builder = { path = "substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "substrate/primitives/inherents", default-features = false } +sp-io = { path = "substrate/primitives/io", default-features = false } +sp-keyring = { path = "substrate/primitives/keyring", default-features = false } +sp-keystore = { path = "substrate/primitives/keystore", default-features = false } +sp-maybe-compressed-blob = { path = "substrate/primitives/maybe-compressed-blob", default-features = false } +sp-metadata-ir = { path = "substrate/primitives/metadata-ir", default-features = false } +sp-mixnet = { path = "substrate/primitives/mixnet", default-features = false } +sp-mmr-primitives = { path = "substrate/primitives/merkle-mountain-range", default-features = false } +sp-npos-elections = { path = "substrate/primitives/npos-elections", default-features = false } +sp-offchain = { path = "substrate/primitives/offchain", default-features = false } +sp-panic-handler = { path = "substrate/primitives/panic-handler", default-features = false } +sp-rpc = { path = "substrate/primitives/rpc", default-features = false } +sp-runtime = { path = "substrate/primitives/runtime", default-features = false } +sp-runtime-interface = { path = "substrate/primitives/runtime-interface", default-features = false } +sp-runtime-interface-proc-macro = { path = "substrate/primitives/runtime-interface/proc-macro", default-features = false } +sp-runtime-interface-test-wasm = { path = "substrate/primitives/runtime-interface/test-wasm" } +sp-runtime-interface-test-wasm-deprecated = { path = "substrate/primitives/runtime-interface/test-wasm-deprecated" } +sp-session = { path = "substrate/primitives/session", default-features = false } +sp-staking = { path = "substrate/primitives/staking", default-features = false } +sp-state-machine = { path = "substrate/primitives/state-machine", default-features = false } +sp-statement-store = { path = "substrate/primitives/statement-store", default-features = false } +sp-std = { path = "substrate/primitives/std", 
default-features = false } +sp-storage = { path = "substrate/primitives/storage", default-features = false } +sp-test-primitives = { path = "substrate/primitives/test-primitives" } +sp-timestamp = { path = "substrate/primitives/timestamp", default-features = false } +sp-tracing = { path = "substrate/primitives/tracing", default-features = false } +sp-transaction-pool = { path = "substrate/primitives/transaction-pool", default-features = false } +sp-transaction-storage-proof = { path = "substrate/primitives/transaction-storage-proof", default-features = false } +sp-trie = { path = "substrate/primitives/trie", default-features = false } +sp-version = { path = "substrate/primitives/version", default-features = false } +sp-version-proc-macro = { path = "substrate/primitives/version/proc-macro", default-features = false } +sp-wasm-interface = { path = "substrate/primitives/wasm-interface", default-features = false } +sp-weights = { path = "substrate/primitives/weights", default-features = false } +spinners = { version = "4.1.0" } +ss58-registry = { version = "1.34.0", default-features = false } +ssz_rs = { version = "0.9.0", default-features = false } +ssz_rs_derive = { version = "0.9.0", default-features = false } +static_assertions = { version = "1.1.0", default-features = false } +static_init = { version = "1.0.3" } +structopt = { version = "0.3" } +strum = { version = "0.26.2", default-features = false } +subkey = { path = "substrate/bin/utils/subkey", default-features = false } +substrate-bip39 = { path = "substrate/utils/substrate-bip39", default-features = false } +substrate-build-script-utils = { path = "substrate/utils/build-script-utils", default-features = false } +substrate-cli-test-utils = { path = "substrate/test-utils/cli" } +substrate-frame-rpc-support = { default-features = false, path = "substrate/utils/frame/rpc/support" } +substrate-frame-rpc-system = { path = "substrate/utils/frame/rpc/system", default-features = false } +substrate-rpc-client = { path = "substrate/utils/frame/rpc/client", default-features = false } +substrate-state-trie-migration-rpc = { path = "substrate/utils/frame/rpc/state-trie-migration-rpc", default-features = false } +substrate-test-client = { path = "substrate/test-utils/client" } +substrate-test-runtime = { path = "substrate/test-utils/runtime" } +substrate-test-runtime-client = { path = "substrate/test-utils/runtime/client" } +substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } +substrate-test-utils = { path = "substrate/test-utils" } +substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } syn = { version = "2.0.53" } +sysinfo = { version = "0.30" } +tar = { version = "0.4" } +tempfile = { version = "3.8.1" } +test-log = { version = "0.2.14" } +test-pallet = { path = "substrate/frame/support/test/pallet", default-features = false, package = "frame-support-test-pallet" } +test-parachain-adder = { path = "polkadot/parachain/test-parachains/adder" } +test-parachain-halt = { path = "polkadot/parachain/test-parachains/halt" } +test-parachain-undying = { path = "polkadot/parachain/test-parachains/undying" } +test-runtime-constants = { path = "polkadot/runtime/test-runtime/constants", default-features = false } +testnet-parachains-constants = { path = "cumulus/parachains/runtimes/constants", default-features = false } thiserror = { version = "1.0.48" } +thousands = { version = "0.2.0" } +threadpool = { version = "1.7" } +tikv-jemalloc-ctl = { version = "0.5.0" } 
+tikv-jemallocator = { version = "0.5.0" } +time = { version = "0.3" } +tiny-keccak = { version = "2.0.2" } +tokio = { version = "1.37.0", default-features = false } +tokio-retry = { version = "0.3.0" } +tokio-stream = { version = "0.1.14" } +tokio-test = { version = "0.4.2" } +tokio-tungstenite = { version = "0.20.1" } +tokio-util = { version = "0.7.8" } +toml = { version = "0.8.8" } +toml_edit = { version = "0.19" } +tower = { version = "0.4.13" } +tower-http = { version = "0.5.2" } +tracing = { version = "0.1.37", default-features = false } +tracing-core = { version = "0.1.32", default-features = false } +tracing-futures = { version = "0.2.4" } +tracing-log = { version = "0.2.0" } tracing-subscriber = { version = "0.3.18" } +tracking-allocator = { path = "polkadot/node/tracking-allocator", default-features = false, package = "staging-tracking-allocator" } +trie-bench = { version = "0.39.0" } +trie-db = { version = "0.29.0", default-features = false } +trie-root = { version = "0.18.0", default-features = false } +trie-standardmap = { version = "0.16.0" } +trybuild = { version = "1.0.88" } +tt-call = { version = "1.0.8" } +tuplex = { version = "0.1", default-features = false } +twox-hash = { version = "1.6.3", default-features = false } +unsigned-varint = { version = "0.7.2" } +url = { version = "2.4.0" } +void = { version = "1.0.2" } +w3f-bls = { version = "0.1.3", default-features = false } +wait-timeout = { version = "0.2" } +walkdir = { version = "2.4.0" } +wasm-bindgen-test = { version = "0.3.19" } +wasm-instrument = { version = "0.4", default-features = false } +wasm-opt = { version = "0.116" } +wasm-timer = { version = "0.2.5" } +wasmi = { version = "0.32.3", default-features = false } +wasmtime = { version = "8.0.1", default-features = false } +wat = { version = "1.0.0" } +westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/westend", default-features = false } +westend-runtime = { path = "polkadot/runtime/westend" } +westend-runtime-constants = { path = "polkadot/runtime/westend/constants", default-features = false } +westend-system-emulated-network = { path = "cumulus/parachains/integration-tests/emulated/networks/westend-system" } +x25519-dalek = { version = "2.0" } +xcm = { path = "polkadot/xcm", default-features = false, package = "staging-xcm" } +xcm-builder = { path = "polkadot/xcm/xcm-builder", default-features = false, package = "staging-xcm-builder" } +xcm-docs = { path = "polkadot/xcm/docs" } +xcm-emulator = { path = "cumulus/xcm/xcm-emulator", default-features = false } +xcm-executor = { path = "polkadot/xcm/xcm-executor", default-features = false, package = "staging-xcm-executor" } +xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } +xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } +xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } +zeroize = { version = "1.7.0", default-features = false } +zstd = { version = "0.12.4", default-features = false } [profile.release] # Polkadot runtime requires unwinding. 
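The long `+` block above finishes moving every dependency declaration into the root `[workspace.dependencies]` table, so the version, path, and `default-features` flag for each crate live in exactly one place and member crates only state that they use it. A minimal sketch of the pattern, using an illustrative crate and version rather than entries copied from this patch:

```toml
# --- root Cargo.toml (sketch, illustrative values only) ---
[workspace]
members = ["pallet-example"]

[workspace.dependencies]
# One canonical entry per dependency; members no longer repeat version/path.
log = { version = "0.4.21", default-features = false }

# --- pallet-example/Cargo.toml (sketch) ---
[dependencies]
# `workspace = true` inherits version/path and `default-features = false`
# from the root entry; crate-specific features are added on top (union).
log = { features = ["std"], workspace = true }
```

This is the shape the per-crate manifests further down collapse into (for example `bridges/bin/runtime-common/Cargo.toml`), where the path and version details disappear and only `workspace = true` plus any crate-specific `features`, `optional`, or `default-features` overrides remain.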
diff --git a/README.md b/README.md index 0b027b2958c15..92901d070db08 100644 --- a/README.md +++ b/README.md @@ -24,8 +24,12 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) ## ๐Ÿ“š Documentation * [๐Ÿฆ€ rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html) - * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) - to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) + to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM + * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), + namely how to build your first FRAME pallet. + * [Templates](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/templates/index.html) + for starting a new project. * Other Resources: * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) @@ -39,6 +43,9 @@ The Polkadot-SDK has two release channels: `stable` and `nightly`. Production so only use `stable`. `nightly` is meant for tinkerers to try out the latest features. The detailed release process is described in [RELEASE.md](docs/RELEASE.md). +You can use [`psvm`](https://github.com/paritytech/psvm) to manage your Polkadot-SDK dependency +versions in downstream projects. + ### ๐Ÿ˜Œ Stable `stable` releases have a support duration of **three months**. In this period, the release will not diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 783009a8c8907..36f27b6aa0358 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,48 +11,44 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hash-db = { version = "0.16.0", default-features = false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -static_assertions = { version = "1.1", optional = true } -tuplex = { version = "0.1", default-features = false } +scale-info = { features = ["derive"], workspace = true } +static_assertions = { optional = true, workspace = true, default-features = true } +tuplex = { workspace = true } # Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } -pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../modules/parachains", default-features = false } 
-pallet-bridge-relayers = { path = "../../modules/relayers", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +bp-xcm-bridge-hub = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } # Substrate dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-utility = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { optional = true, workspace = true } # Polkadot dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { path = "../../../substrate/frame/balances" } +bp-test-utils = { workspace = true } +pallet-balances = { workspace = true } +pallet-bridge-messages = { features = ["std", "test-helpers"], workspace = true } [features] default = ["std"] @@ -63,13 +59,14 @@ std = [ "bp-polkadot-core/std", "bp-relayers/std", "bp-runtime/std", + "bp-test-utils/std", "bp-xcm-bridge-hub-router/std", "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", "frame-system/std", - "hash-db/std", "log/std", + "pallet-balances/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-bridge-parachains/std", @@ -77,8 +74,6 @@ std = [ "pallet-transaction-payment/std", "pallet-utility/std", "scale-info/std", - "sp-api/std", - "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", @@ -88,15 +83,22 @@ std = [ "xcm/std", ] runtime-benchmarks = [ + "bp-runtime/test-helpers", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-bridge-grandpa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", + "pallet-bridge-messages/test-helpers", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "sp-trie", "xcm-builder/runtime-benchmarks", ] 
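In the feature lists just above and below, bare dependency names such as `"sp-trie"` work because the crate is declared with `optional = true` in `[dependencies]`: the dependency is only compiled when some feature activates it. A hedged sketch of that mechanic, with a made-up dependency name rather than entries from this manifest:

```toml
# Illustrative fragment: `heavy-helper` is a hypothetical optional dependency,
# pulled in only when the `test-helpers` feature is requested.
[dependencies]
heavy-helper = { optional = true, workspace = true }

[features]
default = []
# A bare crate name activates the optional dependency itself, while the
# `other-crate/feature` form forwards a feature to another dependency.
test-helpers = ["heavy-helper"]
```

That is why `sp-trie` appears both as `optional = true` in the dependency list and as a bare `"sp-trie"` entry under `runtime-benchmarks` and the new `test-helpers` feature.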
integrity-test = ["static_assertions"] +test-helpers = [ + "bp-runtime/test-helpers", + "sp-trie", +] diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 2c152aef68226..df75092af6e8b 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -36,6 +36,12 @@ use sp_runtime::{ transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder}, }; +// Re-export to avoid include tuplex dependency everywhere. +#[doc(hidden)] +pub mod __private { + pub use tuplex; +} + /// A duplication of the `FilterCall` trait. /// /// We need this trait in order to be able to implement it for the messages pallet, @@ -313,7 +319,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { info: &sp_runtime::traits::DispatchInfoOf, len: usize, ) -> Result { - use tuplex::PushBack; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PushBack; let to_post_dispatch = (); $( let (from_validate, call_filter_validity) = < @@ -336,7 +342,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { len: usize, result: &sp_runtime::DispatchResult, ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - use tuplex::PopFront; + use $crate::extensions::check_obsolete_extension::__private::tuplex::PopFront; let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; let has_failed = result.is_err(); $( diff --git a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs index 92810290f95e7..9f559dc13b64d 100644 --- a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs @@ -319,6 +319,7 @@ mod integrity_tests { pub mod per_message { use super::*; + use bp_messages::ChainWithMessages; use pallet_bridge_messages::WeightInfoExt; /// Ensures that the value of `PriorityBoostPerMessage` matches the value of @@ -339,7 +340,7 @@ mod integrity_tests { BalanceOf: Send + Sync + FixedPointOperand, { let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + Runtime::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; super::ensure_priority_boost_is_sane::>( "PriorityBoostPerMessage", maximal_messages_in_delivery_transaction, diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index 5aa7f1c095d54..6ba3506377d0e 100644 --- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -22,9 +22,9 @@ use crate::messages_call_ext::{ CallHelper as MessagesCallHelper, CallInfo as MessagesCallInfo, MessagesCallSubType, }; -use bp_messages::{LaneId, MessageNonce}; +use bp_messages::{ChainWithMessages, LaneId, MessageNonce}; use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{Parachain, RangeInclusiveExt, StaticStrProvider}; +use bp_runtime::{Chain, Parachain, RangeInclusiveExt, StaticStrProvider}; use codec::{Codec, Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo}, @@ -293,7 +293,7 @@ pub trait RefundSignedExtension: ::Id::get(), 
::Instance, - >>::BridgedChainId::get(), + >>::BridgedChain::ID, if call_info.is_receive_messages_proof_call() { RewardsAccountOwner::ThisChain } else { @@ -406,8 +406,7 @@ pub trait RefundSignedExtension: // a quick check to avoid invalid high-priority transactions let max_unconfirmed_messages_in_confirmation_tx = ::Instance, - >>::MaxUnconfirmedMessagesAtInboundLane::get( - ); + >>::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; if bundled_messages > max_unconfirmed_messages_in_confirmation_tx { return None } @@ -935,9 +934,6 @@ where pub(crate) mod tests { use super::*; use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, messages_call_ext::{ BaseMessagesProofInfo, ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, UnrewardedRelayerOccupation, @@ -946,8 +942,10 @@ pub(crate) mod tests { }; use bp_header_chain::StoredHeaderDataBuilder; use bp_messages::{ - DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, - UnrewardedRelayer, UnrewardedRelayersState, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, DeliveredMessages, InboundLaneData, + MessageNonce, MessagesOperatingMode, OutboundLaneData, UnrewardedRelayer, + UnrewardedRelayersState, }; use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; @@ -1123,7 +1121,7 @@ pub(crate) mod tests { ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, }) } @@ -1136,7 +1134,7 @@ pub(crate) mod tests { ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: false, }) } @@ -1144,9 +1142,9 @@ pub(crate) mod tests { fn message_delivery_call(best_message: MessageNonce) -> RuntimeCall { RuntimeCall::BridgeMessages(MessagesCall::receive_messages_proof { relayer_id_at_bridged_chain: relayer_account_at_bridged_chain(), - proof: FromBridgedChainMessagesProof { + proof: Box::new(FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: TestLaneId::get(), nonces_start: pallet_bridge_messages::InboundLanes::::get( TEST_LANE_ID, @@ -1154,7 +1152,7 @@ pub(crate) mod tests { .last_delivered_nonce() + 1, nonces_end: best_message, - }, + }), messages_count: 1, dispatch_weight: Weight::zero(), }) @@ -1164,7 +1162,7 @@ pub(crate) mod tests { RuntimeCall::BridgeMessages(MessagesCall::receive_messages_delivery_proof { proof: FromBridgedChainMessagesDeliveryProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: TestLaneId::get(), }, relayers_state: UnrewardedRelayersState { @@ -1327,8 +1325,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + 
BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1397,8 +1397,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1459,8 +1461,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }), ), @@ -1499,8 +1503,10 @@ pub(crate) mod tests { best_stored_nonce: 100, }, unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), + free_relayer_slots: + BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + free_message_slots: + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }, )), @@ -1735,14 +1741,16 @@ pub(crate) mod tests { let fns = [run_validate, run_grandpa_validate, run_messages_validate]; for f in fns { - let priority_of_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get())) - .unwrap() - .priority; - let priority_of_more_than_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1)) - .unwrap() - .priority; + let priority_of_max_messages_delivery = f(message_delivery_call( + 100 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + )) + .unwrap() + .priority; + let priority_of_more_than_max_messages_delivery = f(message_delivery_call( + 100 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX + 1, + )) + .unwrap() + .priority; assert!( priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, @@ -2103,7 +2111,7 @@ pub(crate) mod tests { [1u8; 32].into(), ), ], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, }), message_delivery_call(200), ], @@ -2865,7 +2873,8 @@ pub(crate) mod tests { #[test] fn does_not_panic_on_boosting_priority_of_empty_message_delivery_transaction() { run_test(|| { - let best_delivered_message = MaxUnconfirmedMessagesAtInboundLane::get(); + let best_delivered_message = + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; initialize_environment(100, 100, best_delivered_message); // register relayer so it gets priority boost diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index d3827a14dd6cc..f661db8a22057 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -19,10 +19,9 @@ //! Most of the tests in this module assume that the bridge is using standard (see `crate::messages` //! module for details) configuration. 
-use crate::{messages, messages::MessageBridge}; - -use bp_messages::{InboundLaneData, MessageNonce}; -use bp_runtime::{Chain, ChainId}; +use bp_header_chain::ChainWithGrandpa; +use bp_messages::{ChainWithMessages, InboundLaneData, MessageNonce}; +use bp_runtime::Chain; use codec::Encode; use frame_support::{storage::generator::StorageValue, traits::Get, weights::Weight}; use frame_system::limits; @@ -50,23 +49,6 @@ macro_rules! assert_chain_types( } ); -/// Macro that ensures that the bridge GRANDPA pallet is configured properly to bridge with given -/// chain. -#[macro_export] -macro_rules! assert_bridge_grandpa_pallet_types( - ( runtime: $r:path, with_bridged_chain_grandpa_instance: $i:path, bridged_chain: $bridged:path ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use pallet_bridge_grandpa::Config as GrandpaConfig; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as GrandpaConfig<$i>>::BridgedChain, $bridged); - } - } -); - /// Macro that ensures that the bridge messages pallet is configured properly to bridge using given /// configuration. #[macro_export] @@ -74,32 +56,30 @@ macro_rules! assert_bridge_messages_pallet_types( ( runtime: $r:path, with_bridged_chain_messages_instance: $i:path, - bridge: $bridge:path + this_chain: $this:path, + bridged_chain: $bridged:path, ) => { { // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard // configuration is used), or something has broke existing configuration (meaning that all bridged chains // and relays will stop functioning) - use $crate::messages::{ - source::{FromThisChainMessagePayload, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - AccountIdOf, BalanceOf, BridgedChain, ThisChain, - }; + use $crate::messages_xcm_extension::XcmAsPlainPayload; + use bp_messages::ChainWithMessages; + use bp_runtime::Chain; use pallet_bridge_messages::Config as MessagesConfig; use static_assertions::assert_type_eq_all; - assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, FromThisChainMessagePayload); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::ThisChain, $this); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::BridgedChain, $bridged); - assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundRelayer, AccountIdOf>); - - assert_type_eq_all!(<$r as MessagesConfig<$i>>::TargetHeaderChain, TargetHeaderChainAdapter<$bridge>); - assert_type_eq_all!(<$r as MessagesConfig<$i>>::SourceHeaderChain, SourceHeaderChainAdapter<$bridge>); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, XcmAsPlainPayload); + assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundPayload, XcmAsPlainPayload); } } ); /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, -/// `assert_bridge_grandpa_pallet_types` and `assert_bridge_messages_pallet_types`. It may be used +/// and `assert_bridge_messages_pallet_types`. It may be used /// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA /// and messages pallets deployed). #[macro_export] @@ -108,20 +88,15 @@ macro_rules! 
assert_complete_bridge_types( runtime: $r:path, with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, - bridge: $bridge:path, this_chain: $this:path, bridged_chain: $bridged:path, ) => { $crate::assert_chain_types!(runtime: $r, this_chain: $this); - $crate::assert_bridge_grandpa_pallet_types!( - runtime: $r, - with_bridged_chain_grandpa_instance: $gi, - bridged_chain: $bridged - ); $crate::assert_bridge_messages_pallet_types!( runtime: $r, with_bridged_chain_messages_instance: $mi, - bridge: $bridge + this_chain: $this, + bridged_chain: $bridged, ); } ); @@ -184,20 +159,8 @@ where ); } -/// Parameters for asserting messages pallet constants. -#[derive(Debug)] -pub struct AssertBridgeMessagesPalletConstants { - /// Maximal number of unrewarded relayer entries in a confirmation transaction at the bridged - /// chain. - pub max_unrewarded_relayers_in_bridged_confirmation_tx: MessageNonce, - /// Maximal number of unconfirmed messages in a confirmation transaction at the bridged chain. - pub max_unconfirmed_messages_in_bridged_confirmation_tx: MessageNonce, - /// Identifier of the bridged chain. - pub bridged_chain_id: ChainId, -} - /// Test that the constants, used in messages pallet configuration are valid. -pub fn assert_bridge_messages_pallet_constants(params: AssertBridgeMessagesPalletConstants) +pub fn assert_bridge_messages_pallet_constants() where R: pallet_bridge_messages::Config, MI: 'static, @@ -207,27 +170,22 @@ where "ActiveOutboundLanes ({:?}) must not be empty", R::ActiveOutboundLanes::get(), ); + assert!( - R::MaxUnrewardedRelayerEntriesAtInboundLane::get() <= params.max_unrewarded_relayers_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnrewardedRelayerEntriesAtInboundLane::get(), - params.max_unrewarded_relayers_in_bridged_confirmation_tx, - ); - assert!( - R::MaxUnconfirmedMessagesAtInboundLane::get() <= params.max_unconfirmed_messages_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnconfirmedMessagesAtInboundLane::get(), - params.max_unconfirmed_messages_in_bridged_confirmation_tx, + pallet_bridge_messages::BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX + <= pallet_bridge_messages::BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + "MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX ({}) of {:?} is larger than \ + its MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX ({}). This makes \ + no sense", + pallet_bridge_messages::BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, + pallet_bridge_messages::BridgedChainOf::::ID, + pallet_bridge_messages::BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, ); - assert_eq!(R::BridgedChainId::get(), params.bridged_chain_id); } /// Parameters for asserting bridge pallet names. #[derive(Debug)] pub struct AssertBridgePalletNames<'a> { - /// Name of the messages pallet, deployed at the bridged chain and used to bridge with this - /// chain. - pub with_this_chain_messages_pallet_name: &'a str, /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, @@ -238,18 +196,22 @@ pub struct AssertBridgePalletNames<'a> { /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. 
-pub fn assert_bridge_pallet_names(params: AssertBridgePalletNames) +fn assert_bridge_pallet_names(params: AssertBridgePalletNames) where - B: MessageBridge, R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, GI: 'static, MI: 'static, { - assert_eq!(B::BRIDGED_MESSAGES_PALLET_NAME, params.with_this_chain_messages_pallet_name); + // check that the bridge GRANDPA pallet has required name assert_eq!( pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key(params.with_bridged_chain_grandpa_pallet_name, "PalletOwner",).0, + bp_runtime::storage_value_key( + params.with_bridged_chain_grandpa_pallet_name, + "PalletOwner", + ).0, ); + + // check that the bridge messages pallet has required name assert_eq!( pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), bp_runtime::storage_value_key( @@ -262,35 +224,58 @@ where /// Parameters for asserting complete standard messages bridge. #[derive(Debug)] -pub struct AssertCompleteBridgeConstants<'a> { +pub struct AssertCompleteBridgeConstants { /// Parameters to assert this chain constants. pub this_chain_constants: AssertChainConstants, - /// Parameters to assert messages pallet constants. - pub messages_pallet_constants: AssertBridgeMessagesPalletConstants, - /// Parameters to assert pallet names constants. - pub pallet_names: AssertBridgePalletNames<'a>, } -/// All bridge-related constants tests for the complete standard messages bridge (i.e. with bridge -/// GRANDPA and messages pallets deployed). -pub fn assert_complete_bridge_constants(params: AssertCompleteBridgeConstants) -where +/// All bridge-related constants tests for the complete standard relay-chain messages bridge +/// (i.e. with bridge GRANDPA and messages pallets deployed). +pub fn assert_complete_with_relay_chain_bridge_constants( + params: AssertCompleteBridgeConstants, +) where + R: frame_system::Config + + pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config, + GI: 'static, + MI: 'static, +{ + assert_chain_constants::(params.this_chain_constants); + assert_bridge_grandpa_pallet_constants::(); + assert_bridge_messages_pallet_constants::(); + assert_bridge_pallet_names::(AssertBridgePalletNames { + with_bridged_chain_grandpa_pallet_name: + >::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); +} + +/// All bridge-related constants tests for the complete standard parachain messages bridge +/// (i.e. with bridge GRANDPA, parachains and messages pallets deployed). +pub fn assert_complete_with_parachain_bridge_constants( + params: AssertCompleteBridgeConstants, +) where R: frame_system::Config + pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, GI: 'static, MI: 'static, - B: MessageBridge, + RelayChain: ChainWithGrandpa, { assert_chain_constants::(params.this_chain_constants); assert_bridge_grandpa_pallet_constants::(); - assert_bridge_messages_pallet_constants::(params.messages_pallet_constants); - assert_bridge_pallet_names::(params.pallet_names); + assert_bridge_messages_pallet_constants::(); + assert_bridge_pallet_names::(AssertBridgePalletNames { + with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); } /// Check that the message lane weights are correct. 
pub fn check_message_lane_weights< - C: Chain, + C: ChainWithMessages, T: frame_system::Config + pallet_bridge_messages::Config, MessagesPalletInstance: 'static, >( @@ -308,14 +293,20 @@ pub fn check_message_lane_weights< // check basic weight assumptions pallet_bridge_messages::ensure_weights_are_correct::>(); + // check that the maximal message dispatch weight is below hardcoded limit + pallet_bridge_messages::ensure_maximal_message_dispatch::>( + C::maximal_incoming_message_size(), + C::maximal_incoming_message_dispatch_weight(), + ); + // check that weights allow us to receive messages - let max_incoming_message_proof_size = bridged_chain_extra_storage_proof_size - .saturating_add(messages::target::maximal_incoming_message_size(C::max_extrinsic_size())); + let max_incoming_message_proof_size = + bridged_chain_extra_storage_proof_size.saturating_add(C::maximal_incoming_message_size()); pallet_bridge_messages::ensure_able_to_receive_message::>( C::max_extrinsic_size(), C::max_extrinsic_weight(), max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight(C::max_extrinsic_weight()), + C::maximal_incoming_message_dispatch_weight(), ); // check that weights allow us to receive delivery confirmations diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index 5679acd6006cc..b65bb6041d561 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -20,11 +20,10 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod extensions; -pub mod messages; + pub mod messages_api; pub mod messages_benchmarking; pub mod messages_call_ext; -pub mod messages_generation; pub mod messages_xcm_extension; pub mod parachains_benchmarking; diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs deleted file mode 100644 index 0fe9935dbdb6d..0000000000000 --- a/bridges/bin/runtime-common/src/messages.rs +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that allow runtime to act as a source/target endpoint of message lanes. -//! -//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch -//! pallet is used to dispatch incoming messages. Message identified by a tuple -//! of to elements - message lane id and message nonce. 
- -pub use bp_runtime::{RangeInclusiveExt, UnderlyingChainOf, UnderlyingChainProvider}; - -use bp_header_chain::HeaderChain; -use bp_messages::{ - source_chain::TargetHeaderChain, - target_chain::{ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, - VerificationError, -}; -use bp_runtime::{Chain, RawStorageProof, Size, StorageProofChecker}; -use codec::{Decode, Encode}; -use frame_support::{traits::Get, weights::Weight}; -use hash_db::Hasher; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, vec::Vec}; - -/// Bidirectional message bridge. -pub trait MessageBridge { - /// Name of the paired messages pallet instance at the Bridged chain. - /// - /// Should be the name that is used in the `construct_runtime!()` macro. - const BRIDGED_MESSAGES_PALLET_NAME: &'static str; - - /// This chain in context of message bridge. - type ThisChain: ThisChainWithMessages; - /// Bridged chain in context of message bridge. - type BridgedChain: BridgedChainWithMessages; - /// Bridged header chain. - type BridgedHeaderChain: HeaderChain>; -} - -/// This chain that has `pallet-bridge-messages` module. -pub trait ThisChainWithMessages: UnderlyingChainProvider { - /// Call origin on the chain. - type RuntimeOrigin; -} - -/// Bridged chain that has `pallet-bridge-messages` module. -pub trait BridgedChainWithMessages: UnderlyingChainProvider {} - -/// This chain in context of message bridge. -pub type ThisChain = ::ThisChain; -/// Bridged chain in context of message bridge. -pub type BridgedChain = ::BridgedChain; -/// Hash used on the chain. -pub type HashOf = bp_runtime::HashOf<::Chain>; -/// Hasher used on the chain. -pub type HasherOf = bp_runtime::HasherOf>; -/// Account id used on the chain. -pub type AccountIdOf = bp_runtime::AccountIdOf>; -/// Type of balances that is used on the chain. -pub type BalanceOf = bp_runtime::BalanceOf>; - -/// Sub-module that is declaring types required for processing This -> Bridged chain messages. -pub mod source { - use super::*; - - /// Message payload for This -> Bridged chain messages. - pub type FromThisChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Maximal size of outbound message payload. - pub struct FromThisChainMaximalOutboundPayloadSize(PhantomData); - - impl Get for FromThisChainMaximalOutboundPayloadSize { - fn get() -> u32 { - maximal_message_size::() - } - } - - /// Messages delivery proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of inbound lane state; - /// - lane id. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesDeliveryProof { - /// Hash of the bridge header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// Storage trie proof generated for [`Self::bridged_header_hash`]. - pub storage_proof: RawStorageProof, - /// Lane id of which messages were delivered and the proof is for. - pub lane: LaneId, - } - - impl Size for FromBridgedChainMessagesDeliveryProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// 'Parsed' message delivery proof - inbound lane id and its state. - pub type ParsedMessagesDeliveryProofFromBridgedChain = - (LaneId, InboundLaneData>>); - - /// Return maximal message size of This -> Bridged chain message. 
- pub fn maximal_message_size() -> u32 { - super::target::maximal_incoming_message_size( - UnderlyingChainOf::>::max_extrinsic_size(), - ) - } - - /// `TargetHeaderChain` implementation that is using default types and perform default checks. - pub struct TargetHeaderChainAdapter(PhantomData); - - impl TargetHeaderChain>> - for TargetHeaderChainAdapter - { - type MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>>; - - fn verify_message(payload: &FromThisChainMessagePayload) -> Result<(), VerificationError> { - verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData>>), VerificationError> { - verify_messages_delivery_proof::(proof) - } - } - - /// Do basic Bridged-chain specific verification of This -> Bridged chain message. - /// - /// Ok result from this function means that the delivery transaction with this message - /// may be 'mined' by the target chain. - pub fn verify_chain_message( - payload: &FromThisChainMessagePayload, - ) -> Result<(), VerificationError> { - // IMPORTANT: any error that is returned here is fatal for the bridge, because - // this code is executed at the bridge hub and message sender actually lives - // at some sibling parachain. So we are failing **after** the message has been - // sent and we can't report it back to sender (unless error report mechanism is - // embedded into message and its dispatcher). - - // apart from maximal message size check (see below), we should also check the message - // dispatch weight here. But we assume that the bridged chain will just push the message - // to some queue (XCMP, UMP, DMP), so the weight is constant and fits the block. - - // The maximal size of extrinsic at Substrate-based chain depends on the - // `frame_system::Config::MaximumBlockLength` and - // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that - // the lane won't stuck because message is too large to fit into delivery transaction. - // - // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not - // the message itself. The proof is always larger than the message. But unless chain state - // is enormously large, it should be several dozens/hundreds of bytes. The delivery - // transaction also contains signatures and signed extensions. Because of this, we reserve - // 1/3 of the the maximal extrinsic size for this data. - if payload.len() > maximal_message_size::() as usize { - return Err(VerificationError::MessageTooLarge) - } - - Ok(()) - } - - /// Verify proof of This -> Bridged chain messages delivery. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_delivery_proof_from_parachain`. - pub fn verify_messages_delivery_proof( - proof: FromBridgedChainMessagesDeliveryProof>>, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = - proof; - let mut storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - // Messages delivery proof is just proof of single storage key read => any error - // is fatal. 
- let storage_inbound_lane_data_key = bp_messages::storage_keys::inbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &lane, - ); - let inbound_lane_data = storage - .read_and_decode_mandatory_value(storage_inbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::InboundLaneStorage)?; - - // check that the storage proof doesn't have any untouched trie nodes - storage.ensure_no_unused_nodes().map_err(VerificationError::StorageProof)?; - - Ok((lane, inbound_lane_data)) - } -} - -/// Sub-module that is declaring types required for processing Bridged -> This chain messages. -pub mod target { - use super::*; - - /// Decoded Bridged -> This message payload. - pub type FromBridgedChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Messages proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of messages and (optionally) outbound lane state; - /// - lane id; - /// - nonces (inclusive range) of messages which are included in this proof. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesProof { - /// Hash of the finalized bridged header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// A storage trie proof of messages being delivered. - pub storage_proof: RawStorageProof, - /// Messages in this proof are sent over this lane. - pub lane: LaneId, - /// Nonce of the first message being delivered. - pub nonces_start: MessageNonce, - /// Nonce of the last message being delivered. - pub nonces_end: MessageNonce, - } - - impl Size for FromBridgedChainMessagesProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// Return maximal dispatch weight of the message we're able to receive. - pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - maximal_extrinsic_weight / 2 - } - - /// Return maximal message size given maximal extrinsic size. - pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { - maximal_extrinsic_size / 3 * 2 - } - - /// `SourceHeaderChain` implementation that is using default types and perform default checks. - pub struct SourceHeaderChainAdapter(PhantomData); - - impl SourceHeaderChain for SourceHeaderChainAdapter { - type MessagesProof = FromBridgedChainMessagesProof>>; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError> { - verify_messages_proof::(proof, messages_count) - } - } - - /// Verify proof of Bridged -> This chain messages. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_proof_from_parachain`. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. 
- pub fn verify_messages_proof( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane, - nonces_start, - nonces_end, - } = proof; - let storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - let mut parser = StorageProofCheckerAdapter::<_, B> { storage, _dummy: Default::default() }; - let nonces_range = nonces_start..=nonces_end; - - // receiving proofs where end < begin is ok (if the proof includes the outbound lane state) - let messages_in_the_proof = nonces_range.checked_len().unwrap_or(0); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(VerificationError::MessagesCountMismatch) - } - - // Read messages first. All messages that are claimed to be in the proof must - // be in the proof. So any error in `read_value`, or even a missing value, is fatal. - // - // Mind that we allow proofs with no messages if the outbound lane state is proved. - let mut messages = Vec::with_capacity(messages_in_the_proof as _); - for nonce in nonces_range { - let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = parser.read_and_decode_message_payload(&message_key)?; - messages.push(Message { key: message_key, payload: message_payload }); - } - - // Now let's check if the proof contains the outbound lane state proof. It is optional, so - // we simply ignore `read_value` errors and a missing value. - let proved_lane_messages = ProvedLaneMessages { - lane_state: parser.read_and_decode_outbound_lane_data(&lane)?, - messages, - }; - - // Now we may actually check if the proof is empty or not. - if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(VerificationError::EmptyMessageProof) - } - - // check that the storage proof doesn't have any untouched trie nodes - parser - .storage - .ensure_no_unused_nodes() - .map_err(VerificationError::StorageProof)?; - - // We only support single lane messages in this schema - let mut proved_messages = ProvedMessages::new(); - proved_messages.insert(lane, proved_lane_messages); - - Ok(proved_messages) - } - - struct StorageProofCheckerAdapter { - storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData, - } - - impl StorageProofCheckerAdapter { - fn read_and_decode_outbound_lane_data( - &mut self, - lane_id: &LaneId, - ) -> Result, VerificationError> { - let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - lane_id, - ); - - self.storage - .read_and_decode_opt_value(storage_outbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::OutboundLaneStorage) - } - - fn read_and_decode_message_payload( - &mut self, - message_key: &MessageKey, - ) -> Result { - let storage_message_key = bp_messages::storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ); - self.storage - .read_and_decode_mandatory_value(storage_message_key.0.as_ref()) - .map_err(VerificationError::MessageStorage) - } - } -} - -/// The `BridgeMessagesCall` used by a chain.
-pub type BridgeMessagesCallOf = bp_messages::BridgeMessagesCall< - bp_runtime::AccountIdOf, - target::FromBridgedChainMessagesProof>, - source::FromBridgedChainMessagesDeliveryProof>, ->; - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_messages_storage_proof, - }, - mock::*, - }; - use bp_header_chain::{HeaderChainError, StoredHeaderDataBuilder}; - use bp_runtime::{HeaderId, StorageProofError}; - use codec::Encode; - use sp_core::H256; - use sp_runtime::traits::Header as _; - - #[test] - fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!(source::verify_chain_message::(&vec![ - 42; - BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - - 1 - ]) - .is_err()); - } - - #[test] - fn verify_chain_message_rejects_message_too_large_message() { - assert!(source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as usize + 1 - ],) - .is_err()); - } - - #[test] - fn verify_chain_message_accepts_maximal_message() { - assert_eq!( - source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as _ - ],), - Ok(()), - ); - } - - fn using_messages_proof( - nonces_end: MessageNonce, - outbound_lane_data: Option, - encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, - encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, - test: impl Fn(target::FromBridgedChainMessagesProof) -> R, - ) -> R { - let (state_root, storage_proof) = prepare_messages_storage_proof::( - TEST_LANE_ID, - 1..=nonces_end, - outbound_lane_data, - bp_runtime::StorageProofSize::Minimal(0), - vec![42], - encode_message, - encode_outbound_lane_data, - ); - - sp_io::TestExternalities::new(Default::default()).execute_with(move || { - let bridged_header = BridgedChainHeader::new( - 0, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - - pallet_bridge_grandpa::BestFinalized::::put(HeaderId( - 0, - bridged_header_hash, - )); - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - bridged_header.build(), - ); - test(target::FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: TEST_LANE_ID, - nonces_start: 1, - nonces_end, - }) - }) - } - - #[test] - fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 5) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 15) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_is_missing_from_the_chain() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::remove(bridged_header_hash); - target::verify_messages_proof::(proof, 10) - }), - Err(VerificationError::HeaderChain(HeaderChainError::UnknownHeader)), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_state_root_mismatches() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, 
|proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - BridgedChainHeader::new( - 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - .build(), - ); - target::verify_messages_proof::(proof, 10) - }), - Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::StorageRootMismatch - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - let node = proof.storage_proof.pop().unwrap(); - proof.storage_proof.push(node.clone()); - proof.storage_proof.push(node); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::DuplicateNodesInProof - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_unused_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.storage_proof.push(vec![42]); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::StorageProof(StorageProofError::UnusedNodesInTheProof)), - ); - } - - #[test] - fn message_proof_is_rejected_if_required_message_is_missing() { - matches!( - using_messages_proof( - 10, - None, - |n, m| if n != 5 { Some(m.encode()) } else { None }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10) - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueEmpty)), - ); - } - - #[test] - fn message_proof_is_rejected_if_message_decode_fails() { - matches!( - using_messages_proof( - 10, - None, - |n, m| { - let mut m = m.encode(); - if n == 5 { - m = vec![42] - } - Some(m) - }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueDecodeFailed(_))), - ); - } - - #[test] - fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { - matches!( - using_messages_proof( - 10, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - |d| { - let mut d = d.encode(); - d.truncate(1); - d - }, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::OutboundLaneStorage( - StorageProofError::StorageValueDecodeFailed(_) - )), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_is_empty() { - assert_eq!( - using_messages_proof(0, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 0) - },), - Err(VerificationError::EmptyMessageProof), - ); - } - - #[test] - fn non_empty_message_proof_without_messages_is_accepted() { - assert_eq!( - using_messages_proof( - 0, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 0), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: Vec::new(), - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn non_empty_message_proof_is_accepted() { - assert_eq!( - using_messages_proof( - 1, - Some(OutboundLaneData { - 
oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 1), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: vec![Message { - key: MessageKey { lane_id: TEST_LANE_ID, nonce: 1 }, - payload: vec![42], - }], - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn verify_messages_proof_does_not_panic_if_messages_count_mismatches() { - assert_eq!( - using_messages_proof(1, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.nonces_end = u64::MAX; - target::verify_messages_proof::(proof, u32::MAX) - },), - Err(VerificationError::MessagesCountMismatch), - ); - } -} diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 74494f7908045..1880e65547fe6 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -19,23 +19,22 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - AccountIdOf, BridgedChain, HashOf, MessageBridge, ThisChain, - }, +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, MessagePayload, +}; +use bp_polkadot_core::parachains::ParaHash; +use bp_runtime::{AccountIdOf, Chain, HashOf, Parachain}; +use codec::Encode; +use frame_support::weights::Weight; +use pallet_bridge_messages::{ + benchmarking::{MessageDeliveryProofParams, MessageProofParams}, messages_generation::{ encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, prepare_messages_storage_proof, }, + BridgedChainOf, ThisChainOf, }; - -use bp_messages::MessagePayload; -use bp_polkadot_core::parachains::ParaHash; -use bp_runtime::{Chain, Parachain, StorageProofSize, UnderlyingChainOf}; -use codec::Encode; -use frame_support::weights::Weight; -use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams}; use sp_runtime::traits::{Header, Zero}; use sp_std::prelude::*; use xcm::latest::prelude::*; @@ -45,11 +44,7 @@ fn prepare_inbound_message( params: &MessageProofParams, successful_dispatch_message_generator: impl Fn(usize) -> MessagePayload, ) -> MessagePayload { - // we only care about **this** message size when message proof needs to be `Minimal` - let expected_size = match params.size { - StorageProofSize::Minimal(size) => size as usize, - _ => 0, - }; + let expected_size = params.proof_params.db_size.unwrap_or(0) as usize; // if we don't need a correct message, then we may just return some random blob if !params.is_successful_dispatch_expected { @@ -75,25 +70,32 @@ fn prepare_inbound_message( /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses GRANDPA finality. For parachains, please use the `prepare_message_proof_from_parachain` /// function. 
-pub fn prepare_message_proof_from_grandpa_chain( +pub fn prepare_message_proof_from_grandpa_chain( params: MessageProofParams, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>>, Weight) where - R: pallet_bridge_grandpa::Config>>, + R: pallet_bridge_grandpa::Config> + + pallet_bridge_messages::Config< + MI, + BridgedHeaderChain = pallet_bridge_grandpa::Pallet, + >, FI: 'static, - B: MessageBridge, + MI: 'static, { // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); + let (state_root, storage_proof) = + prepare_messages_storage_proof::, ThisChainOf>( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -118,30 +120,33 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_proof_from_grandpa_chain` function. -pub fn prepare_message_proof_from_parachain( +pub fn prepare_message_proof_from_parachain( params: MessageProofParams, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>>, Weight) where - R: pallet_bridge_parachains::Config, + R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, + MI: 'static, + BridgedChainOf: Chain + Parachain, { // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); + let (state_root, storage_proof) = + prepare_messages_storage_proof::, ThisChainOf>( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); + insert_header_to_parachains_pallet::>(state_root); ( FromBridgedChainMessagesProof { @@ -160,21 +165,24 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses GRANDPA finality. For parachains, please use the /// `prepare_message_delivery_proof_from_parachain` function. 
-pub fn prepare_message_delivery_proof_from_grandpa_chain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> +pub fn prepare_message_delivery_proof_from_grandpa_chain( + params: MessageDeliveryProofParams>>, +) -> FromBridgedChainMessagesDeliveryProof>> where - R: pallet_bridge_grandpa::Config>>, + R: pallet_bridge_grandpa::Config> + + pallet_bridge_messages::Config< + MI, + BridgedHeaderChain = pallet_bridge_grandpa::Pallet, + >, FI: 'static, - B: MessageBridge, + MI: 'static, { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< + BridgedChainOf, + ThisChainOf, + >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -191,26 +199,25 @@ where /// This method is intended to be used when benchmarking pallet, linked to the chain that /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_delivery_proof_from_grandpa_chain` function. -pub fn prepare_message_delivery_proof_from_parachain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> +pub fn prepare_message_delivery_proof_from_parachain( + params: MessageDeliveryProofParams>>, +) -> FromBridgedChainMessagesDeliveryProof>> where - R: pallet_bridge_parachains::Config, + R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, + MI: 'static, + BridgedChainOf: Chain + Parachain, { // prepare storage proof let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); + let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< + BridgedChainOf, + ThisChainOf, + >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); + insert_header_to_parachains_pallet::>(state_root); FromBridgedChainMessagesDeliveryProof { bridged_header_hash: bridged_header_hash.into(), diff --git a/bridges/bin/runtime-common/src/messages_call_ext.rs b/bridges/bin/runtime-common/src/messages_call_ext.rs index fb07f7b6dd691..a9ee1969ae0ca 100644 --- a/bridges/bin/runtime-common/src/messages_call_ext.rs +++ b/bridges/bin/runtime-common/src/messages_call_ext.rs @@ -14,19 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Signed extension for the `pallet-bridge-messages` that is able to reject obsolete -//! (and some other invalid) transactions. +//! Helpers for easier manipulation of call processing with signed extensions. 
-use crate::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, +use bp_messages::{ + target_chain::MessageDispatch, ChainWithMessages, InboundLaneData, LaneId, MessageNonce, }; -use bp_messages::{target_chain::MessageDispatch, InboundLaneData, LaneId, MessageNonce}; -use bp_runtime::OwnedBridgeModule; -use frame_support::{ - dispatch::CallableCallFor, - traits::{Get, IsSubType}, -}; -use pallet_bridge_messages::{Config, Pallet}; +use bp_runtime::{AccountIdOf, OwnedBridgeModule}; +use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; +use pallet_bridge_messages::{BridgedChainOf, Config, Pallet}; use sp_runtime::{transaction_validity::TransactionValidity, RuntimeDebug}; use sp_std::ops::RangeInclusive; @@ -213,18 +208,8 @@ pub trait MessagesCallSubType, I: 'static>: } impl< - BridgedHeaderHash, - SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof, - >, - TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, Call: IsSubType, T>>, - T: frame_system::Config - + Config, + T: frame_system::Config + Config, I: 'static, > MessagesCallSubType for T::RuntimeCall { @@ -340,16 +325,17 @@ impl< /// Returns occupation state of unrewarded relayers vector. fn unrewarded_relayers_occupation, I: 'static>( - inbound_lane_data: &InboundLaneData, + inbound_lane_data: &InboundLaneData>>, ) -> UnrewardedRelayerOccupation { UnrewardedRelayerOccupation { - free_relayer_slots: T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + free_relayer_slots: T::BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX .saturating_sub(inbound_lane_data.relayers.len() as MessageNonce), free_message_slots: { let unconfirmed_messages = inbound_lane_data .last_delivered_nonce() .saturating_sub(inbound_lane_data.last_confirmed_nonce); - T::MaxUnconfirmedMessagesAtInboundLane::get().saturating_sub(unconfirmed_messages) + T::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX + .saturating_sub(unconfirmed_messages) }, } } @@ -358,22 +344,20 @@ fn unrewarded_relayers_occupation, I: 'static>( mod tests { use super::*; use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, messages_call_ext::MessagesCallSubType, - mock::{ - DummyMessageDispatch, MaxUnconfirmedMessagesAtInboundLane, - MaxUnrewardedRelayerEntriesAtInboundLane, TestRuntime, ThisChainRuntimeCall, - }, + mock::{BridgedUnderlyingChain, DummyMessageDispatch, TestRuntime, ThisChainRuntimeCall}, + }; + use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, DeliveredMessages, UnrewardedRelayer, + UnrewardedRelayersState, }; - use bp_messages::{DeliveredMessages, UnrewardedRelayer, UnrewardedRelayersState}; use sp_std::ops::RangeInclusive; fn fill_unrewarded_relayers() { let mut inbound_lane_state = pallet_bridge_messages::InboundLanes::::get(LaneId([0, 0, 0, 0])); - for n in 0..MaxUnrewardedRelayerEntriesAtInboundLane::get() { + for n in 0..BridgedUnderlyingChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX { inbound_lane_state.relayers.push_back(UnrewardedRelayer { relayer: Default::default(), messages: DeliveredMessages { begin: n + 1, end: n + 1 }, @@ -392,7 +376,7 @@ mod tests { relayer: Default::default(), messages: DeliveredMessages { begin: 1, - end: MaxUnconfirmedMessagesAtInboundLane::get(), + end: 
BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, }, }); pallet_bridge_messages::InboundLanes::::insert( @@ -418,13 +402,13 @@ mod tests { messages_count: nonces_end.checked_sub(nonces_start).map(|x| x + 1).unwrap_or(0) as u32, dispatch_weight: frame_support::weights::Weight::zero(), - proof: FromBridgedChainMessagesProof { + proof: Box::new(FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - storage_proof: vec![], + storage_proof: Default::default(), lane: LaneId([0, 0, 0, 0]), nonces_start, nonces_end, - }, + }), }, ) .check_obsolete_call() @@ -508,8 +492,8 @@ mod tests { sp_io::TestExternalities::new(Default::default()).execute_with(|| { fill_unrewarded_messages(); assert!(validate_message_delivery( - MaxUnconfirmedMessagesAtInboundLane::get(), - MaxUnconfirmedMessagesAtInboundLane::get() - 1 + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX - 1 )); }); } @@ -540,7 +524,7 @@ mod tests { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: FromBridgedChainMessagesDeliveryProof { bridged_header_hash: Default::default(), - storage_proof: Vec::new(), + storage_proof: Default::default(), lane: LaneId([0, 0, 0, 0]), }, relayers_state: UnrewardedRelayersState { @@ -608,7 +592,7 @@ mod tests { free_message_slots: if is_empty { 0 } else { - MaxUnconfirmedMessagesAtInboundLane::get() + BridgedUnderlyingChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX }, }, }, diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index f494746678960..2f248a7162a6c 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -18,26 +18,16 @@ #![cfg(test)] -use crate::messages::{ - source::{ - FromThisChainMaximalOutboundPayloadSize, FromThisChainMessagePayload, - TargetHeaderChainAdapter, - }, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, -}; +use crate::messages_xcm_extension::XcmAsPlainPayload; -use bp_header_chain::{ChainWithGrandpa, HeaderChain}; +use bp_header_chain::ChainWithGrandpa; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - LaneId, MessageNonce, + ChainWithMessages, LaneId, MessageNonce, }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_relayers::PayRewardFromAccount; -use bp_runtime::{ - messages::MessageDispatchResult, Chain, ChainId, Parachain, UnderlyingChainProvider, -}; -use codec::{Decode, Encode}; +use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, Parachain}; use frame_support::{ derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, @@ -46,7 +36,7 @@ use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, - FixedPointNumber, Perquintill, + FixedPointNumber, Perquintill, StateVersion, }; /// Account identifier at `ThisChain`. @@ -61,8 +51,6 @@ pub type ThisChainHash = H256; pub type ThisChainHasher = BlakeTwo256; /// Runtime call at `ThisChain`. pub type ThisChainRuntimeCall = RuntimeCall; -/// Runtime call origin at `ThisChain`. -pub type ThisChainCallOrigin = RuntimeOrigin; /// Header of `ThisChain`. pub type ThisChainHeader = sp_runtime::generic::Header; /// Block of `ThisChain`. 
@@ -100,8 +88,6 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 0]); /// Bridged chain id used in tests. pub const TEST_BRIDGED_CHAIN_ID: ChainId = *b"brdg"; -/// Maximal extrinsic weight at the `BridgedChain`. -pub const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: usize = 2048; /// Maximal extrinsic size at the `BridgedChain`. pub const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; @@ -126,7 +112,6 @@ crate::generate_bridge_reject_obsolete_headers_and_messages! { parameter_types! { pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID]; - pub const BridgedChainId: ChainId = TEST_BRIDGED_CHAIN_ID; pub const BridgedParasPalletName: &'static str = "Paras"; pub const ExistentialDeposit: ThisChainBalance = 500; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; @@ -136,8 +121,6 @@ parameter_types! { pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); pub MaximumMultiplier: Multiplier = sp_runtime::traits::Bounded::max_value(); - pub const MaxUnrewardedRelayerEntriesAtInboundLane: MessageNonce = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: MessageNonce = 1_000; pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } @@ -203,17 +186,12 @@ impl pallet_bridge_messages::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_bridge_messages::weights::BridgeWeight; type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = FromThisChainMaximalOutboundPayloadSize; - type OutboundPayload = FromThisChainMessagePayload; + type OutboundPayload = XcmAsPlainPayload; - type InboundPayload = FromBridgedChainMessagePayload; - type InboundRelayer = BridgedChainAccountId; + type InboundPayload = Vec; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), @@ -221,9 +199,11 @@ impl pallet_bridge_messages::Config for TestRuntime { >; type OnMessagesDelivered = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = DummyMessageDispatch; - type BridgedChainId = BridgedChainId; + + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgeGrandpa; } impl pallet_bridge_relayers::Config for TestRuntime { @@ -262,55 +242,6 @@ impl MessageDispatch for DummyMessageDispatch { } } -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = ThisChain; - type BridgedChain = BridgedChain; - type BridgedHeaderChain = pallet_bridge_grandpa::GrandpaChainHeaders; -} - -/// Bridge that is deployed on `BridgedChain` and allows sending/receiving messages to/from -/// `ThisChain`. 
-#[derive(Debug, PartialEq, Eq)] -pub struct OnBridgedChainBridge; - -impl MessageBridge for OnBridgedChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = BridgedChain; - type BridgedChain = ThisChain; - type BridgedHeaderChain = ThisHeaderChain; -} - -/// Dummy implementation of `HeaderChain` for `ThisChain` at the `BridgedChain`. -pub struct ThisHeaderChain; - -impl HeaderChain for ThisHeaderChain { - fn finalized_header_state_root(_hash: HashOf) -> Option> { - unreachable!() - } -} - -/// Call origin at `BridgedChain`. -#[derive(Clone, Debug)] -pub struct BridgedChainOrigin; - -impl From - for Result, BridgedChainOrigin> -{ - fn from( - _origin: BridgedChainOrigin, - ) -> Result, BridgedChainOrigin> { - unreachable!() - } -} - /// Underlying chain of `ThisChain`. pub struct ThisUnderlyingChain; @@ -326,6 +257,8 @@ impl Chain for ThisUnderlyingChain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -335,29 +268,20 @@ impl Chain for ThisUnderlyingChain { } } -/// The chain where we are in tests. -pub struct ThisChain; - -impl UnderlyingChainProvider for ThisChain { - type Chain = ThisUnderlyingChain; -} +impl ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; -impl ThisChainWithMessages for ThisChain { - type RuntimeOrigin = ThisChainCallOrigin; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; } -impl BridgedChainWithMessages for ThisChain {} - /// Underlying chain of `BridgedChain`. pub struct BridgedUnderlyingChain; /// Some parachain under `BridgedChain` consensus. pub struct BridgedUnderlyingParachain; -/// Runtime call of the `BridgedChain`. -#[derive(Decode, Encode)] -pub struct BridgedChainCall; impl Chain for BridgedUnderlyingChain { - const ID: ChainId = *b"buch"; + const ID: ChainId = TEST_BRIDGED_CHAIN_ID; type BlockNumber = BridgedChainBlockNumber; type Hash = BridgedChainHash; @@ -368,6 +292,8 @@ impl Chain for BridgedUnderlyingChain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -384,6 +310,12 @@ impl ChainWithGrandpa for BridgedUnderlyingChain { const AVERAGE_HEADER_SIZE: u32 = 64; } +impl ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + impl Chain for BridgedUnderlyingParachain { const ID: ChainId = *b"bupc"; @@ -396,6 +328,8 @@ impl Chain for BridgedUnderlyingParachain { type Nonce = u32; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE } @@ -409,19 +343,6 @@ impl Parachain for BridgedUnderlyingParachain { const MAX_HEADER_SIZE: u32 = 1_024; } -/// The other, bridged chain, used in tests. 
-pub struct BridgedChain; - -impl UnderlyingChainProvider for BridgedChain { - type Chain = BridgedUnderlyingChain; -} - -impl ThisChainWithMessages for BridgedChain { - type RuntimeOrigin = BridgedChainOrigin; -} - -impl BridgedChainWithMessages for BridgedChain {} - /// Run test within test externalities. pub fn run_test(test: impl FnOnce()) { sp_io::TestExternalities::new(Default::default()).execute_with(test) diff --git a/bridges/bin/runtime-common/src/parachains_benchmarking.rs b/bridges/bin/runtime-common/src/parachains_benchmarking.rs index b3050b9ac0f3c..bcbd779b44dea 100644 --- a/bridges/bin/runtime-common/src/parachains_benchmarking.rs +++ b/bridges/bin/runtime-common/src/parachains_benchmarking.rs @@ -18,14 +18,11 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{ - messages_benchmarking::insert_header_to_grandpa_pallet, - messages_generation::grow_trie_leaf_value, -}; +use crate::messages_benchmarking::insert_header_to_grandpa_pallet; use bp_parachains::parachain_head_storage_key_at_source; use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{record_all_trie_keys, StorageProofSize}; +use bp_runtime::{grow_storage_value, record_all_trie_keys, Chain, UnverifiedStorageProofParams}; use codec::Encode; use frame_support::traits::Get; use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; @@ -39,14 +36,14 @@ use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; pub fn prepare_parachain_heads_proof( parachains: &[ParaId], parachain_head_size: u32, - size: StorageProofSize, + proof_params: UnverifiedStorageProofParams, ) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) where R: pallet_bridge_parachains::Config + pallet_bridge_grandpa::Config, PI: 'static, >::BridgedChain: - bp_runtime::Chain, + Chain, { let parachain_head = ParaHead(vec![0u8; parachain_head_size as usize]); @@ -64,7 +61,7 @@ where let storage_key = parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain); let leaf_data = if i == 0 { - grow_trie_leaf_value(parachain_head.encode(), size) + grow_storage_value(parachain_head.encode(), &proof_params) } else { parachain_head.encode() }; diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index d9afe2c8bf767..b765fbc57bb0a 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -11,14 +11,14 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-support = { workspace = true } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 4b3ed052f1382..ff89864fb2db4 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -11,14 +11,14 @@ repository.workspace = true workspace = true [dependencies] 
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-support = { workspace = true } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index 4b900002a4d81..5609398385f98 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -13,19 +13,19 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-polkadot-core = { workspace = true } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-system = { workspace = true } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml index ff6dd8849abe3..605643b0a4eb7 100644 --- a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs index ef3ef4ab7b7a9..c990e8a12f367 100644 --- a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs @@ 
-29,7 +29,7 @@ use frame_support::{ dispatch::DispatchClass, sp_runtime::{MultiAddress, MultiSigner}, }; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubKusama parachain. #[derive(RuntimeDebug)] @@ -48,6 +48,8 @@ impl Chain for BridgeHubKusama { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index da8b8a82fa702..97e36a19c748c 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -14,16 +14,16 @@ workspace = true # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs index 9db71af928e5d..7379b8863b1de 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs @@ -26,7 +26,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubPolkadot parachain. 
#[derive(RuntimeDebug)] @@ -45,6 +45,8 @@ impl Chain for BridgeHubPolkadot { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml index f7672df012f2f..5c91847032235 100644 --- a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index d7097f01c5316..73af997b9950e 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -25,8 +25,10 @@ use bp_messages::*; use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; -use frame_support::dispatch::DispatchClass; -use sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug}; +use frame_support::{ + dispatch::DispatchClass, + sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug, StateVersion}, +}; /// BridgeHubRococo parachain. #[derive(RuntimeDebug)] @@ -45,6 +47,8 @@ impl Chain for BridgeHubRococo { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } @@ -103,10 +107,10 @@ frame_support::parameter_types! { pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 59_034_266; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 314_037_860; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. 
- /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) + /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 57_414_813; } diff --git a/bridges/chains/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml index ec74c4b947d69..0b429ab9a0bd9 100644 --- a/bridges/chains/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -14,16 +14,16 @@ workspace = true # Bridge Dependencies -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } +bp-bridge-hub-cumulus = { workspace = true } +bp-runtime = { workspace = true } +bp-messages = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 800f290d7bfa4..17ff2c858a1d3 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, }; use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; +use sp_runtime::{RuntimeDebug, StateVersion}; /// BridgeHubWestend parachain. 
#[derive(RuntimeDebug)] @@ -44,6 +44,8 @@ impl Chain for BridgeHubWestend { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml index 66061ff2793cb..ec45c1eddce5d 100644 --- a/bridges/chains/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index fd7172c5869d4..dcd0b23abbbef 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Kusama Chain pub struct Kusama; @@ -41,6 +41,8 @@ impl Chain for Kusama { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V0; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index 700247b7055a8..ea5f4d2e77591 100644 --- a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -11,23 +11,23 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = 
"../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index f3d300567f2b4..88980a9575016 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -37,7 +37,9 @@ use frame_support::{ }; use frame_system::limits; use scale_info::TypeInfo; -use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill}; +use sp_runtime::{ + traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill, StateVersion, +}; // This chain reuses most of Polkadot primitives. pub use bp_polkadot_core::{ @@ -192,6 +194,8 @@ impl Chain for PolkadotBulletin { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } diff --git a/bridges/chains/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml index c700935f3083b..50f637af4251c 100644 --- a/bridges/chains/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index a8cac0467d574..f4b262d40735d 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -25,7 +25,7 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::{ decl_bridge_finality_runtime_apis, extensions::PrevalidateAttests, Chain, ChainId, }; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Polkadot Chain pub struct Polkadot; @@ -43,6 +43,8 @@ impl Chain for Polkadot { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V0; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml index 5a5613bb376a5..49a1a397ee096 100644 --- a/bridges/chains/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime 
= { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index b290fe71c829d..bfcafdf41ea2e 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Rococo Chain pub struct Rococo; @@ -41,6 +41,8 @@ impl Chain for Rococo { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/chains/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml index 10b06d76507ef..5e27bc647bfc5 100644 --- a/bridges/chains/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -14,15 +14,15 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index ef451f7de0a96..2a247e03e59d6 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -23,7 +23,7 @@ pub use bp_polkadot_core::*; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; /// Westend Chain pub struct Westend; @@ -41,6 +41,8 @@ impl Chain for Westend { type Nonce = Nonce; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { max_extrinsic_size() } diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index e36bbb615f23a..cffc62d290828 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -12,32 +12,32 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { 
version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true } # Bridge Dependencies -bp-beefy = { path = "../../primitives/beefy", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-beefy = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy" } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2" } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr" } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range" } -rand = "0.8.5" -sp-io = { path = "../../../substrate/primitives/io" } -bp-test-utils = { path = "../../primitives/test-utils" } +sp-consensus-beefy = { workspace = true, default-features = true } +mmr-lib = { workspace = true } +pallet-beefy-mmr = { workspace = true, default-features = true } +pallet-mmr = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/beefy/src/mock.rs b/bridges/modules/beefy/src/mock.rs index 53efd57c29a0d..3b751ddf066c9 100644 --- a/bridges/modules/beefy/src/mock.rs +++ b/bridges/modules/beefy/src/mock.rs @@ -29,6 +29,7 @@ use sp_core::{sr25519::Signature, Pair}; use sp_runtime::{ testing::{Header, H256}, traits::{BlakeTwo256, Hash}, + StateVersion, }; pub use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Pair as BeefyPair}; @@ -93,6 +94,8 @@ impl Chain for TestBridgedChain { type Nonce = u64; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 0ca6b67503511..6d1419ae5b030 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -13,32 +13,31 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # 
Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } # Optional Benchmarking Dependencies -bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +bp-test-utils = { optional = true, workspace = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-runtime = { features = ["test-helpers"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "bp-runtime/std", "bp-test-utils/std", "codec/std", - "finality-grandpa/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -56,7 +54,6 @@ std = [ "sp-consensus-grandpa/std", "sp-runtime/std", "sp-std/std", - "sp-trie/std", ] runtime-benchmarks = [ "bp-test-utils", diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index 3b77f676870e1..c62951b74656b 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -1443,11 +1443,14 @@ mod tests { } #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { + fn verify_storage_proof_rejects_unknown_header() { run_test(|| { assert_noop!( - Pallet::::storage_proof_checker(Default::default(), vec![],) - .map(|_| ()), + Pallet::::verify_storage_proof( + Default::default(), + Default::default(), + ) + .map(|_| ()), bp_header_chain::HeaderChainError::UnknownHeader, ); }); @@ -1465,9 +1468,7 @@ mod tests { >::put(HeaderId(2, hash)); >::insert(hash, header.build()); - assert_ok!( - Pallet::::storage_proof_checker(hash, storage_proof).map(|_| ()) - ); + assert_ok!(Pallet::::verify_storage_proof(hash, storage_proof).map(|_| ())); }); } diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index 27df9d9c78f54..71af6182e057c 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,7 +20,8 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::{Chain, ChainId}; use frame_support::{ - construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, + construct_runtime, derive_impl, parameter_types, sp_runtime::StateVersion, traits::Hooks, + weights::Weight, }; use sp_core::sr25519::Signature; @@ -78,6 +79,8 @@ impl Chain for TestBridgedChain { type Nonce = u64; type Signature = Signature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git 
a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 71c86ccc03617..33f524030d264 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -11,54 +11,69 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies - -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies - -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { optional = true, workspace = true } [dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-test-utils = { workspace = true } +pallet-balances = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } [features] default = ["std"] std = [ + "bp-header-chain/std", "bp-messages/std", "bp-runtime/std", + "bp-test-utils/std", "codec/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", - "num-traits/std", + "pallet-balances/std", + "pallet-bridge-grandpa/std", "scale-info/std", + "sp-core/std", + "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-trie/std", ] runtime-benchmarks = [ + "bp-runtime/test-helpers", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-bridge-grandpa/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-balances/try-runtime", + "pallet-bridge-grandpa/try-runtime", "sp-runtime/try-runtime", ] +test-helpers = [ + "bp-runtime/test-helpers", + "sp-trie", +] diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md index c06b96b857dea..80fd92eb0e5a7 100644 --- a/bridges/modules/messages/README.md +++ b/bridges/modules/messages/README.md @@ -104,17 +104,22 @@ the message. When a message is delivered to the target chain, the `MessagesDeliv `receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane identifier and inclusive range of delivered message nonces. 
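The context above notes that `receive_messages_delivery_proof()` emits `MessagesDelivered` with the lane identifier and the inclusive range of delivered message nonces. A small, hedged sketch of consuming that range follows; the `begin`/`end` field names of `DeliveredMessages` are assumed here and are not introduced by this diff.

```rust
use bp_messages::{DeliveredMessages, LaneId, MessageNonce};

// Illustrative only: turn the payload of a `MessagesDelivered`-style notification into
// the (lane, first_nonce, last_nonce) triple a relayer or indexer would care about.
fn delivered_range(
	lane: LaneId,
	messages: &DeliveredMessages,
) -> (LaneId, MessageNonce, MessageNonce) {
	(lane, messages.begin, messages.end)
}
```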
-The pallet provides no means to get the result of message dispatch at the target chain. If that is required, it must be -done outside of the pallet. For example, XCM messages, when dispatched, have special instructions to send some data back -to the sender. Other dispatchers may use similar mechanism for that. -### How to plug-in Messages Module to Send Messages to the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with outbound messages. The -`pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the bridged chain as the target for our outbound -messages. It must be able to check that the bridged chain may accept our message - like that the message has size below -maximal possible transaction size of the chain and so on. And when the relayer sends us a confirmation transaction, this -implementation must be able to parse and verify the proof of messages delivery. Normally, you would reuse the same -(configurable) type on all chains that are sending messages to the same bridged chain. +The pallet provides no means to get the result of message dispatch at the target chain. If that is +required, it must be done outside of the pallet. For example, XCM messages, when dispatched, have +special instructions to send some data back to the sender. Other dispatchers may use similar +mechanism for that. + +### How to plug-in Messages Module to Send and Receive Messages from the Bridged Chain? + +The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with +inbound messages. The `pallet_bridge_messages::BridgedChain` defines basic primitives of the bridged +chain. The `pallet_bridge_messages::BridgedHeaderChain` defines the way we access the bridged chain +headers in our runtime. You may use `pallet_bridge_grandpa` if you're bridging with chain that uses +GRANDPA finality or `pallet_bridge_parachains::ParachainHeaders` if you're bridging with parachain. + +The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered +messages. Apart from actually dispatching the message, the implementation must return the correct +dispatch weight of the message before dispatch is called. The last type is the `pallet_bridge_messages::Config::DeliveryConfirmationPayments`. When confirmation transaction is received, we call the `pay_reward()` method, passing the range of delivered messages. @@ -129,18 +134,6 @@ You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` [`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). It implements all required traits and will simply reject all transactions, related to outbound messages. -### How to plug-in Messages Module to Receive Messages from the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with inbound messages. The -`pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the bridged chain as the source of our inbound -messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof -of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are -sending messages to the same bridged chain. - -The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered messages. 
Apart from -actually dispatching the message, the implementation must return the correct dispatch weight of the message before -dispatch is called. - ### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What shall I do? You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from the @@ -150,36 +143,42 @@ and will simply reject all transactions, related to inbound messages. ### What about other Constants in the Messages Module Configuration Trait? Two settings that are used to check messages in the `send_message()` function. The -`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes, that may be used to send -messages. All messages sent using other lanes are rejected. All messages that have size above -`pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected. - -To be able to reward the relayer for delivering messages, we store a map of message nonces range => identifier of the -relayer that has delivered this range at the target chain runtime storage. If a relayer delivers multiple consequent -ranges, they're merged into single entry. So there may be more than one entry for the same relayer. Eventually, this -whole map must be delivered back to the source chain to confirm delivery and pay rewards. So to make sure we are able to -craft this confirmation transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure -that the weight of processing this map is below a certain limit. Both size and processing weight mostly depend on the -number of entries. The number of entries is limited with the -`pallet_bridge_messages::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight also depends on -the total number of messages that are being confirmed, because every confirmed message needs to be read. So there's -another `pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. - -When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality -of headers (and it is the most obvious option for Substrate-based chains with finality notion), then choosing too small -values for these parameters may cause significant delays in message delivery. That's because there are too many actors -involved in this scheme: 1) authorities that are finalizing headers of the target chain need to finalize header with -non-empty map; 2) the headers relayer then needs to submit this header and its finality proof to the source chain; 3) -the messages relayer must then send confirmation transaction (storage proof of this map) to the source chain; 4) when -the confirmation transaction will be mined at some header, source chain authorities must finalize this header; 5) the -headers relay then needs to submit this header and its finality proof to the target chain; 6) only now the messages -relayer may submit new messages from the source to target chain and prune the entry from the map. - -Delivery transaction requires the relayer to provide both number of entries and total number of messages in the map. -This means that the module never charges an extra cost for delivering a map - the relayer would need to pay exactly for -the number of entries+messages it has delivered. So the best guess for values of these parameters would be the pair that -would occupy `N` percent of the maximal transaction size and weight of the source chain. 
The `N` should be large enough -to process large maps, at the same time keeping reserve for future source chain upgrades. +`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes, that +may be used to send messages. All messages sent using other lanes are rejected. All messages that have +size above `pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected. + +To be able to reward the relayer for delivering messages, we store a map of message nonces range => +identifier of the relayer that has delivered this range at the target chain runtime storage. If a +relayer delivers multiple consequent ranges, they're merged into single entry. So there may be more +than one entry for the same relayer. Eventually, this whole map must be delivered back to the source +chain to confirm delivery and pay rewards. So to make sure we are able to craft this confirmation +transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that +the weight of processing this map is below a certain limit. Both size and processing weight mostly +depend on the number of entries. The number of entries is limited with the +`pallet_bridge_messages::Config::BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` parameter. +Processing weight also depends on the total number of messages that are being confirmed, because every +confirmed message needs to be read. So there's another +`pallet_bridge_messages::Config::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX` parameter +for that. + +When choosing values for these parameters, you must also keep in mind that if proof in your scheme +is based on finality of headers (and it is the most obvious option for Substrate-based chains with +finality notion), then choosing too small values for these parameters may cause significant delays +in message delivery. That's because there are too many actors involved in this scheme: 1) authorities +that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the +headers relayer then needs to submit this header and its finality proof to the source chain; 3) the +messages relayer must then send confirmation transaction (storage proof of this map) to the source +chain; 4) when the confirmation transaction will be mined at some header, source chain authorities +must finalize this header; 5) the headers relay then needs to submit this header and its finality +proof to the target chain; 6) only now the messages relayer may submit new messages from the source +to target chain and prune the entry from the map. + +Delivery transaction requires the relayer to provide both number of entries and total number of +messages in the map. This means that the module never charges an extra cost for delivering a map - +the relayer would need to pay exactly for the number of entries+messages it has delivered. So the +best guess for values of these parameters would be the pair that would occupy `N` percent of the +maximal transaction size and weight of the source chain. The `N` should be large enough to process +large maps, at the same time keeping reserve for future source chain upgrades. ## Non-Essential Functionality diff --git a/bridges/modules/messages/src/benchmarking.rs b/bridges/modules/messages/src/benchmarking.rs index 4f13c4409672b..d38aaf32dc94b 100644 --- a/bridges/modules/messages/src/benchmarking.rs +++ b/bridges/modules/messages/src/benchmarking.rs @@ -16,19 +16,22 @@ //! Messages pallet benchmarking. 
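The rewritten section above replaces the standalone `MaxUnrewardedRelayerEntriesAtInboundLane` / `MaxUnconfirmedMessagesAtInboundLane` parameters with constants on the bridged chain type. A minimal sketch, assuming only the two constant names used in the text and that both are `MessageNonce` values:

```rust
use bp_messages::{ChainWithMessages, MessageNonce};

// Sketch only: the per-confirmation-transaction limits discussed above are now read
// from the bridged chain's `ChainWithMessages` implementation.
fn confirmation_tx_limits<BC: ChainWithMessages>() -> (MessageNonce, MessageNonce) {
	(
		BC::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX,
		BC::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX,
	)
}
```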
+#![cfg(feature = "runtime-benchmarks")] + use crate::{ inbound_lane::InboundLaneStorage, outbound_lane, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, - Call, OutboundLanes, RuntimeInboundLaneStorage, + BridgedChainOf, Call, OutboundLanes, RuntimeInboundLaneStorage, }; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, }; -use bp_runtime::StorageProofSize; +use bp_runtime::{AccountIdOf, HashOf, UnverifiedStorageProofParams}; use codec::Decode; -use frame_benchmarking::{account, benchmarks_instance_pallet}; +use frame_benchmarking::{account, v2::*}; use frame_support::weights::Weight; use frame_system::RawOrigin; use sp_runtime::{traits::TrailingZeroInput, BoundedVec}; @@ -54,7 +57,7 @@ pub struct MessageProofParams { /// return `true` from the `is_message_successfully_dispatched`. pub is_successful_dispatch_expected: bool, /// Proof size requirements. - pub size: StorageProofSize, + pub proof_params: UnverifiedStorageProofParams, } /// Benchmark-specific message delivery proof parameters. @@ -65,7 +68,7 @@ pub struct MessageDeliveryProofParams { /// The proof needs to include this inbound lane data. pub inbound_lane_data: InboundLaneData, /// Proof size requirements. - pub size: StorageProofSize, + pub proof_params: UnverifiedStorageProofParams, } /// Trait that must be implemented by runtime. @@ -80,8 +83,8 @@ pub trait Config: crate::Config { /// Return id of relayer account at the bridged chain. /// /// By default, zero account is returned. - fn bridged_relayer_id() -> Self::InboundRelayer { - Self::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() + fn bridged_relayer_id() -> AccountIdOf> { + Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() } /// Create given account and give it enough balance for test purposes. Used to create @@ -94,11 +97,11 @@ pub trait Config: crate::Config { /// Prepare messages proof to receive by the module. fn prepare_message_proof( params: MessageProofParams, - ) -> (::MessagesProof, Weight); + ) -> (FromBridgedChainMessagesProof>>, Weight); /// Prepare messages delivery proof to receive by the module. fn prepare_message_delivery_proof( params: MessageDeliveryProofParams, - ) -> >::MessagesDeliveryProof; + ) -> FromBridgedChainMessagesDeliveryProof>>; /// Returns true if message has been successfully dispatched or not. fn is_message_successfully_dispatched(_nonce: MessageNonce) -> bool { @@ -109,174 +112,227 @@ pub trait Config: crate::Config { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool; } -benchmarks_instance_pallet! 
{ +fn send_regular_message, I: 'static>() { + let mut outbound_lane = outbound_lane::(T::bench_lane_id()); + outbound_lane.send_message(BoundedVec::try_from(vec![]).expect("We craft valid messages")); +} + +fn receive_messages, I: 'static>(nonce: MessageNonce) { + let mut inbound_lane_storage = + RuntimeInboundLaneStorage::::from_lane_id(T::bench_lane_id()); + inbound_lane_storage.set_data(InboundLaneData { + relayers: vec![UnrewardedRelayer { + relayer: T::bridged_relayer_id(), + messages: DeliveredMessages::new(nonce), + }] + .into_iter() + .collect(), + last_confirmed_nonce: 0, + }); +} + +struct ReceiveMessagesProofSetup, I: 'static> { + relayer_id_on_src: AccountIdOf>, + relayer_id_on_tgt: T::AccountId, + msgs_count: u32, + _phantom_data: sp_std::marker::PhantomData, +} + +impl, I: 'static> ReceiveMessagesProofSetup { + const LATEST_RECEIVED_NONCE: MessageNonce = 20; + + fn new(msgs_count: u32) -> Self { + let setup = Self { + relayer_id_on_src: T::bridged_relayer_id(), + relayer_id_on_tgt: account("relayer", 0, SEED), + msgs_count, + _phantom_data: Default::default(), + }; + T::endow_account(&setup.relayer_id_on_tgt); + // mark messages 1..=latest_recvd_nonce as delivered + receive_messages::(Self::LATEST_RECEIVED_NONCE); + + setup + } + + fn relayer_id_on_src(&self) -> AccountIdOf> { + self.relayer_id_on_src.clone() + } + + fn relayer_id_on_tgt(&self) -> T::AccountId { + self.relayer_id_on_tgt.clone() + } + + fn last_nonce(&self) -> MessageNonce { + Self::LATEST_RECEIVED_NONCE + self.msgs_count as u64 + } + + fn nonces(&self) -> RangeInclusive { + (Self::LATEST_RECEIVED_NONCE + 1)..=self.last_nonce() + } + + fn check_last_nonce(&self) { + assert_eq!( + crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), + self.last_nonce(), + ); + } +} + +#[instance_benchmarks] +mod benchmarks { + use super::*; + // // Benchmarks that are used directly by the runtime calls weight formulae. // - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + fn max_msgs, I: 'static>() -> u32 { + T::BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX as u32 - + ReceiveMessagesProofSetup::::LATEST_RECEIVED_NONCE as u32 + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // This is base benchmark for all other message delivery benchmarks. 
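The hunks that follow migrate this file from the `benchmarks_instance_pallet! { ... }` macro to the attribute-based benchmarking v2 syntax (`#[instance_benchmarks]`, `#[benchmark]`, `#[extrinsic_call]`). A schematic skeleton of that pattern is shown below; the call name, storage item, and mock paths are placeholders and not items from this pallet.

```rust
#![cfg(feature = "runtime-benchmarks")]

use frame_benchmarking::{v2::*, whitelisted_caller};
use frame_system::RawOrigin;

#[instance_benchmarks]
mod benches {
	use super::*;

	#[benchmark]
	fn do_something(n: Linear<1, 1_000>) {
		// setup code runs before the measured call
		let caller: T::AccountId = whitelisted_caller();

		#[extrinsic_call]
		do_something(RawOrigin::Signed(caller), n);

		// verification code runs after the measured call
		assert!(Value::<T, I>::get().is_some());
	}

	// `Pallet` and the mock paths are placeholders for the real pallet and test runtime.
	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```

The `Linear<1, 1_000>` parameter plays the role that the old `let i in A .. B` ranges played in the removed macro syntax.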
- receive_single_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_message_proof() { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: + // Benchmark `receive_messages_proof` extrinsic with `n` minimal-weight messages and following + // conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // The weight of single message delivery could be approximated as - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
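The removed comment above derives the per-message delivery weight as the difference of two benchmark results (the new `receive_n_messages_proof` benchmark measures the linear component directly instead). As a hedged arithmetic illustration of that approximation only:

```rust
use frame_support::weights::Weight;

// Illustration of the approximation in the removed comment: the marginal weight of one
// extra message is (roughly) the two-message result minus the one-message result.
fn approximate_per_message_weight(receive_two: Weight, receive_one: Weight) -> Weight {
	receive_two.saturating_sub(receive_one)
}
```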
- receive_two_messages_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_n_messages_proof(n: Linear<1, { max_msgs::() }>) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(n); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=22, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 22, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: // * proof includes outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is successfully dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. // // The weight of outbound lane state delivery would be - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. - receive_single_message_proof_with_outbound_lane_state { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + // `weight(receive_single_message_proof_with_outbound_lane_state) - + // weight(receive_single_message_proof)`. This won't be super-accurate if message has non-zero + // dispatch weight, but estimation should be close enough to real weight. 
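Throughout these benchmarks `StorageProofSize` is replaced by `UnverifiedStorageProofParams`. Only the two constructors visible in this diff are assumed in the sketch below:

```rust
use bp_runtime::UnverifiedStorageProofParams;

// Sketch: `default()` asks for a minimal proof, `from_db_size(..)` for a proof of
// (approximately) the given size in bytes, as used by the benchmarks in this file.
fn proof_params_for(wants_large_proof: bool) -> UnverifiedStorageProofParams {
	if wants_large_proof {
		UnverifiedStorageProofParams::from_db_size(16 * 1024)
	} else {
		UnverifiedStorageProofParams::default()
	}
}
```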
+ #[benchmark] + fn receive_single_message_proof_with_outbound_lane_state() { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, + oldest_unpruned_nonce: setup.last_nonce(), + latest_received_nonce: ReceiveMessagesProofSetup::::LATEST_RECEIVED_NONCE, + latest_generated_nonce: setup.last_nonce(), }), is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + proof_params: UnverifiedStorageProofParams::from_db_size( + EXPECTED_DEFAULT_MESSAGE_LENGTH, + ), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - let lane_state = crate::InboundLanes::::get(&T::bench_lane_id()); - assert_eq!(lane_state.last_delivered_nonce(), 21); - assert_eq!(lane_state.last_confirmed_nonce, 20); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 1KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. - receive_single_message_proof_1_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 16KB; + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following + // conditions: + // * the proof has large leaf with total size ranging between 1KB and 16KB; // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; // * message is dispatched (reminder: dispatch weight should be minimal); // * message requires all heavy checks done by dispatcher. - // - // Size of proof grows because it contains extra trie nodes in it. 
- // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. - receive_single_message_proof_16_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_n_bytes_message_proof( + /// Proof size in KB + n: Linear<1, { 16 * 1024 }>, + ) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(16 * 1024), + proof_params: UnverifiedStorageProofParams::from_db_size(n), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, + + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, ); + + // verification code + setup.check_last_nonce(); } // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: @@ -284,7 +340,8 @@ benchmarks_instance_pallet! { // * relayer account does not exist (in practice it needs to exist in production environment). // // This is base benchmark for all other confirmations delivery benchmarks. - receive_delivery_proof_for_single_message { + #[benchmark] + fn receive_delivery_proof_for_single_message() { let relayer_id: T::AccountId = account("relayer", 0, SEED); // send message that we're going to confirm @@ -302,13 +359,21 @@ benchmarks_instance_pallet! { relayers: vec![UnrewardedRelayer { relayer: relayer_id.clone(), messages: DeliveredMessages::new(1), - }].into_iter().collect(), + }] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 1); assert!(T::is_relayer_rewarded(&relayer_id)); } @@ -320,7 +385,8 @@ benchmarks_instance_pallet! { // Additional weight for paying single-message reward to the same relayer could be computed // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) // - weight(receive_delivery_proof_for_single_message)`. - receive_delivery_proof_for_two_messages_by_single_relayer { + #[benchmark] + fn receive_delivery_proof_for_two_messages_by_single_relayer() { let relayer_id: T::AccountId = account("relayer", 0, SEED); // send message that we're going to confirm @@ -341,13 +407,21 @@ benchmarks_instance_pallet! 
{ relayers: vec![UnrewardedRelayer { relayer: relayer_id.clone(), messages: delivered_messages, - }].into_iter().collect(), + }] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); assert!(T::is_relayer_rewarded(&relayer_id)); } @@ -359,7 +433,8 @@ benchmarks_instance_pallet! { // Additional weight for paying reward to the next relayer could be computed // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. - receive_delivery_proof_for_two_messages_by_two_relayers { + #[benchmark] + fn receive_delivery_proof_for_two_messages_by_two_relayers() { let relayer1_id: T::AccountId = account("relayer1", 1, SEED); let relayer2_id: T::AccountId = account("relayer2", 2, SEED); @@ -385,13 +460,21 @@ benchmarks_instance_pallet! { relayer: relayer2_id.clone(), messages: DeliveredMessages::new(2), }, - ].into_iter().collect(), + ] + .into_iter() + .collect(), last_confirmed_nonce: 0, }, - size: StorageProofSize::Minimal(0), + proof_params: UnverifiedStorageProofParams::default(), }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) - verify { + + #[extrinsic_call] + receive_messages_delivery_proof( + RawOrigin::Signed(relayer1_id.clone()), + proof, + relayers_state, + ); + assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); assert!(T::is_relayer_rewarded(&relayer1_id)); assert!(T::is_relayer_rewarded(&relayer2_id)); @@ -411,51 +494,38 @@ benchmarks_instance_pallet! { // * inbound lane already has state, so it needs to be read and decoded; // * message is **SUCCESSFULLY** dispatched; // * message requires all heavy checks done by dispatcher. - receive_single_message_proof_with_dispatch { - // maybe dispatch weight relies on the message size too? - let i in EXPECTED_DEFAULT_MESSAGE_LENGTH .. 
EXPECTED_DEFAULT_MESSAGE_LENGTH * 16; - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - + #[benchmark] + fn receive_single_n_bytes_message_proof_with_dispatch( + /// Proof size in KB + n: Linear<1, { 16 * 1024 }>, + ) { + // setup code + let setup = ReceiveMessagesProofSetup::::new(1); let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { lane: T::bench_lane_id(), - message_nonces: 21..=21, + message_nonces: setup.nonces(), outbound_lane_data: None, is_successful_dispatch_expected: true, - size: StorageProofSize::Minimal(i), + proof_params: UnverifiedStorageProofParams::from_db_size(n), }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_successfully_dispatched(21)); - } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} + #[extrinsic_call] + receive_messages_proof( + RawOrigin::Signed(setup.relayer_id_on_tgt()), + setup.relayer_id_on_src(), + Box::new(proof), + setup.msgs_count, + dispatch_weight, + ); -fn send_regular_message, I: 'static>() { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(BoundedVec::try_from(vec![]).expect("We craft valid messages")); -} + // verification code + setup.check_last_nonce(); + assert!(T::is_message_successfully_dispatched(setup.last_nonce())); + } -fn receive_messages, I: 'static>(nonce: MessageNonce) { - let mut inbound_lane_storage = - RuntimeInboundLaneStorage::::from_lane_id(T::bench_lane_id()); - inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: T::bridged_relayer_id(), - messages: DeliveredMessages::new(nonce), - }] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }); + impl_benchmark_test_suite!( + Pallet, + crate::tests::mock::new_test_ext(), + crate::tests::mock::TestRuntime + ); } diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index da1698e6e0370..7ef4599a93c48 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -16,15 +16,15 @@ //! Everything about incoming messages receival. -use crate::Config; +use crate::{BridgedChainOf, Config}; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceptionResult, UnrewardedRelayer, + ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, + OutboundLaneData, ReceptionResult, UnrewardedRelayer, }; +use bp_runtime::AccountIdOf; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::Get; use scale_info::{Type, TypeInfo}; use sp_runtime::RuntimeDebug; use sp_std::prelude::PartialEq; @@ -55,10 +55,12 @@ pub trait InboundLaneStorage { /// /// The encoding of this type matches encoding of the corresponding `MessageData`. 
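The `inbound_lane.rs` changes that begin here key the stored lane state by the bridged chain's account id and bound its maximal encoded size by `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX`. A hedged sketch of that size estimate; the `MaxEncodedLen` bound on the relayer id is an assumption of this sketch, not taken from the diff:

```rust
use bp_messages::InboundLaneData;
use codec::MaxEncodedLen;

// Sketch only: worst-case encoded size of an inbound lane state, estimated from the
// maximal number of unrewarded relayer entries, mirroring the `max_encoded_len`
// computation in the hunk below.
fn max_lane_state_encoded_len<RelayerId: MaxEncodedLen>(max_relayer_entries: usize) -> usize {
	InboundLaneData::<RelayerId>::encoded_size_hint(max_relayer_entries).unwrap_or(usize::MAX)
}
```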
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct StoredInboundLaneData, I: 'static>(pub InboundLaneData); +pub struct StoredInboundLaneData, I: 'static>( + pub InboundLaneData>>, +); impl, I: 'static> sp_std::ops::Deref for StoredInboundLaneData { - type Target = InboundLaneData; + type Target = InboundLaneData>>; fn deref(&self) -> &Self::Target { &self.0 @@ -78,7 +80,7 @@ impl, I: 'static> Default for StoredInboundLaneData { } impl, I: 'static> From> - for InboundLaneData + for InboundLaneData>> { fn from(data: StoredInboundLaneData) -> Self { data.0 @@ -86,7 +88,7 @@ impl, I: 'static> From> } impl, I: 'static> EncodeLike> - for InboundLaneData + for InboundLaneData>> { } @@ -94,14 +96,14 @@ impl, I: 'static> TypeInfo for StoredInboundLaneData { type Identity = Self; fn type_info() -> Type { - InboundLaneData::::type_info() + InboundLaneData::>>::type_info() } } impl, I: 'static> MaxEncodedLen for StoredInboundLaneData { fn max_encoded_len() -> usize { - InboundLaneData::::encoded_size_hint( - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize, + InboundLaneData::>>::encoded_size_hint( + BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize, ) .unwrap_or(usize::MAX) } @@ -216,10 +218,10 @@ mod tests { use super::*; use crate::{ inbound_lane, - mock::{ + tests::mock::{ dispatch_result, inbound_message_data, inbound_unrewarded_relayers_state, run_test, - unrewarded_relayer, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, - TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, + unrewarded_relayer, BridgedChain, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, + TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, }, RuntimeInboundLaneStorage, }; @@ -372,8 +374,7 @@ mod tests { fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = - ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); + let max_nonce = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; for current_nonce in 1..max_nonce + 1 { assert_eq!( lane.receive_message::( @@ -409,7 +410,7 @@ mod tests { fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); + let max_nonce = BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; for current_nonce in 1..=max_nonce { assert_eq!( lane.receive_message::( diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index e31a4542056cb..bf105b1404018 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -41,8 +41,8 @@ pub use outbound_lane::StoredMessagePayload; pub use weights::WeightInfo; pub use weights_ext::{ ensure_able_to_receive_confirmation, ensure_able_to_receive_message, - ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, - EXTRA_STORAGE_PROOF_SIZE, + ensure_maximal_message_dispatch, ensure_weights_are_correct, WeightInfoExt, + EXPECTED_DEFAULT_MESSAGE_LENGTH, EXTRA_STORAGE_PROOF_SIZE, }; use crate::{ @@ -50,20 +50,23 @@ use crate::{ outbound_lane::{OutboundLane, OutboundLaneStorage, ReceptionConfirmationError}, }; +use bp_header_chain::HeaderChain; use bp_messages::{ source_chain::{ - DeliveryConfirmationPayments, OnMessagesDelivered, SendMessageArtifacts, TargetHeaderChain, + DeliveryConfirmationPayments, FromBridgedChainMessagesDeliveryProof, OnMessagesDelivered, + 
SendMessageArtifacts, }, target_chain::{ - DeliveryPayments, DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, - SourceHeaderChain, + DeliveryPayments, DispatchMessage, FromBridgedChainMessagesProof, MessageDispatch, + ProvedLaneMessages, ProvedMessages, }, - DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, MessageKey, MessageNonce, - MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, - UnrewardedRelayersState, VerificationError, + ChainWithMessages, DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, + MessageKey, MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, + OutboundMessageDetails, UnrewardedRelayersState, VerificationError, }; use bp_runtime::{ - BasicOperatingMode, ChainId, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, Size, + AccountIdOf, BasicOperatingMode, HashOf, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, + Size, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{dispatch::PostDispatchInfo, ensure, fail, traits::Get, DefaultNoBound}; @@ -72,6 +75,8 @@ use sp_std::{marker::PhantomData, prelude::*}; mod inbound_lane; mod outbound_lane; +mod proofs; +mod tests; mod weights_ext; pub mod weights; @@ -79,10 +84,9 @@ pub mod weights; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -#[cfg(test)] -mod mock; - pub use pallet::*; +#[cfg(feature = "test-helpers")] +pub use tests::*; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "runtime::bridge-messages"; @@ -105,76 +109,39 @@ pub mod pallet { /// Benchmarks results from runtime we're plugged into. type WeightInfo: WeightInfoExt; - /// Gets the chain id value from the instance. - #[pallet::constant] - type BridgedChainId: Get; + /// This chain type. + type ThisChain: ChainWithMessages; + /// Bridged chain type. + type BridgedChain: ChainWithMessages; + /// Bridged chain headers provider. + type BridgedHeaderChain: HeaderChain; /// Get all active outbound lanes that the message pallet is serving. type ActiveOutboundLanes: Get<&'static [LaneId]>; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to - /// the source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. - type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than - /// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries - /// will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. - /// Transaction that is declaring more messages than this value, will be rejected. Even if - /// these messages are from different lanes. 
- type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Maximal encoded size of the outbound payload. - #[pallet::constant] - type MaximalOutboundPayloadSize: Get; + /// Payload type of outbound messages. This payload is dispatched on the bridged chain. type OutboundPayload: Parameter + Size; - /// Payload type of inbound messages. This payload is dispatched on this chain. type InboundPayload: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the - /// bridged chain. - type InboundRelayer: Parameter + MaxEncodedLen; - /// Delivery payments. - type DeliveryPayments: DeliveryPayments; - - // Types that are used by outbound_lane (on source chain). - /// Target header chain. - type TargetHeaderChain: TargetHeaderChain; - /// Delivery confirmation payments. + /// Handler for relayer payments that happen during message delivery transaction. + type DeliveryPayments: DeliveryPayments; + /// Handler for relayer payments that happen during message delivery confirmation + /// transaction. type DeliveryConfirmationPayments: DeliveryConfirmationPayments; /// Delivery confirmation callback. type OnMessagesDelivered: OnMessagesDelivered; - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. + /// Message dispatch handler. type MessageDispatch: MessageDispatch; } - /// Shortcut to messages proof type for Config. - pub type MessagesProofOf = - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof; - /// Shortcut to messages delivery proof type for Config. - pub type MessagesDeliveryProofOf = - <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - >>::MessagesDeliveryProof; + /// Shortcut to this chain type for Config. + pub type ThisChainOf = >::ThisChain; + /// Shortcut to bridged chain type for Config. + pub type BridgedChainOf = >::BridgedChain; + /// Shortcut to bridged header chain type for Config. + pub type BridgedHeaderChainOf = >::BridgedHeaderChain; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -265,11 +232,11 @@ pub mod pallet { /// The call may succeed, but some messages may not be delivered e.g. if they are not fit /// into the unrewarded relayers vector. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))] + #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(&**proof, *messages_count, *dispatch_weight))] pub fn receive_messages_proof( origin: OriginFor, - relayer_id_at_bridged_chain: T::InboundRelayer, - proof: MessagesProofOf, + relayer_id_at_bridged_chain: AccountIdOf>, + proof: Box>>>, messages_count: u32, dispatch_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -278,7 +245,8 @@ pub mod pallet { // reject transactions that are declaring too many messages ensure!( - MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), + MessageNonce::from(messages_count) <= + BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, Error::::TooManyMessagesInTheProof ); @@ -296,22 +264,19 @@ pub mod pallet { // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible // to get pre-computed value (and it has been already computed by the executive). 
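The comment above refers to the declared-weight versus actual-weight pattern used by `receive_messages_proof`: the call recomputes the weight it was declared with, dispatches, and may report a smaller actual weight so the unused part is refunded. A free-standing sketch of that idea (not the pallet's code):

```rust
use frame_support::{
	dispatch::{Pays, PostDispatchInfo},
	weights::Weight,
};

// Sketch of the refund pattern only: report the smaller of declared and measured weight
// back to the executive so the submitter is not charged for weight that was not used.
fn post_dispatch_info(declared_weight: Weight, actual_weight: Weight) -> PostDispatchInfo {
	PostDispatchInfo {
		actual_weight: Some(actual_weight.min(declared_weight)),
		pays_fee: Pays::Yes,
	}
}
```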
let declared_weight = T::WeightInfo::receive_messages_proof_weight( - &proof, + &*proof, messages_count, dispatch_weight, ); let mut actual_weight = declared_weight; // verify messages proof && convert proof into messages - let messages = verify_and_decode_messages_proof::< - T::SourceHeaderChain, - T::InboundPayload, - >(proof, messages_count) - .map_err(|err| { - log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,); + let messages = verify_and_decode_messages_proof::(*proof, messages_count) + .map_err(|err| { + log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,); - Error::::InvalidMessagesProof - })?; + Error::::InvalidMessagesProof + })?; // dispatch messages and (optionally) update lane(s) state(s) let mut total_messages = 0; @@ -424,14 +389,14 @@ pub mod pallet { ))] pub fn receive_messages_delivery_proof( origin: OriginFor, - proof: MessagesDeliveryProofOf, + proof: FromBridgedChainMessagesDeliveryProof>>, mut relayers_state: UnrewardedRelayersState, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; let proof_size = proof.size(); let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof) + let (lane_id, lane_data) = proofs::verify_messages_delivery_proof::(proof) .map_err(|err| { log::trace!( target: LOG_TARGET, @@ -542,8 +507,6 @@ pub mod pallet { InactiveOutboundLane, /// The inbound message dispatcher is inactive. MessageDispatchInactive, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier(VerificationError), /// Message has been treated as invalid by the pallet logic. MessageRejectedByPallet(VerificationError), /// Submitter has failed to pay fee for delivering and dispatching messages. @@ -674,7 +637,9 @@ pub mod pallet { } /// Return inbound lane data. - pub fn inbound_lane_data(lane: LaneId) -> InboundLaneData { + pub fn inbound_lane_data( + lane: LaneId, + ) -> InboundLaneData>> { InboundLanes::::get(lane).0 } } @@ -714,18 +679,6 @@ where // let's check if outbound lane is active ensure!(T::ActiveOutboundLanes::get().contains(&lane), Error::::InactiveOutboundLane); - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(message).map_err(|err| { - log::trace!( - target: LOG_TARGET, - "Message to lane {:?} is rejected by target chain: {:?}", - lane, - err, - ); - - Error::::MessageRejectedByChainVerifier(err) - })?; - Ok(SendMessageArgs { lane_id: lane, payload: StoredMessagePayload::::try_from(message.encode()).map_err(|_| { @@ -785,7 +738,7 @@ fn outbound_lane, I: 'static>( /// Runtime inbound lane storage. struct RuntimeInboundLaneStorage, I: 'static = ()> { lane_id: LaneId, - cached_data: Option>, + cached_data: Option>>>, _phantom: PhantomData, } @@ -802,39 +755,39 @@ impl, I: 'static> RuntimeInboundLaneStorage { /// maximal configured. /// /// Maximal inbound lane state set size is configured by the - /// `MaxUnrewardedRelayerEntriesAtInboundLane` constant from the pallet configuration. The PoV + /// `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` constant from the pallet configuration. The PoV /// of the call includes the maximal size of inbound lane state. If the actual size is smaller, /// we may subtract extra bytes from this component. 
pub fn extra_proof_size_bytes(&mut self) -> u64 { let max_encoded_len = StoredInboundLaneData::::max_encoded_len(); let relayers_count = self.get_or_init_data().relayers.len(); let actual_encoded_len = - InboundLaneData::::encoded_size_hint(relayers_count) + InboundLaneData::>>::encoded_size_hint(relayers_count) .unwrap_or(usize::MAX); max_encoded_len.saturating_sub(actual_encoded_len) as _ } } impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { - type Relayer = T::InboundRelayer; + type Relayer = AccountIdOf>; fn id(&self) -> LaneId { self.lane_id } fn max_unrewarded_relayer_entries(&self) -> MessageNonce { - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + BridgedChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX } fn max_unconfirmed_messages(&self) -> MessageNonce { - T::MaxUnconfirmedMessagesAtInboundLane::get() + BridgedChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX } - fn get_or_init_data(&mut self) -> InboundLaneData { + fn get_or_init_data(&mut self) -> InboundLaneData>> { match self.cached_data { Some(ref data) => data.clone(), None => { - let data: InboundLaneData = + let data: InboundLaneData>> = InboundLanes::::get(self.lane_id).into(); self.cached_data = Some(data.clone()); data @@ -842,7 +795,7 @@ impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage< } } - fn set_data(&mut self, data: InboundLaneData) { + fn set_data(&mut self, data: InboundLaneData>>) { self.cached_data = Some(data.clone()); InboundLanes::::insert(self.lane_id, StoredInboundLaneData::(data)) } @@ -887,14 +840,14 @@ impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorag } /// Verify messages proof and return proved messages with decoded payload. -fn verify_and_decode_messages_proof( - proof: Chain::MessagesProof, +fn verify_and_decode_messages_proof, I: 'static>( + proof: FromBridgedChainMessagesProof>>, messages_count: u32, -) -> Result>, VerificationError> { - // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check - // guarantees that the `message_count` is sane and Vec may be allocated. +) -> Result>, VerificationError> { + // `receive_messages_proof` weight formula and `MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX` + // check guarantees that the `message_count` is sane and Vec may be allocated. 
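// A simplified sketch of the `extra_proof_size_bytes` idea above: the call is pre-charged
// for the maximal encoded inbound lane state, and the difference between that maximum and
// the actually stored state may be refunded from the PoV component of the weight. The
// sizes below are plain byte counts, not real SCALE `max_encoded_len()` values.
const MAX_RELAYER_ENTRIES: usize = 16;
const RELAYER_ENTRY_SIZE: usize = 40; // assumed per-entry encoded size
const BASE_LANE_DATA_SIZE: usize = 8;

fn max_encoded_len() -> usize {
    BASE_LANE_DATA_SIZE + MAX_RELAYER_ENTRIES * RELAYER_ENTRY_SIZE
}

fn actual_encoded_len(relayers_count: usize) -> usize {
    BASE_LANE_DATA_SIZE + relayers_count * RELAYER_ENTRY_SIZE
}

/// Bytes that may be refunded from the pre-dispatch proof size.
fn extra_proof_size_bytes(relayers_count: usize) -> u64 {
    max_encoded_len().saturating_sub(actual_encoded_len(relayers_count)) as u64
}

fn main() {
    // full lane state: nothing to refund
    assert_eq!(extra_proof_size_bytes(MAX_RELAYER_ENTRIES), 0);
    // one missing relayer entry: one entry worth of PoV is refunded
    assert_eq!(extra_proof_size_bytes(MAX_RELAYER_ENTRIES - 1), RELAYER_ENTRY_SIZE as u64);
}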
// (tx with too many messages will either be rejected from the pool, or will fail earlier) - Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { + proofs::verify_messages_proof::(proof, messages_count).map(|messages_by_lane| { messages_by_lane .into_iter() .map(|(lane, lane_data)| { @@ -909,1209 +862,3 @@ fn verify_and_decode_messages_proof::set_block_number(1); - System::::reset_events(); - } - - fn send_regular_message(lane_id: LaneId) { - get_ready_for_events(); - - let outbound_lane = outbound_lane::(lane_id); - let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); - let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) - .expect("validate_message has failed"); - let artifacts = Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); - - // check event with assigned nonce - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessageAccepted { - lane_id, - nonce: message_nonce - }), - topics: vec![], - }], - ); - } - - fn receive_messages_delivery_proof() { - System::::set_block_number(1); - System::::reset_events(); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessagesDelivered { - lane_id: TEST_LANE_ID, - messages: DeliveredMessages::new(1), - }), - topics: vec![], - }], - ); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put(MessagesOperatingMode::Basic( - BasicOperatingMode::Halted, - )); - - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - }); - } - - #[test] - fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put( - MessagesOperatingMode::RejectingOutboundMessages, - ); - - assert_noop!( - 
Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ),); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - }); - } - - #[test] - fn send_message_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - }); - } - - #[test] - fn send_message_rejects_too_large_message() { - run_test(|| { - let mut message_payload = message_payload(1, 0); - // the payload isn't simply extra, so it'll definitely overflow - // `MAX_OUTBOUND_PAYLOAD_SIZE` if we add `MAX_OUTBOUND_PAYLOAD_SIZE` bytes to extra - message_payload - .extra - .extend_from_slice(&[0u8; MAX_OUTBOUND_PAYLOAD_SIZE as usize]); - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, &message_payload.clone(),), - Error::::MessageRejectedByPallet( - VerificationError::MessageTooLarge - ), - ); - - // let's check that we're able to send `MAX_OUTBOUND_PAYLOAD_SIZE` messages - while message_payload.encoded_size() as u32 > MAX_OUTBOUND_PAYLOAD_SIZE { - message_payload.extra.pop(); - } - assert_eq!(message_payload.encoded_size() as u32, MAX_OUTBOUND_PAYLOAD_SIZE); - - let valid_message = - Pallet::::validate_message(TEST_LANE_ID, &message_payload) - .expect("validate_message has failed"); - Pallet::::send_message(valid_message); - }) - } - - #[test] - fn chain_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with this payload are rejected by target chain verifier - assert_noop!( - Pallet::::validate_message( - TEST_LANE_ID, - &PAYLOAD_REJECTED_BY_TARGET_CHAIN, - ), - Error::::MessageRejectedByChainVerifier(VerificationError::Other( - mock::TEST_ERROR - )), - ); - }); - } - - #[test] - fn receive_messages_proof_works() { - run_test(|| { - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).0.last_delivered_nonce(), 1); - - assert!(TestDeliveryPayments::is_reward_paid(1)); - }); - } - - #[test] - fn receive_messages_proof_updates_confirmed_message_nonce() { - run_test(|| { - // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 8, - relayers: vec![ - unrewarded_relayer(9, 9, TEST_RELAYER_A), - unrewarded_relayer(10, 10, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 10, - }, - ); - - // message proof includes outbound lane state with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = - Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = - Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }); - - 
assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - message_proof, - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!( - InboundLanes::::get(TEST_LANE_ID).0, - InboundLaneData { - last_confirmed_nonce: 9, - relayers: vec![ - unrewarded_relayer(10, 10, TEST_RELAYER_B), - unrewarded_relayer(11, 11, TEST_RELAYER_A) - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 11, - }, - ); - }); - } - - #[test] - fn receive_messages_fails_if_dispatcher_is_inactive() { - run_test(|| { - TestMessageDispatch::deactivate(); - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::MessageDispatchInactive, - ); - }); - } - - #[test] - fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { - run_test(|| { - let mut declared_weight = REGULAR_PAYLOAD.declared_weight; - *declared_weight.ref_time_mut() -= 1; - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - declared_weight, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn receive_messages_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Err(()).into(), - 1, - Weight::zero(), - ), - Error::::InvalidMessagesProof, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_proof_with_too_many_messages() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - u32::MAX, - Weight::zero(), - ), - Error::::TooManyMessagesInTheProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - - assert_eq!( - OutboundLanes::::get(TEST_LANE_ID).latest_received_nonce, - 1, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rewards_relayers() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - let single_message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(), - ..Default::default() - }, - ))); - let single_message_delivery_proof_size = single_message_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - single_message_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ); - assert_ok!(result); - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(single_message_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - 
assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 1))); - - // this reports delivery of both message 1 and message 2 => reward is paid only to - // TEST_RELAYER_B - let two_messages_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - ..Default::default() - }, - ))); - let two_messages_delivery_proof_size = two_messages_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - two_messages_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 2, - }, - ); - assert_ok!(result); - // even though the pre-dispatch weight was for two messages, the actual weight is - // for single message only - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(two_messages_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0))); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Err(())), - Default::default(), - ), - Error::::InvalidMessagesDeliveryProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { - run_test(|| { - // when number of relayers entries is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when number of messages is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 1, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when last delivered nonce is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - 
..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 2, - last_delivered_nonce: 8, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - }); - } - - #[test] - fn receive_messages_accepts_single_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(1, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![invalid_message]).into(), - 1, - Weight::zero(), /* weight may be zero in this case (all messages are - * improperly encoded) */ - ),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1,); - }); - } - - #[test] - fn receive_messages_accepts_batch_with_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(2, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok( - vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),] - ) - .into(), - 3, - REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, - ),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 3,); - }); - } - - #[test] - fn actual_dispatch_weight_does_not_overflow() { - run_test(|| { - let message1 = message(1, message_payload(0, u64::MAX / 2)); - let message2 = message(2, message_payload(0, u64::MAX / 2)); - let message3 = message(3, message_payload(0, u64::MAX / 2)); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - Weight::MAX, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn ref_time_refund_from_receive_messages_proof_works() { - run_test(|| { - fn submit_with_unspent_weight( - nonce: MessageNonce, - unspent_weight: u64, - ) -> (Weight, Weight) { - let mut payload = REGULAR_PAYLOAD; - *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight; - let proof = Ok(vec![message(nonce, payload)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - let result = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .expect("delivery has failed"); - let post_dispatch_weight = - result.actual_weight.expect("receive_messages_proof always returns Some"); - - // message delivery transactions are never free - assert_eq!(result.pays_fee, Pays::Yes); - - (pre_dispatch_weight, post_dispatch_weight) - } - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(1, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - - // when dispatch is returning `unspent_weight = declared_weight` - let (pre, post) = - submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time()); - assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when dispatch is returning `unspent_weight > declared_weight` - let (pre, post) = - submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1); - 
assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when there's no unspent weight - let (pre, post) = submit_with_unspent_weight(4, 0); - assert_eq!(post.ref_time(), pre.ref_time()); - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(5, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - }); - } - - #[test] - fn proof_size_refund_from_receive_messages_proof_works() { - run_test(|| { - let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // if there's maximal number of unrewarded relayer entries at the inbound lane, then - // `proof_size` is unchanged in post-dispatch weight - let proof: TestMessagesProof = Ok(vec![message(101, REGULAR_PAYLOAD)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof.clone(), - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size()); - - // if count of unrewarded relayer entries is less than maximal, then some `proof_size` - // must be refunded - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - 1 - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert!( - post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(), - "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}", - post_dispatch_weight.proof_size(), - pre_dispatch_weight.proof_size(), - ); - }); - } - - #[test] - fn messages_delivered_callbacks_are_called() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // messages 1+2 are confirmed in 1 tx, message 3 in a separate tx - // dispatch of message 2 has failed - let mut delivered_messages_1_and_2 = DeliveredMessages::new(1); - delivered_messages_1_and_2.note_dispatched_message(); - let messages_1_and_2_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_messages_1_and_2.clone(), - }] - .into_iter() - .collect(), - }, - )); - let delivered_message_3 = DeliveredMessages::new(3); - let messages_3_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { relayer: 0, messages: delivered_message_3 }] - .into_iter() - .collect(), - }, - )); - - // first tx with messages 1+2 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_1_and_2_proof), - 
UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - last_delivered_nonce: 2, - }, - )); - // second tx with message 3 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_3_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 3, - }, - )); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected( - ) { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; - // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` - // returns `last_confirmed_nonce`; - // 3) it means that we're going to confirm delivery of messages 1..=1; - // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // number of actually confirmed messages is `1`. - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, - ))), - UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, - ), - Error::::ReceptionConfirmation( - ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected - ), - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_messages::storage_keys::operating_mode_key("Messages").0, - ); - - assert_eq!( - OutboundMessages::::storage_map_final_key(MessageKey { - lane_id: TEST_LANE_ID, - nonce: 42 - }), - bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, - ); - - assert_eq!( - OutboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - - assert_eq!( - InboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - } - - #[test] - fn inbound_message_details_works() { - run_test(|| { - assert_eq!( - Pallet::::inbound_message_data( - TEST_LANE_ID, - REGULAR_PAYLOAD.encode(), - OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 }, - ), - InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight }, - ); - }); - } - - #[test] - fn on_idle_callback_respects_remaining_weight() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 4, - relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 4, - total_messages: 4, - last_delivered_nonce: 4, - }, - )); - - // all 4 messages may be pruned now - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 4 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - System::::set_block_number(2); - - // if passed wight is too low to do 
anything - let dbw = DbWeight::get(); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 1)), - Weight::zero(), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - - // if passed wight is enough to prune single message - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 2)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - - // if passed wight is enough to prune two more messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 3)), - dbw.reads_writes(1, 3), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 4 - ); - - // if passed wight is enough to prune many messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 5 - ); - }); - } - - #[test] - fn on_idle_callback_is_rotating_lanes_to_prune() { - run_test(|| { - // send + receive confirmation for lane 1 - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - // send + receive confirmation for lane 2 - send_regular_message(TEST_LANE_ID_2); - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID_2, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - // nothing is pruned yet - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#2.on_idle lane messages of lane 1 are pruned - let dbw = DbWeight::get(); - System::::set_block_number(2); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#3.on_idle lane messages of lane 2 are pruned - System::::set_block_number(3); - - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 2 - ); - }); - } - - #[test] - fn outbound_message_from_unconfigured_lane_is_rejected() { - run_test(|| { - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID_3, ®ULAR_PAYLOAD,), - Error::::InactiveOutboundLane, - ); - }); - } - - #[test] - fn test_bridge_messages_call_is_correctly_defined() { - let account_id = 1; - let message_proof: TestMessagesProof = Ok(vec![message(1, REGULAR_PAYLOAD)]).into(); - let message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))); - let unrewarded_relayer_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - 
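// A loose model of the `on_idle` pruning behaviour exercised by the test above: pruning
// spends roughly one write per pruned message plus one write for the lane state, and it
// stops as soon as the remaining block weight cannot cover the next message. Weights are
// reduced to abstract "write units" here; the real code uses `RuntimeDbWeight`.
fn prune_messages(oldest_unpruned: &mut u64, latest_received: u64, remaining_writes: u64) -> u64 {
    // one write is reserved for updating the lane state itself
    let mut budget = remaining_writes.saturating_sub(1);
    let mut pruned = 0;
    while *oldest_unpruned <= latest_received && budget > 0 {
        *oldest_unpruned += 1;
        budget -= 1;
        pruned += 1;
    }
    if pruned > 0 {
        pruned + 1 // pruned messages + lane state update
    } else {
        0
    }
}

fn main() {
    let mut oldest = 1;
    // not enough weight to prune anything
    assert_eq!(prune_messages(&mut oldest, 4, 1), 0);
    assert_eq!(oldest, 1);
    // enough weight for a single message
    assert_eq!(prune_messages(&mut oldest, 4, 2), 2);
    assert_eq!(oldest, 2);
    // plenty of weight: prune everything that was confirmed
    assert_eq!(prune_messages(&mut oldest, 4, 100), 4);
    assert_eq!(oldest, 5);
}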
last_delivered_nonce: 1, - ..Default::default() - }; - - let direct_receive_messages_proof_call = Call::::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof.clone(), - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - let indirect_receive_messages_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof, - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - assert_eq!( - direct_receive_messages_proof_call.encode(), - indirect_receive_messages_proof_call.encode() - ); - - let direct_receive_messages_delivery_proof_call = - Call::::receive_messages_delivery_proof { - proof: message_delivery_proof.clone(), - relayers_state: unrewarded_relayer_state.clone(), - }; - let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_delivery_proof { - proof: message_delivery_proof, - relayers_state: unrewarded_relayer_state, - }; - assert_eq!( - direct_receive_messages_delivery_proof_call.encode(), - indirect_receive_messages_delivery_proof_call.encode() - ); - } - - generate_owned_bridge_module_tests!( - MessagesOperatingMode::Basic(BasicOperatingMode::Normal), - MessagesOperatingMode::Basic(BasicOperatingMode::Halted) - ); - - #[test] - fn inbound_storage_extra_proof_size_bytes_works() { - fn relayer_entry() -> UnrewardedRelayer { - UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } } - } - - fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { - lane_id: Default::default(), - cached_data: Some(InboundLaneData { - relayers: vec![relayer_entry(); relayer_entries].into_iter().collect(), - last_confirmed_nonce: 0, - }), - _phantom: Default::default(), - } - } - - let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // when we have exactly `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0); - - // when we have less than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!( - storage(max_entries - 1).extra_proof_size_bytes(), - relayer_entry().encode().len() as u64 - ); - assert_eq!( - storage(max_entries - 2).extra_proof_size_bytes(), - 2 * relayer_entry().encode().len() as u64 - ); - - // when we have more than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - // (shall not happen in practice) - assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0); - } - - #[test] - fn maybe_outbound_lanes_count_returns_correct_value() { - assert_eq!( - MaybeOutboundLanesCount::::get(), - Some(mock::ActiveOutboundLanes::get().len() as u32) - ); - } -} diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index acef5546d2a64..fcdddf199dc65 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -18,16 +18,18 @@ use crate::{Config, LOG_TARGET}; -use bp_messages::{DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer}; +use bp_messages::{ + ChainWithMessages, DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer, +}; use codec::{Decode, Encode}; use frame_support::{ + traits::Get, 
weights::{RuntimeDbWeight, Weight}, BoundedVec, PalletError, }; -use num_traits::Zero; use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::collections::vec_deque::VecDeque; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData}; /// Outbound lane storage. pub trait OutboundLaneStorage { @@ -48,8 +50,17 @@ pub trait OutboundLaneStorage { fn remove_message(&mut self, nonce: &MessageNonce); } +/// Limit for the `StoredMessagePayload` vector. +pub struct StoredMessagePayloadLimit(PhantomData<(T, I)>); + +impl, I: 'static> Get for StoredMessagePayloadLimit { + fn get() -> u32 { + T::BridgedChain::maximal_incoming_message_size() + } +} + /// Outbound message data wrapper that implements `MaxEncodedLen`. -pub type StoredMessagePayload = BoundedVec>::MaximalOutboundPayloadSize>; +pub type StoredMessagePayload = BoundedVec>; /// Result of messages receival confirmation. #[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] @@ -204,11 +215,11 @@ fn ensure_unrewarded_relayers_are_correct( mod tests { use super::*; use crate::{ - mock::{ + outbound_lane, + tests::mock::{ outbound_message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, }, - outbound_lane, }; use frame_support::weights::constants::RocksDbWeight; use sp_std::ops::RangeInclusive; @@ -263,12 +274,43 @@ mod tests { assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(Some(delivered_messages(1..=3))), ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + }); + } + + #[test] + fn confirm_partial_delivery_works() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 2); + assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + + assert_eq!( + lane.confirm_delivery(3, 2, &unrewarded_relayers(1..=2)), + Ok(Some(delivered_messages(1..=2))), + ); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 2); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + + assert_eq!( + lane.confirm_delivery(3, 3, &unrewarded_relayers(3..=3)), + Ok(Some(delivered_messages(3..=3))), + ); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); }); } @@ -281,6 +323,7 @@ mod tests { lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(Some(delivered_messages(1..=3))), @@ -288,10 +331,12 @@ mod tests { 
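// A hedged sketch of the `StoredMessagePayloadLimit` adapter introduced above: a zero-sized
// type that forwards `Get<u32>` to a chain-provided limit, so the bound of the stored
// payload vector tracks the bridged chain definition. `Get` and the chain trait are
// re-declared locally for illustration; the real ones live in `frame_support`/`bp_messages`.
use std::marker::PhantomData;

trait Get<T> {
    fn get() -> T;
}

trait BridgedChainLike {
    fn maximal_incoming_message_size() -> u32;
}

struct TestBridgedChain;

impl BridgedChainLike for TestBridgedChain {
    fn maximal_incoming_message_size() -> u32 {
        4096
    }
}

/// Limit for the stored message payload vector, derived from the bridged chain.
struct StoredMessagePayloadLimit<C>(PhantomData<C>);

impl<C: BridgedChainLike> Get<u32> for StoredMessagePayloadLimit<C> {
    fn get() -> u32 {
        C::maximal_incoming_message_size()
    }
}

fn main() {
    assert_eq!(StoredMessagePayloadLimit::<TestBridgedChain>::get(), 4096);
}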
assert_eq!(lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(None),); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); assert_eq!(lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), Ok(None),); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); }); } @@ -310,8 +355,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(2..=30).into_iter()) - .chain(unrewarded_relayers(3..=3).into_iter()) + .chain(unrewarded_relayers(2..=30)) + .chain(unrewarded_relayers(3..=3)) .collect(), ), Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), @@ -326,8 +371,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(2..=1).into_iter()) - .chain(unrewarded_relayers(2..=3).into_iter()) + .chain(unrewarded_relayers(2..=1)) + .chain(unrewarded_relayers(2..=3)) .collect(), ), Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry), @@ -341,8 +386,8 @@ mod tests { 3, &unrewarded_relayers(1..=1) .into_iter() - .chain(unrewarded_relayers(3..=3).into_iter()) - .chain(unrewarded_relayers(2..=2).into_iter()) + .chain(unrewarded_relayers(3..=3)) + .chain(unrewarded_relayers(2..=2)) .collect(), ), Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries), diff --git a/bridges/modules/messages/src/proofs.rs b/bridges/modules/messages/src/proofs.rs new file mode 100644 index 0000000000000..18367029d72cd --- /dev/null +++ b/bridges/modules/messages/src/proofs.rs @@ -0,0 +1,562 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for messages and delivery proof verification. + +use crate::{BridgedChainOf, BridgedHeaderChainOf, Config}; + +use bp_header_chain::{HeaderChain, HeaderChainError}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::{FromBridgedChainMessagesProof, ProvedLaneMessages, ProvedMessages}, + ChainWithMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, + OutboundLaneData, VerificationError, +}; +use bp_runtime::{ + HashOf, HasherOf, RangeInclusiveExt, RawStorageProof, StorageProofChecker, StorageProofError, +}; +use codec::Decode; +use sp_std::vec::Vec; + +/// 'Parsed' message delivery proof - inbound lane id and its state. +pub(crate) type ParsedMessagesDeliveryProofFromBridgedChain = + (LaneId, InboundLaneData<::AccountId>); + +/// Verify proof of Bridged -> This chain messages. +/// +/// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged +/// parachains, please use the `verify_messages_proof_from_parachain`. 
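// A hedged sketch of the outbound-lane confirmation rules exercised by the tests above:
// a confirmation may not claim messages that were never sent, a repeated or older
// confirmation is a no-op, and a valid confirmation advances `latest_received_nonce` and
// yields the newly confirmed range. Lane storage is reduced to two nonces for illustration.
type MessageNonce = u64;

#[derive(Debug, PartialEq)]
enum ConfirmationError {
    FailedToConfirmFutureMessages,
}

struct OutboundLane {
    latest_generated_nonce: MessageNonce,
    latest_received_nonce: MessageNonce,
}

impl OutboundLane {
    fn confirm_delivery(
        &mut self,
        latest_delivered_nonce: MessageNonce,
    ) -> Result<Option<std::ops::RangeInclusive<MessageNonce>>, ConfirmationError> {
        if latest_delivered_nonce > self.latest_generated_nonce {
            return Err(ConfirmationError::FailedToConfirmFutureMessages);
        }
        if latest_delivered_nonce <= self.latest_received_nonce {
            return Ok(None); // nothing new is confirmed
        }
        let newly_confirmed = (self.latest_received_nonce + 1)..=latest_delivered_nonce;
        self.latest_received_nonce = latest_delivered_nonce;
        Ok(Some(newly_confirmed))
    }
}

fn main() {
    let mut lane = OutboundLane { latest_generated_nonce: 3, latest_received_nonce: 0 };
    assert_eq!(lane.confirm_delivery(2), Ok(Some(1..=2)));
    assert_eq!(lane.confirm_delivery(2), Ok(None));
    assert_eq!(lane.confirm_delivery(3), Ok(Some(3..=3)));
    assert_eq!(lane.confirm_delivery(5), Err(ConfirmationError::FailedToConfirmFutureMessages));
}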
+/// +/// The `messages_count` argument verification (sane limits) is supposed to be made +/// outside of this function. This function only verifies that the proof declares exactly +/// `messages_count` messages. +pub fn verify_messages_proof, I: 'static>( + proof: FromBridgedChainMessagesProof>>, + messages_count: u32, +) -> Result, VerificationError> { + let FromBridgedChainMessagesProof { + bridged_header_hash, + storage_proof, + lane, + nonces_start, + nonces_end, + } = proof; + let mut parser: MessagesStorageProofAdapter = + MessagesStorageProofAdapter::try_new_with_verified_storage_proof( + bridged_header_hash, + storage_proof, + ) + .map_err(VerificationError::HeaderChain)?; + let nonces_range = nonces_start..=nonces_end; + + // receiving proofs where end < begin is ok (if proof includes outbound lane state) + let messages_in_the_proof = nonces_range.checked_len().unwrap_or(0); + if messages_in_the_proof != MessageNonce::from(messages_count) { + return Err(VerificationError::MessagesCountMismatch) + } + + // Read messages first. All messages that are claimed to be in the proof must + // be in the proof. So any error in `read_value`, or even missing value is fatal. + // + // Mind that we allow proofs with no messages if outbound lane state is proved. + let mut messages = Vec::with_capacity(messages_in_the_proof as _); + for nonce in nonces_range { + let message_key = MessageKey { lane_id: lane, nonce }; + let message_payload = parser + .read_and_decode_message_payload(&message_key) + .map_err(VerificationError::MessageStorage)?; + messages.push(Message { key: message_key, payload: message_payload }); + } + + // Now let's check if proof contains outbound lane state proof. It is optional, so + // we simply ignore `read_value` errors and missing value. + let proved_lane_messages = ProvedLaneMessages { + lane_state: parser + .read_and_decode_outbound_lane_data(&lane) + .map_err(VerificationError::OutboundLaneStorage)?, + messages, + }; + + // Now we may actually check if the proof is empty or not. + if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { + return Err(VerificationError::EmptyMessageProof) + } + + // Check that the storage proof doesn't have any untouched keys. + parser.ensure_no_unused_keys().map_err(VerificationError::StorageProof)?; + + // We only support single lane messages in this generated_schema + let mut proved_messages = ProvedMessages::new(); + proved_messages.insert(lane, proved_lane_messages); + + Ok(proved_messages) +} + +/// Verify proof of This -> Bridged chain messages delivery. +pub fn verify_messages_delivery_proof, I: 'static>( + proof: FromBridgedChainMessagesDeliveryProof>>, +) -> Result, VerificationError> { + let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = proof; + let mut parser: MessagesStorageProofAdapter = + MessagesStorageProofAdapter::try_new_with_verified_storage_proof( + bridged_header_hash, + storage_proof, + ) + .map_err(VerificationError::HeaderChain)?; + // Messages delivery proof is just proof of single storage key read => any error + // is fatal. 
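// A small sketch of the `nonces_range.checked_len().unwrap_or(0)` check above: the length
// of the inclusive nonce range is computed without overflow, an "empty" range (end < start)
// counts as zero messages, and the result must match the declared `messages_count`.
// `checked_len` is written as a local helper here, not the bp-runtime extension trait.
type MessageNonce = u64;

fn checked_len(start: MessageNonce, end: MessageNonce) -> Option<MessageNonce> {
    if end < start {
        return None; // empty range, e.g. a proof that only carries outbound lane state
    }
    end.checked_sub(start)?.checked_add(1)
}

fn verify_declared_count(
    start: MessageNonce,
    end: MessageNonce,
    messages_count: u32,
) -> Result<(), &'static str> {
    let messages_in_the_proof = checked_len(start, end).unwrap_or(0);
    if messages_in_the_proof != MessageNonce::from(messages_count) {
        return Err("MessagesCountMismatch");
    }
    Ok(())
}

fn main() {
    assert!(verify_declared_count(1, 10, 10).is_ok());
    // end < start is allowed, but then zero messages must be declared
    assert!(verify_declared_count(5, 4, 0).is_ok());
    // nonces_end = u64::MAX must not panic; it simply mismatches the declared count
    assert!(verify_declared_count(1, u64::MAX, u32::MAX).is_err());
}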
+ let storage_inbound_lane_data_key = bp_messages::storage_keys::inbound_lane_data_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &lane, + ); + let inbound_lane_data = parser + .read_and_decode_mandatory_value(&storage_inbound_lane_data_key) + .map_err(VerificationError::InboundLaneStorage)?; + + // check that the storage proof doesn't have any untouched trie nodes + parser.ensure_no_unused_keys().map_err(VerificationError::StorageProof)?; + + Ok((lane, inbound_lane_data)) +} + +/// Abstraction over storage proof manipulation, hiding implementation details of actual storage +/// proofs. +trait StorageProofAdapter, I: 'static> { + fn read_and_decode_mandatory_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result; + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError>; + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError>; + + fn read_and_decode_outbound_lane_data( + &mut self, + lane_id: &LaneId, + ) -> Result, StorageProofError> { + let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + lane_id, + ); + self.read_and_decode_optional_value(&storage_outbound_lane_data_key) + } + + fn read_and_decode_message_payload( + &mut self, + message_key: &MessageKey, + ) -> Result { + let storage_message_key = bp_messages::storage_keys::message_key( + T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &message_key.lane_id, + message_key.nonce, + ); + self.read_and_decode_mandatory_value(&storage_message_key) + } +} + +/// Actual storage proof adapter for messages proofs. +type MessagesStorageProofAdapter = StorageProofCheckerAdapter; + +/// A `StorageProofAdapter` implementation for raw storage proofs. 
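// A hedged, simplified model of the `StorageProofAdapter` idea above: message values are
// mandatory reads, the outbound lane state is an optional read, and after parsing, the
// proof must have no untouched entries left or it is rejected. A `HashMap` stands in for
// the real verified storage proof checker, purely for illustration.
use std::collections::HashMap;

struct ProofAdapter {
    // key -> value; entries are removed once read, so leftovers mean unused keys
    entries: HashMap<Vec<u8>, Vec<u8>>,
}

impl ProofAdapter {
    fn read_mandatory(&mut self, key: &[u8]) -> Result<Vec<u8>, &'static str> {
        self.entries.remove(key).ok_or("missing mandatory value")
    }

    fn read_optional(&mut self, key: &[u8]) -> Option<Vec<u8>> {
        self.entries.remove(key)
    }

    fn ensure_no_unused_keys(self) -> Result<(), &'static str> {
        if self.entries.is_empty() {
            Ok(())
        } else {
            Err("UnusedKey")
        }
    }
}

fn main() {
    let mut adapter = ProofAdapter {
        entries: HashMap::from([(b"message/1".to_vec(), vec![42])]),
    };
    assert!(adapter.read_mandatory(b"message/1").is_ok());
    // the optional lane state may be absent without failing the proof
    assert!(adapter.read_optional(b"outbound_lane_data").is_none());
    assert!(adapter.ensure_no_unused_keys().is_ok());
}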
+struct StorageProofCheckerAdapter, I: 'static> { + storage: StorageProofChecker>>, + _dummy: sp_std::marker::PhantomData<(T, I)>, +} + +impl, I: 'static> StorageProofCheckerAdapter { + fn try_new_with_verified_storage_proof( + bridged_header_hash: HashOf>, + storage_proof: RawStorageProof, + ) -> Result { + BridgedHeaderChainOf::::verify_storage_proof(bridged_header_hash, storage_proof).map( + |storage| StorageProofCheckerAdapter:: { storage, _dummy: Default::default() }, + ) + } +} + +impl, I: 'static> StorageProofAdapter for StorageProofCheckerAdapter { + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError> { + self.storage.read_and_decode_opt_value(key.as_ref()) + } + + fn read_and_decode_mandatory_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result { + self.storage.read_and_decode_mandatory_value(key.as_ref()) + } + + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError> { + self.storage.ensure_no_unused_nodes() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{ + messages_generation::{ + encode_all_messages, encode_lane_data, generate_dummy_message, + prepare_messages_storage_proof, + }, + mock::*, + }; + + use bp_header_chain::StoredHeaderDataBuilder; + use bp_runtime::{HeaderId, StorageProofError}; + use codec::Encode; + use sp_runtime::traits::Header; + + fn using_messages_proof( + nonces_end: MessageNonce, + outbound_lane_data: Option, + encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, + encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, + add_duplicate_key: bool, + add_unused_key: bool, + test: impl Fn(FromBridgedChainMessagesProof) -> R, + ) -> R { + let (state_root, storage_proof) = prepare_messages_storage_proof::( + TEST_LANE_ID, + 1..=nonces_end, + outbound_lane_data, + bp_runtime::UnverifiedStorageProofParams::default(), + generate_dummy_message, + encode_message, + encode_outbound_lane_data, + add_duplicate_key, + add_unused_key, + ); + + sp_io::TestExternalities::new(Default::default()).execute_with(move || { + let bridged_header = BridgedChainHeader::new( + 0, + Default::default(), + state_root, + Default::default(), + Default::default(), + ); + let bridged_header_hash = bridged_header.hash(); + + pallet_bridge_grandpa::BestFinalized::::put(HeaderId( + 0, + bridged_header_hash, + )); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + bridged_header.build(), + ); + test(FromBridgedChainMessagesProof { + bridged_header_hash, + storage_proof, + lane: TEST_LANE_ID, + nonces_start: 1, + nonces_end, + }) + }) + } + + #[test] + fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 5) } + ), + Err(VerificationError::MessagesCountMismatch), + ); + } + + #[test] + fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 15) } + ), + Err(VerificationError::MessagesCountMismatch), + ); + } + + #[test] + fn message_proof_is_rejected_if_header_is_missing_from_the_chain() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { + let bridged_header_hash = + 
pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; + pallet_bridge_grandpa::ImportedHeaders::::remove( + bridged_header_hash, + ); + verify_messages_proof::(proof, 10) + } + ), + Err(VerificationError::HeaderChain(HeaderChainError::UnknownHeader)), + ); + } + + #[test] + fn message_proof_is_rejected_if_header_state_root_mismatches() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { + let bridged_header_hash = + pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + BridgedChainHeader::new( + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + .build(), + ); + verify_messages_proof::(proof, 10) + } + ), + Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( + StorageProofError::StorageRootMismatch + ))), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + true, + false, + |proof| { verify_messages_proof::(proof, 10) }, + ), + Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( + StorageProofError::DuplicateNodes + ))), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_has_unused_trie_nodes() { + assert_eq!( + using_messages_proof( + 10, + None, + encode_all_messages, + encode_lane_data, + false, + true, + |proof| { verify_messages_proof::(proof, 10) }, + ), + Err(VerificationError::StorageProof(StorageProofError::UnusedKey)), + ); + } + + #[test] + fn message_proof_is_rejected_if_required_message_is_missing() { + matches!( + using_messages_proof( + 10, + None, + |n, m| if n != 5 { Some(m.encode()) } else { None }, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 10) + ), + Err(VerificationError::MessageStorage(StorageProofError::EmptyVal)), + ); + } + + #[test] + fn message_proof_is_rejected_if_message_decode_fails() { + matches!( + using_messages_proof( + 10, + None, + |n, m| { + let mut m = m.encode(); + if n == 5 { + m = vec![42] + } + Some(m) + }, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 10), + ), + Err(VerificationError::MessageStorage(StorageProofError::DecodeError)), + ); + } + + #[test] + fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { + matches!( + using_messages_proof( + 10, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + |d| { + let mut d = d.encode(); + d.truncate(1); + d + }, + false, + false, + |proof| verify_messages_proof::(proof, 10), + ), + Err(VerificationError::OutboundLaneStorage(StorageProofError::DecodeError)), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_is_empty() { + assert_eq!( + using_messages_proof( + 0, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |proof| { verify_messages_proof::(proof, 0) }, + ), + Err(VerificationError::EmptyMessageProof), + ); + } + + #[test] + fn non_empty_message_proof_without_messages_is_accepted() { + assert_eq!( + using_messages_proof( + 0, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 0), + ), + Ok(vec![( + TEST_LANE_ID, + ProvedLaneMessages { + lane_state: 
Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: Vec::new(), + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn non_empty_message_proof_is_accepted() { + assert_eq!( + using_messages_proof( + 1, + Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + encode_all_messages, + encode_lane_data, + false, + false, + |proof| verify_messages_proof::(proof, 1), + ), + Ok(vec![( + TEST_LANE_ID, + ProvedLaneMessages { + lane_state: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: vec![Message { + key: MessageKey { lane_id: TEST_LANE_ID, nonce: 1 }, + payload: vec![42], + }], + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn verify_messages_proof_does_not_panic_if_messages_count_mismatches() { + assert_eq!( + using_messages_proof( + 1, + None, + encode_all_messages, + encode_lane_data, + false, + false, + |mut proof| { + proof.nonces_end = u64::MAX; + verify_messages_proof::(proof, u32::MAX) + }, + ), + Err(VerificationError::MessagesCountMismatch), + ); + } +} diff --git a/bridges/bin/runtime-common/src/messages_generation.rs b/bridges/modules/messages/src/tests/messages_generation.rs similarity index 62% rename from bridges/bin/runtime-common/src/messages_generation.rs rename to bridges/modules/messages/src/tests/messages_generation.rs index c37aaa5d4d537..6c4867fa6de39 100644 --- a/bridges/bin/runtime-common/src/messages_generation.rs +++ b/bridges/modules/messages/src/tests/messages_generation.rs @@ -16,17 +16,23 @@ //! Helpers for generating message storage proofs, that are used by tests and by benchmarks. -use crate::messages::{AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain}; - use bp_messages::{ - storage_keys, InboundLaneData, LaneId, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, + storage_keys, ChainWithMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, + MessagePayload, OutboundLaneData, +}; +use bp_runtime::{ + grow_storage_value, record_all_trie_keys, AccountIdOf, Chain, HashOf, HasherOf, + RawStorageProof, UnverifiedStorageProofParams, }; -use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize}; use codec::Encode; use sp_std::{ops::RangeInclusive, prelude::*}; use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; +/// Dummy message generation function. +pub fn generate_dummy_message(_: MessageNonce) -> MessagePayload { + vec![42] +} + /// Simple and correct message data encode function. pub fn encode_all_messages(_: MessageNonce, m: &MessagePayload) -> Option> { Some(m.encode()) @@ -40,18 +46,20 @@ pub fn encode_lane_data(d: &OutboundLaneData) -> Vec { /// Prepare storage proof of given messages. /// /// Returns state trie root and nodes with prepared messages. 
-pub fn prepare_messages_storage_proof( +#[allow(clippy::too_many_arguments)] +pub fn prepare_messages_storage_proof( lane: LaneId, message_nonces: RangeInclusive, outbound_lane_data: Option, - size: StorageProofSize, - message_payload: MessagePayload, + proof_params: UnverifiedStorageProofParams, + generate_message: impl Fn(MessageNonce) -> MessagePayload, encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, -) -> (HashOf>, RawStorageProof) + add_duplicate_key: bool, + add_unused_key: bool, +) -> (HashOf, RawStorageProof) where - B: MessageBridge, - HashOf>: Copy + Default, + HashOf: Copy + Default, { // prepare Bridged chain storage with messages and (optionally) outbound lane state let message_count = message_nonces.end().saturating_sub(*message_nonces.start()) + 1; @@ -60,22 +68,22 @@ where let mut mdb = MemoryDB::default(); { let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); + TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); // insert messages for (i, nonce) in message_nonces.into_iter().enumerate() { let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = match encode_message(nonce, &message_payload) { + let message_payload = match encode_message(nonce, &generate_message(nonce)) { Some(message_payload) => if i == 0 { - grow_trie_leaf_value(message_payload, size) + grow_storage_value(message_payload, &proof_params) } else { message_payload }, None => continue, }; let storage_key = storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, + ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &message_key.lane_id, message_key.nonce, ) @@ -89,8 +97,11 @@ where // insert outbound lane state if let Some(outbound_lane_data) = outbound_lane_data.as_ref().map(encode_outbound_lane_data) { - let storage_key = - storage_keys::outbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; + let storage_key = storage_keys::outbound_lane_data_key( + ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + &lane, + ) + .0; trie.insert(&storage_key, &outbound_lane_data) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in benchmarks"); @@ -99,52 +110,54 @@ where } // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); + let mut storage_proof = + record_all_trie_keys::>, _>(&mdb, &root) + .map_err(|_| "record_all_trie_keys has failed") + .expect("record_all_trie_keys should not fail in benchmarks"); + + if add_duplicate_key { + assert!(!storage_proof.is_empty()); + let node = storage_proof.pop().unwrap(); + storage_proof.push(node.clone()); + storage_proof.push(node); + } + + if add_unused_key { + storage_proof.push(b"unused_value".to_vec()); + } + (root, storage_proof) } /// Prepare storage proof of given messages delivery. /// /// Returns state trie root and nodes with prepared messages. 
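// A small sketch of the negative-test corruption performed above: a raw storage proof is
// modelled as a list of trie nodes, so duplicating the last node or appending a node the
// verifier never touches is enough to exercise the `DuplicateNodes` / `UnusedKey` error
// paths. `RawStorageProof` is simplified to `Vec<Vec<u8>>` here.
type RawStorageProof = Vec<Vec<u8>>;

fn add_duplicate_node(proof: &mut RawStorageProof) {
    let node = proof.pop().expect("proof must not be empty");
    proof.push(node.clone());
    proof.push(node);
}

fn add_unused_node(proof: &mut RawStorageProof) {
    proof.push(b"unused_value".to_vec());
}

fn main() {
    let mut proof: RawStorageProof = vec![vec![1, 2, 3]];
    add_duplicate_node(&mut proof);
    assert_eq!(proof.len(), 2);
    add_unused_node(&mut proof);
    assert_eq!(proof.len(), 3);
}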
-pub fn prepare_message_delivery_storage_proof( +pub fn prepare_message_delivery_storage_proof( lane: LaneId, - inbound_lane_data: InboundLaneData>>, - size: StorageProofSize, -) -> (HashOf>, RawStorageProof) + inbound_lane_data: InboundLaneData>, + proof_params: UnverifiedStorageProofParams, +) -> (HashOf, RawStorageProof) where - B: MessageBridge, + HashOf: Copy + Default, { // prepare Bridged chain storage with inbound lane state - let storage_key = storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; + let storage_key = + storage_keys::inbound_lane_data_key(ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &lane).0; let mut root = Default::default(); let mut mdb = MemoryDB::default(); { let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); - let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size); + TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); + let inbound_lane_data = grow_storage_value(inbound_lane_data.encode(), &proof_params); trie.insert(&storage_key, &inbound_lane_data) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in benchmarks"); } // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) + let storage_proof = record_all_trie_keys::>, _>(&mdb, &root) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); (root, storage_proof) } - -/// Add extra data to the trie leaf value so that it'll be of given size. -pub fn grow_trie_leaf_value(mut value: Vec, size: StorageProofSize) -> Vec { - match size { - StorageProofSize::Minimal(_) => (), - StorageProofSize::HasLargeLeaf(size) if size as usize > value.len() => { - value.extend(sp_std::iter::repeat(42u8).take(size as usize - value.len())); - }, - StorageProofSize::HasLargeLeaf(_) => (), - } - value -} diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/tests/mock.rs similarity index 62% rename from bridges/modules/messages/src/mock.rs rename to bridges/modules/messages/src/tests/mock.rs index ec63f15b94b52..ffdd536830b5f 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/tests/mock.rs @@ -17,30 +17,43 @@ // From construct_runtime macro #![allow(clippy::from_over_into)] -use crate::{Config, StoredMessagePayload}; +use crate::{ + tests::messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + Config, StoredMessagePayload, +}; +use bp_header_chain::{ChainWithGrandpa, StoredHeaderData}; use bp_messages::{ calc_relayers_rewards, - source_chain::{DeliveryConfirmationPayments, OnMessagesDelivered, TargetHeaderChain}, + source_chain::{ + DeliveryConfirmationPayments, FromBridgedChainMessagesDeliveryProof, OnMessagesDelivered, + }, target_chain::{ - DeliveryPayments, DispatchMessage, DispatchMessageData, MessageDispatch, - ProvedLaneMessages, ProvedMessages, SourceHeaderChain, + DeliveryPayments, DispatchMessage, DispatchMessageData, FromBridgedChainMessagesProof, + MessageDispatch, }, - DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, - UnrewardedRelayer, UnrewardedRelayersState, VerificationError, + ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, + MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, +}; +use bp_runtime::{ + messages::MessageDispatchResult, Chain, ChainId, Size, 
UnverifiedStorageProofParams, }; -use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_runtime::BuildStorage; -use std::{ - collections::{BTreeMap, VecDeque}, - ops::RangeInclusive, +use sp_core::H256; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, ConstU32}, + BuildStorage, StateVersion, }; +use std::{collections::VecDeque, ops::RangeInclusive}; pub type AccountId = u64; pub type Balance = u64; @@ -62,6 +75,77 @@ pub type TestMessageFee = u64; pub type TestRelayer = u64; pub type TestDispatchLevelResult = (); +pub struct ThisChain; + +impl Chain for ThisChain { + const ID: ChainId = *b"ttch"; + + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = SubstrateHeader; + type AccountId = AccountId; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + + fn max_extrinsic_size() -> u32 { + u32::MAX + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +impl ChainWithMessages for ThisChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = "WithThisChainBridgeMessages"; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 128; +} + +pub struct BridgedChain; + +pub type BridgedHeaderHash = H256; +pub type BridgedChainHeader = SubstrateHeader; + +impl Chain for BridgedChain { + const ID: ChainId = *b"tbch"; + + type BlockNumber = u64; + type Hash = BridgedHeaderHash; + type Hasher = BlakeTwo256; + type Header = BridgedChainHeader; + type AccountId = TestRelayer; + type Balance = Balance; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + + fn max_extrinsic_size() -> u32 { + 4096 + } + + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } +} + +impl ChainWithGrandpa for BridgedChain { + const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "WithBridgedChainBridgeGrandpa"; + const MAX_AUTHORITIES_COUNT: u32 = 16; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 4; + const MAX_MANDATORY_HEADER_SIZE: u32 = 4096; + const AVERAGE_HEADER_SIZE: u32 = 4096; +} + +impl ChainWithMessages for BridgedChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = "WithBridgedChainBridgeMessages"; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 128; +} + type Block = frame_system::mocking::MockBlock; use crate as pallet_bridge_messages; @@ -71,6 +155,7 @@ frame_support::construct_runtime! 
{ { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Event}, + BridgedChainGrandpa: pallet_bridge_grandpa::{Pallet, Call, Event}, Messages: pallet_bridge_messages::{Pallet, Call, Event}, } } @@ -86,14 +171,20 @@ impl frame_system::Config for TestRuntime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } +impl pallet_bridge_grandpa::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type BridgedChain = BridgedChain; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<1_024>; + type HeadersToKeep = ConstU32<8>; + type WeightInfo = pallet_bridge_grandpa::weights::BridgeWeight; +} + parameter_types! { pub const MaxMessagesToPruneAtOnce: u64 = 10; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 128; pub const TestBridgedChainId: bp_runtime::ChainId = *b"test"; pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID, TEST_LANE_ID_2]; } @@ -104,24 +195,22 @@ pub type TestWeightInfo = (); impl Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = TestWeightInfo; + + type ThisChain = ThisChain; + type BridgedChain = BridgedChain; + type BridgedHeaderChain = BridgedChainGrandpa; + type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = frame_support::traits::ConstU32; type OutboundPayload = TestPayload; type InboundPayload = TestPayload; - type InboundRelayer = TestRelayer; type DeliveryPayments = TestDeliveryPayments; - type TargetHeaderChain = TestTargetHeaderChain; type DeliveryConfirmationPayments = TestDeliveryConfirmationPayments; type OnMessagesDelivered = TestOnMessagesDelivered; - type SourceHeaderChain = TestSourceHeaderChain; type MessageDispatch = TestMessageDispatch; - type BridgedChainId = TestBridgedChainId; } #[cfg(feature = "runtime-benchmarks")] @@ -132,29 +221,26 @@ impl crate::benchmarking::Config<()> for TestRuntime { fn prepare_message_proof( params: crate::benchmarking::MessageProofParams, - ) -> (TestMessagesProof, Weight) { - // in mock run we only care about benchmarks correctness, not the benchmark results - // => ignore size related arguments - let (messages, total_dispatch_weight) = - params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).fold( - (Vec::new(), Weight::zero()), - |(mut messages, total_dispatch_weight), message| { - let weight = REGULAR_PAYLOAD.declared_weight; - messages.push(message); - (messages, total_dispatch_weight.saturating_add(weight)) - }, - ); - let mut proof: TestMessagesProof = Ok(messages).into(); - proof.result.as_mut().unwrap().get_mut(0).unwrap().1.lane_state = params.outbound_lane_data; - (proof, total_dispatch_weight) + ) -> (FromBridgedChainMessagesProof, Weight) { + use bp_runtime::RangeInclusiveExt; + + let dispatch_weight = + REGULAR_PAYLOAD.declared_weight * params.message_nonces.checked_len().unwrap_or(0); + ( + *prepare_messages_proof( + params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).collect(), + params.outbound_lane_data, + ), + dispatch_weight, + ) } fn prepare_message_delivery_proof( params: crate::benchmarking::MessageDeliveryProofParams, - ) -> 
TestMessagesDeliveryProof { + ) -> FromBridgedChainMessagesDeliveryProof { // in mock run we only care about benchmarks correctness, not the benchmark results // => ignore size related arguments - TestMessagesDeliveryProof(Ok((params.lane, params.inbound_lane_data))) + prepare_messages_delivery_proof(params.lane, params.inbound_lane_data) } fn is_relayer_rewarded(_relayer: &AccountId) -> bool { @@ -168,9 +254,6 @@ impl Size for TestPayload { } } -/// Maximal outbound payload size. -pub const MAX_OUTBOUND_PAYLOAD_SIZE: u32 = 4096; - /// Account that has balance to use in tests. pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; @@ -183,9 +266,6 @@ pub const TEST_RELAYER_B: AccountId = 101; /// Account id of additional test relayer - C. pub const TEST_RELAYER_C: AccountId = 102; -/// Error that is returned by all test implementations. -pub const TEST_ERROR: &str = "Test error"; - /// Lane that we're using in tests. pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]); @@ -198,71 +278,6 @@ pub const TEST_LANE_ID_3: LaneId = LaneId([0, 0, 0, 3]); /// Regular message payload. pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50); -/// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50); - -/// Vec of proved messages, grouped by lane. -pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages)>; - -/// Test messages proof. -#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct TestMessagesProof { - pub result: Result, -} - -impl Size for TestMessagesProof { - fn size(&self) -> u32 { - 0 - } -} - -impl From, ()>> for TestMessagesProof { - fn from(result: Result, ()>) -> Self { - Self { - result: result.map(|messages| { - let mut messages_by_lane: BTreeMap> = - BTreeMap::new(); - for message in messages { - messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message); - } - messages_by_lane.into_iter().collect() - }), - } - } -} - -/// Messages delivery proof used in tests. -#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq, TypeInfo)] -pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); - -impl Size for TestMessagesDeliveryProof { - fn size(&self) -> u32 { - 0 - } -} - -/// Target header chain that is used in tests. -#[derive(Debug, Default)] -pub struct TestTargetHeaderChain; - -impl TargetHeaderChain for TestTargetHeaderChain { - type MessagesDeliveryProof = TestMessagesDeliveryProof; - - fn verify_message(payload: &TestPayload) -> Result<(), VerificationError> { - if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { - Err(VerificationError::Other(TEST_ERROR)) - } else { - Ok(()) - } - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - proof.0.map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - /// Reward payments at the target chain during delivery transaction. #[derive(Debug, Default)] pub struct TestDeliveryPayments; @@ -323,24 +338,6 @@ impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayment } } -/// Source header chain that is used in tests. 
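// The `TestTargetHeaderChain`/`TestSourceHeaderChain` mocks removed here used to wrap a plain
// `Result`, so the pallet tests never exercised real proof verification. With this change the
// tests build genuine storage proofs instead, e.g. (illustrative):
//
//     let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None);
//
// `prepare_messages_proof`/`prepare_messages_delivery_proof` (defined further below) also insert
// a bridged header with the matching state root into `pallet_bridge_grandpa::ImportedHeaders`,
// which is what allows `receive_messages_proof` to verify the proof against a finalized header.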
-#[derive(Debug)] -pub struct TestSourceHeaderChain; - -impl SourceHeaderChain for TestSourceHeaderChain { - type MessagesProof = TestMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - proof - .result - .map(|proof| proof.into_iter().collect()) - .map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - /// Test message dispatcher. #[derive(Debug)] pub struct TestMessageDispatch; @@ -459,3 +456,75 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub fn run_test(test: impl FnOnce() -> T) -> T { new_test_ext().execute_with(test) } + +/// Prepare valid storage proof for given messages and insert appropriate header to the +/// bridged header chain. +/// +/// Since this function changes the runtime storage, you can't "inline" it in the +/// `asset_noop` macro calls. +pub fn prepare_messages_proof( + messages: Vec, + outbound_lane_data: Option, +) -> Box> { + // first - let's generate storage proof + let lane = messages.first().unwrap().key.lane_id; + let nonces_start = messages.first().unwrap().key.nonce; + let nonces_end = messages.last().unwrap().key.nonce; + let (storage_root, storage_proof) = prepare_messages_storage_proof::( + TEST_LANE_ID, + nonces_start..=nonces_end, + outbound_lane_data, + UnverifiedStorageProofParams::default(), + |nonce| messages[(nonce - nonces_start) as usize].payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); + + // let's now insert bridged chain header into the storage + let bridged_header_hash = Default::default(); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + StoredHeaderData { number: 0, state_root: storage_root }, + ); + + Box::new(FromBridgedChainMessagesProof:: { + bridged_header_hash, + storage_proof, + lane, + nonces_start, + nonces_end, + }) +} + +/// Prepare valid storage proof for given messages and insert appropriate header to the +/// bridged header chain. +/// +/// Since this function changes the runtime storage, you can't "inline" it in the +/// `asset_noop` macro calls. +pub fn prepare_messages_delivery_proof( + lane: LaneId, + inbound_lane_data: InboundLaneData, +) -> FromBridgedChainMessagesDeliveryProof { + // first - let's generate storage proof + let (storage_root, storage_proof) = + prepare_message_delivery_storage_proof::( + lane, + inbound_lane_data, + UnverifiedStorageProofParams::default(), + ); + + // let's now insert bridged chain header into the storage + let bridged_header_hash = Default::default(); + pallet_bridge_grandpa::ImportedHeaders::::insert( + bridged_header_hash, + StoredHeaderData { number: 0, state_root: storage_root }, + ); + + FromBridgedChainMessagesDeliveryProof:: { + bridged_header_hash, + storage_proof, + lane, + } +} diff --git a/bridges/modules/messages/src/tests/mod.rs b/bridges/modules/messages/src/tests/mod.rs new file mode 100644 index 0000000000000..c3bde5fc27584 --- /dev/null +++ b/bridges/modules/messages/src/tests/mod.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests and test helpers for messages pallet. + +#![cfg(any(feature = "test-helpers", test))] + +#[cfg(test)] +pub(crate) mod mock; +#[cfg(test)] +mod pallet_tests; + +pub mod messages_generation; diff --git a/bridges/modules/messages/src/tests/pallet_tests.rs b/bridges/modules/messages/src/tests/pallet_tests.rs new file mode 100644 index 0000000000000..42e1042717de0 --- /dev/null +++ b/bridges/modules/messages/src/tests/pallet_tests.rs @@ -0,0 +1,1100 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Pallet-level tests. + +use crate::{ + outbound_lane, + outbound_lane::ReceptionConfirmationError, + tests::mock::{self, RuntimeEvent as TestEvent, *}, + weights_ext::WeightInfoExt, + Call, Config, Error, Event, InboundLanes, MaybeOutboundLanesCount, OutboundLanes, + OutboundMessages, Pallet, PalletOperatingMode, PalletOwner, RuntimeInboundLaneStorage, + StoredInboundLaneData, +}; + +use bp_messages::{ + source_chain::{FromBridgedChainMessagesDeliveryProof, MessagesBridge}, + target_chain::FromBridgedChainMessagesProof, + BridgeMessagesCall, ChainWithMessages, DeliveredMessages, InboundLaneData, + InboundMessageDetails, LaneId, MessageKey, MessageNonce, MessagesOperatingMode, + OutboundLaneData, OutboundMessageDetails, UnrewardedRelayer, UnrewardedRelayersState, + VerificationError, +}; +use bp_runtime::{BasicOperatingMode, PreComputedSize, RangeInclusiveExt, Size}; +use bp_test_utils::generate_owned_bridge_module_tests; +use codec::Encode; +use frame_support::{ + assert_noop, assert_ok, + dispatch::Pays, + storage::generator::{StorageMap, StorageValue}, + traits::Hooks, + weights::Weight, +}; +use frame_system::{EventRecord, Pallet as System, Phase}; +use sp_core::Get; +use sp_runtime::DispatchError; + +fn get_ready_for_events() { + System::::set_block_number(1); + System::::reset_events(); +} + +fn send_regular_message(lane_id: LaneId) { + get_ready_for_events(); + + let outbound_lane = outbound_lane::(lane_id); + let message_nonce = outbound_lane.data().latest_generated_nonce + 1; + let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); + let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) + .expect("validate_message has failed"); + let artifacts = Pallet::::send_message(valid_message); + assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); + + // check event with assigned nonce + assert_eq!( + System::::events(), + vec![EventRecord { + phase: 
Phase::Initialization, + event: TestEvent::Messages(Event::MessageAccepted { lane_id, nonce: message_nonce }), + topics: vec![], + }], + ); +} + +fn receive_messages_delivery_proof() { + System::::set_block_number(1); + System::::reset_events(); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: DeliveredMessages::new(1), + }] + .into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::Messages(Event::MessagesDelivered { + lane_id: TEST_LANE_ID, + messages: DeliveredMessages::new(1), + }), + topics: vec![], + }], + ); +} + +#[test] +fn pallet_rejects_transactions_if_halted() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + PalletOperatingMode::::put(MessagesOperatingMode::Basic( + BasicOperatingMode::Halted, + )); + + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), + Error::::NotOperatingNormally, + ); + + let messages_proof = prepare_messages_proof(vec![message(2, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + messages_proof, + 1, + REGULAR_PAYLOAD.declared_weight, + ), + Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), + ); + + let delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + ), + Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), + ); + }); +} + +#[test] +fn receive_messages_fails_if_dispatcher_is_inactive() { + run_test(|| { + TestMessageDispatch::deactivate(); + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, + REGULAR_PAYLOAD.declared_weight, + ), + Error::::MessageDispatchInactive, + ); + }); +} + +#[test] +fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + PalletOperatingMode::::put( + MessagesOperatingMode::RejectingOutboundMessages, + ); + + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), + Error::::NotOperatingNormally, + ); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None), + 1, + REGULAR_PAYLOAD.declared_weight, + ),); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, + ), + UnrewardedRelayersState { + 
unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + }); +} + +#[test] +fn send_message_works() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + }); +} + +#[test] +fn send_message_rejects_too_large_message() { + run_test(|| { + let mut message_payload = message_payload(1, 0); + // the payload isn't simply extra, so it'll definitely overflow + // `max_outbound_payload_size` if we add `max_outbound_payload_size` bytes to extra + let max_outbound_payload_size = BridgedChain::maximal_incoming_message_size(); + message_payload + .extra + .extend_from_slice(&vec![0u8; max_outbound_payload_size as usize]); + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID, &message_payload.clone(),), + Error::::MessageRejectedByPallet(VerificationError::MessageTooLarge), + ); + + // let's check that we're able to send `max_outbound_payload_size` messages + while message_payload.encoded_size() as u32 > max_outbound_payload_size { + message_payload.extra.pop(); + } + assert_eq!(message_payload.encoded_size() as u32, max_outbound_payload_size); + + let valid_message = + Pallet::::validate_message(TEST_LANE_ID, &message_payload) + .expect("validate_message has failed"); + Pallet::::send_message(valid_message); + }) +} + +#[test] +fn receive_messages_proof_works() { + run_test(|| { + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None), + 1, + REGULAR_PAYLOAD.declared_weight, + )); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).0.last_delivered_nonce(), 1); + + assert!(TestDeliveryPayments::is_reward_paid(1)); + }); +} + +#[test] +fn receive_messages_proof_updates_confirmed_message_nonce() { + run_test(|| { + // say we have received 10 messages && last confirmed message is 8 + InboundLanes::::insert( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 8, + relayers: vec![ + unrewarded_relayer(9, 9, TEST_RELAYER_A), + unrewarded_relayer(10, 10, TEST_RELAYER_B), + ] + .into(), + }, + ); + assert_eq!( + inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 10, + }, + ); + + // message proof includes outbound lane state with latest confirmed message updated to 9 + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof( + vec![message(11, REGULAR_PAYLOAD)], + Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }), + ), + 1, + REGULAR_PAYLOAD.declared_weight, + )); + + assert_eq!( + InboundLanes::::get(TEST_LANE_ID).0, + InboundLaneData { + last_confirmed_nonce: 9, + relayers: vec![ + unrewarded_relayer(10, 10, TEST_RELAYER_B), + unrewarded_relayer(11, 11, TEST_RELAYER_A) + ] + .into(), + }, + ); + assert_eq!( + inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 11, + }, + ); + }); +} + +#[test] +fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { + run_test(|| { + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + let mut declared_weight = REGULAR_PAYLOAD.declared_weight; + *declared_weight.ref_time_mut() -= 1; + + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, 
+ declared_weight, + ), + Error::::InsufficientDispatchWeight + ); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); + }); +} + +#[test] +fn receive_messages_proof_rejects_invalid_proof() { + run_test(|| { + let mut proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + proof.nonces_end += 1; + + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + 1, + Weight::zero(), + ), + Error::::InvalidMessagesProof, + ); + }); +} + +#[test] +fn receive_messages_proof_rejects_proof_with_too_many_messages() { + run_test(|| { + let proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + u32::MAX, + Weight::zero(), + ), + Error::::TooManyMessagesInTheProof, + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_works() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + receive_messages_delivery_proof(); + + assert_eq!(OutboundLanes::::get(TEST_LANE_ID).latest_received_nonce, 1,); + }); +} + +#[test] +fn receive_messages_delivery_proof_rewards_relayers() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + + // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A + let single_message_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + ..Default::default() + }, + ); + let single_message_delivery_proof_size = single_message_delivery_proof.size(); + let result = Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + single_message_delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + ); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + TestWeightInfo::receive_messages_delivery_proof_weight( + &PreComputedSize(single_message_delivery_proof_size as _), + &UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + ) + ); + assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); + assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); + + // this reports delivery of both message 1 and message 2 => reward is paid only to + // TEST_RELAYER_B + let two_messages_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + let two_messages_delivery_proof_size = two_messages_delivery_proof.size(); + let result = Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + two_messages_delivery_proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + last_delivered_nonce: 2, + }, + ); + assert_ok!(result); + // even though the pre-dispatch weight was for two messages, the actual weight is + // for single message only + assert_eq!( + result.unwrap().actual_weight.unwrap(), + TestWeightInfo::receive_messages_delivery_proof_weight( + &PreComputedSize(two_messages_delivery_proof_size as _), + &UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + ) + ); + 
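		// Only the newly confirmed message (nonce 2, delivered by TEST_RELAYER_B) generates a
		// reward at this point: message 1 was already confirmed by the previous delivery proof,
		// so TEST_RELAYER_A receives nothing for it this time.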
assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); + assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); + assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0))); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_invalid_proof() { + run_test(|| { + let mut proof = prepare_messages_delivery_proof(TEST_LANE_ID, Default::default()); + proof.lane = bp_messages::LaneId([42, 42, 42, 42]); + + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + Default::default(), + ), + Error::::InvalidMessagesDeliveryProof, + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { + run_test(|| { + // when number of relayers entries is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 2, + last_delivered_nonce: 2, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + + // when number of messages is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 1, + last_delivered_nonce: 2, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + + // when last delivered nonce is invalid + let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![ + unrewarded_relayer(1, 1, TEST_RELAYER_A), + unrewarded_relayer(2, 2, TEST_RELAYER_B), + ] + .into(), + ..Default::default() + }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 2, + last_delivered_nonce: 8, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + }); +} + +#[test] +fn receive_messages_accepts_single_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(1, REGULAR_PAYLOAD); + invalid_message.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof(vec![invalid_message], None), + 1, + Weight::zero(), /* weight may be zero in this case (all messages are + * improperly encoded) */ + ),); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1,); + }); +} + +#[test] +fn receive_messages_accepts_batch_with_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(2, REGULAR_PAYLOAD); + invalid_message.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + prepare_messages_proof( + vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),], + None + ), + 3, + REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, + ),); + + 
assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 3,); + }); +} + +#[test] +fn actual_dispatch_weight_does_not_overflow() { + run_test(|| { + let message1 = message(1, message_payload(0, u64::MAX / 2)); + let message2 = message(2, message_payload(0, u64::MAX / 2)); + let message3 = message(3, message_payload(0, u64::MAX / 2)); + + let proof = prepare_messages_proof(vec![message1, message2, message3], None); + assert_noop!( + Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + // this may cause overflow if source chain storage is invalid + proof, + 3, + Weight::MAX, + ), + Error::::InsufficientDispatchWeight + ); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); + }); +} + +#[test] +fn ref_time_refund_from_receive_messages_proof_works() { + run_test(|| { + fn submit_with_unspent_weight( + nonce: MessageNonce, + unspent_weight: u64, + ) -> (Weight, Weight) { + let mut payload = REGULAR_PAYLOAD; + *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight; + let proof = prepare_messages_proof(vec![message(nonce, payload)], None); + let messages_count = 1; + let pre_dispatch_weight = + ::WeightInfo::receive_messages_proof_weight( + &*proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); + let result = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .expect("delivery has failed"); + let post_dispatch_weight = + result.actual_weight.expect("receive_messages_proof always returns Some"); + + // message delivery transactions are never free + assert_eq!(result.pays_fee, Pays::Yes); + + (pre_dispatch_weight, post_dispatch_weight) + } + + // when dispatch is returning `unspent_weight < declared_weight` + let (pre, post) = submit_with_unspent_weight(1, 1); + assert_eq!(post.ref_time(), pre.ref_time() - 1); + + // when dispatch is returning `unspent_weight = declared_weight` + let (pre, post) = submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time()); + assert_eq!(post.ref_time(), pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()); + + // when dispatch is returning `unspent_weight > declared_weight` + let (pre, post) = + submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1); + assert_eq!(post.ref_time(), pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time()); + + // when there's no unspent weight + let (pre, post) = submit_with_unspent_weight(4, 0); + assert_eq!(post.ref_time(), pre.ref_time()); + + // when dispatch is returning `unspent_weight < declared_weight` + let (pre, post) = submit_with_unspent_weight(5, 1); + assert_eq!(post.ref_time(), pre.ref_time() - 1); + }); +} + +#[test] +fn proof_size_refund_from_receive_messages_proof_works() { + run_test(|| { + let max_entries = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize; + + // if there's maximal number of unrewarded relayer entries at the inbound lane, then + // `proof_size` is unchanged in post-dispatch weight + let proof = prepare_messages_proof(vec![message(101, REGULAR_PAYLOAD)], None); + let messages_count = 1; + let pre_dispatch_weight = + ::WeightInfo::receive_messages_proof_weight( + &*proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); + InboundLanes::::insert( + TEST_LANE_ID, + StoredInboundLaneData(InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: 42, + messages: DeliveredMessages { begin: 0, end: 100 } + }; + max_entries + ] + 
.into(), + last_confirmed_nonce: 0, + }), + ); + let post_dispatch_weight = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof.clone(), + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .unwrap() + .actual_weight + .unwrap(); + assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size()); + + // if count of unrewarded relayer entries is less than maximal, then some `proof_size` + // must be refunded + InboundLanes::::insert( + TEST_LANE_ID, + StoredInboundLaneData(InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: 42, + messages: DeliveredMessages { begin: 0, end: 100 } + }; + max_entries - 1 + ] + .into(), + last_confirmed_nonce: 0, + }), + ); + let post_dispatch_weight = Pallet::::receive_messages_proof( + RuntimeOrigin::signed(1), + TEST_RELAYER_A, + proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ) + .unwrap() + .actual_weight + .unwrap(); + assert!( + post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(), + "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}", + post_dispatch_weight.proof_size(), + pre_dispatch_weight.proof_size(), + ); + }); +} + +#[test] +fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected() +{ + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(TEST_LANE_ID); + + // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; + // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` returns + // `last_confirmed_nonce`; + // 3) it means that we're going to confirm delivery of messages 1..=1; + // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and numer of + // actually confirmed messages is `1`. 
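		// Concretely: the call below passes `UnrewardedRelayersState::default()` with only
		// `last_delivered_nonce: 1` set, i.e. `total_messages == 0`, while the proved
		// `InboundLaneData { last_confirmed_nonce: 1, .. }` confirms one message, so the call
		// fails with `ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected`.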
+ let proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, + ); + assert_noop!( + Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + proof, + UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, + ), + Error::::ReceptionConfirmation( + ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected + ), + ); + }); +} + +#[test] +fn storage_keys_computed_properly() { + assert_eq!( + PalletOperatingMode::::storage_value_final_key().to_vec(), + bp_messages::storage_keys::operating_mode_key("Messages").0, + ); + + assert_eq!( + OutboundMessages::::storage_map_final_key(MessageKey { + lane_id: TEST_LANE_ID, + nonce: 42 + }), + bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, + ); + + assert_eq!( + OutboundLanes::::storage_map_final_key(TEST_LANE_ID), + bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, + ); + + assert_eq!( + InboundLanes::::storage_map_final_key(TEST_LANE_ID), + bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, + ); +} + +#[test] +fn inbound_message_details_works() { + run_test(|| { + assert_eq!( + Pallet::::inbound_message_data( + TEST_LANE_ID, + REGULAR_PAYLOAD.encode(), + OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 }, + ), + InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight }, + ); + }); +} + +#[test] +fn on_idle_callback_respects_remaining_weight() { + run_test(|| { + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + send_regular_message(TEST_LANE_ID); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 4, + relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)].into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 4, + total_messages: 4, + last_delivered_nonce: 4, + }, + )); + + // all 4 messages may be pruned now + assert_eq!(outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, 4); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + System::::set_block_number(2); + + // if passed wight is too low to do anything + let dbw = DbWeight::get(); + assert_eq!(Pallet::::on_idle(0, dbw.reads_writes(1, 1)), Weight::zero(),); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + + // if passed wight is enough to prune single message + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(1, 2)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + + // if passed wight is enough to prune two more messages + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(1, 3)), + dbw.reads_writes(1, 3), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 4); + + // if passed wight is enough to prune many messages + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 5); + }); +} + +#[test] +fn on_idle_callback_is_rotating_lanes_to_prune() { + run_test(|| { + // send + receive confirmation for lane 1 + send_regular_message(TEST_LANE_ID); + receive_messages_delivery_proof(); + // send + receive confirmation for lane 2 + 
send_regular_message(TEST_LANE_ID_2); + assert_ok!(Pallet::::receive_messages_delivery_proof( + RuntimeOrigin::signed(1), + prepare_messages_delivery_proof( + TEST_LANE_ID_2, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(), + }, + ), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + last_delivered_nonce: 1, + }, + )); + + // nothing is pruned yet + assert_eq!(outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, 1); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 1); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().latest_received_nonce, + 1 + ); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 1 + ); + + // in block#2.on_idle lane messages of lane 1 are pruned + let dbw = DbWeight::get(); + System::::set_block_number(2); + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 1 + ); + + // in block#3.on_idle lane messages of lane 2 are pruned + System::::set_block_number(3); + + assert_eq!( + Pallet::::on_idle(0, dbw.reads_writes(100, 100)), + dbw.reads_writes(1, 2), + ); + assert_eq!(outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, 2); + assert_eq!( + outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, + 2 + ); + }); +} + +#[test] +fn outbound_message_from_unconfigured_lane_is_rejected() { + run_test(|| { + assert_noop!( + Pallet::::validate_message(TEST_LANE_ID_3, ®ULAR_PAYLOAD,), + Error::::InactiveOutboundLane, + ); + }); +} + +#[test] +fn test_bridge_messages_call_is_correctly_defined() { + run_test(|| { + let account_id = 1; + let message_proof = prepare_messages_proof(vec![message(1, REGULAR_PAYLOAD)], None); + let message_delivery_proof = prepare_messages_delivery_proof( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: DeliveredMessages::new(1), + }] + .into(), + }, + ); + let unrewarded_relayer_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + last_delivered_nonce: 1, + ..Default::default() + }; + + let direct_receive_messages_proof_call = Call::::receive_messages_proof { + relayer_id_at_bridged_chain: account_id, + proof: message_proof.clone(), + messages_count: 1, + dispatch_weight: REGULAR_PAYLOAD.declared_weight, + }; + let indirect_receive_messages_proof_call = BridgeMessagesCall::< + AccountId, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, + >::receive_messages_proof { + relayer_id_at_bridged_chain: account_id, + proof: *message_proof, + messages_count: 1, + dispatch_weight: REGULAR_PAYLOAD.declared_weight, + }; + assert_eq!( + direct_receive_messages_proof_call.encode(), + indirect_receive_messages_proof_call.encode() + ); + + let direct_receive_messages_delivery_proof_call = + Call::::receive_messages_delivery_proof { + proof: message_delivery_proof.clone(), + relayers_state: unrewarded_relayer_state.clone(), + }; + let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< + AccountId, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, + >::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state: unrewarded_relayer_state, + }; + assert_eq!( + 
direct_receive_messages_delivery_proof_call.encode(), + indirect_receive_messages_delivery_proof_call.encode() + ); + }); +} + +generate_owned_bridge_module_tests!( + MessagesOperatingMode::Basic(BasicOperatingMode::Normal), + MessagesOperatingMode::Basic(BasicOperatingMode::Halted) +); + +#[test] +fn inbound_storage_extra_proof_size_bytes_works() { + fn relayer_entry() -> UnrewardedRelayer { + UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } } + } + + fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { + RuntimeInboundLaneStorage { + lane_id: Default::default(), + cached_data: Some(InboundLaneData { + relayers: vec![relayer_entry(); relayer_entries].into(), + last_confirmed_nonce: 0, + }), + _phantom: Default::default(), + } + } + + let max_entries = BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as usize; + + // when we have exactly `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0); + + // when we have less than `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + assert_eq!( + storage(max_entries - 1).extra_proof_size_bytes(), + relayer_entry().encode().len() as u64 + ); + assert_eq!( + storage(max_entries - 2).extra_proof_size_bytes(), + 2 * relayer_entry().encode().len() as u64 + ); + + // when we have more than `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` unrewarded relayers + // (shall not happen in practice) + assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0); +} + +#[test] +fn maybe_outbound_lanes_count_returns_correct_value() { + assert_eq!( + MaybeOutboundLanesCount::::get(), + Some(mock::ActiveOutboundLanes::get().len() as u32) + ); +} diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5bf7d56756079..72a06599b1655 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` +//! HOSTNAME: `serban-ROG-Zephyrus`, CPU: `12th Gen Intel(R) Core(TM) i7-12700H` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -51,14 +51,13 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn receive_single_message_proof() -> Weight; - fn receive_two_messages_proof() -> Weight; + fn receive_n_messages_proof(n: u32) -> Weight; fn receive_single_message_proof_with_outbound_lane_state() -> Weight; - fn receive_single_message_proof_1_kb() -> Weight; - fn receive_single_message_proof_16_kb() -> Weight; + fn receive_single_n_bytes_message_proof(n: u32) -> Weight; fn receive_delivery_proof_for_single_message() -> Weight; fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight; } /// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. 
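// The trait above replaces the old fixed-size benchmarks (`receive_two_messages_proof`,
// `receive_single_message_proof_1_kb`/`_16_kb`, `receive_single_message_proof_with_dispatch(i)`)
// with functions parameterised over the number of messages or payload bytes. The generated
// implementations below all share the same shape: a constant base weight plus a per-unit
// component multiplied by the parameter. Illustrative sketch, using the numbers from the
// `receive_n_messages_proof` implementation in this diff (db reads/writes omitted):
fn receive_n_messages_proof_shape(n: u32) -> Weight {
	Weight::from_parts(29_708_543, 52673)
		.saturating_add(Weight::from_parts(7_648_787, 0).saturating_mul(n.into()))
}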
@@ -82,56 +81,39 @@ impl WeightInfo for BridgeWeight { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. - Weight::from_parts(54_478_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_724 nanoseconds. + Weight::from_parts(40_650_000, 52673) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// The range of component `n` is `[1, 1004]`. /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + /// The range of component `n` is `[1, 1004]`. + fn receive_n_messages_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. - Weight::from_parts(65_905_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 39_354 nanoseconds. 
+ Weight::from_parts(29_708_543, 52673) + // Standard Error: 1_185 + .saturating_add(Weight::from_parts(7_648_787, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,12 +131,12 @@ impl WeightInfo for BridgeWeight { /// /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 45_578 nanoseconds. + Weight::from_parts(47_161_000, 52673) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -172,12 +154,16 @@ impl WeightInfo for BridgeWeight { /// /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { + /// + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. - Weight::from_parts(81_748_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_702 nanoseconds. + Weight::from_parts(41_040_143, 52673) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_174, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -198,16 +184,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:1) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. - Weight::from_parts(47_382_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 37_197 nanoseconds. 
+ Weight::from_parts(38_371_000, 3558) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -226,16 +217,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. - Weight::from_parts(45_451_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 38_684 nanoseconds. + Weight::from_parts(39_929_000, 3558) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -254,16 +250,21 @@ impl WeightInfo for BridgeWeight { /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) + // Measured: `701` + // Estimated: `6126` + // Minimum execution time: 41_363 nanoseconds. + Weight::from_parts(42_621_000, 6126) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -280,15 +281,15 @@ impl WeightInfo for BridgeWeight { /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_925 nanoseconds. 
+ Weight::from_parts(39_617_000, 52673) + // Standard Error: 612 + .saturating_add(Weight::from_parts(372_813, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -312,33 +313,39 @@ impl WeightInfo for () { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. - Weight::from_parts(54_478_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_724 nanoseconds. + Weight::from_parts(40_650_000, 52673) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) + /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) + /// + /// The range of component `n` is `[1, 1004]`. + /// + /// The range of component `n` is `[1, 1004]`. + fn receive_n_messages_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 39_354 nanoseconds. + Weight::from_parts(29_708_543, 52673) + // Standard Error: 1_185 + .saturating_add(Weight::from_parts(7_648_787, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -358,10 +365,10 @@ impl WeightInfo for () { /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. - Weight::from_parts(65_905_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 45_578 nanoseconds. 
+ Weight::from_parts(47_161_000, 52673) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -377,37 +384,20 @@ impl WeightInfo for () { /// /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) + /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49208), added: + /// 51683, mode: MaxEncodedLen) /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) + /// The range of component `n` is `[1, 16384]`. /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. - Weight::from_parts(81_748_000, 57170) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_702 nanoseconds. + Weight::from_parts(41_040_143, 52673) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_174, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -428,16 +418,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:1) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. - Weight::from_parts(47_382_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 37_197 nanoseconds. 
+ Weight::from_parts(38_371_000, 3558) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -456,16 +451,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. - Weight::from_parts(45_451_000, 9584) + // Measured: `701` + // Estimated: `3558` + // Minimum execution time: 38_684 nanoseconds. + Weight::from_parts(39_929_000, 3558) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -484,16 +484,21 @@ impl WeightInfo for () { /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, + /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(93), added: 2568, /// mode: MaxEncodedLen) + /// + /// Storage: BridgeRialtoMessages OutboundMessages (r:0 w:2) + /// + /// Proof: BridgeRialtoMessages OutboundMessages (max_values: None, max_size: Some(65596), + /// added: 68071, mode: MaxEncodedLen) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) + // Measured: `701` + // Estimated: `6126` + // Minimum execution time: 41_363 nanoseconds. + Weight::from_parts(42_621_000, 6126) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// @@ -510,15 +515,15 @@ impl WeightInfo for () { /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) + // Measured: `653` + // Estimated: `52673` + // Minimum execution time: 38_925 nanoseconds. 
+ Weight::from_parts(39_617_000, 52673) + // Standard Error: 612 + .saturating_add(Weight::from_parts(372_813, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/bridges/modules/messages/src/weights_ext.rs b/bridges/modules/messages/src/weights_ext.rs index c12e04f692bf8..7711e212efb06 100644 --- a/bridges/modules/messages/src/weights_ext.rs +++ b/bridges/modules/messages/src/weights_ext.rs @@ -40,13 +40,6 @@ pub fn ensure_weights_are_correct() { // benchmarked using `MaxEncodedLen` approach and there are no components that cause additional // db reads - // verify `receive_messages_proof` weight components - assert_ne!(W::receive_messages_proof_overhead().ref_time(), 0); - assert_ne!(W::receive_messages_proof_overhead().proof_size(), 0); - // W::receive_messages_proof_messages_overhead(1).ref_time() may be zero because: - // the message processing code (`InboundLane::receive_message`) is minimal and may not be - // accounted by our benchmarks - assert_eq!(W::receive_messages_proof_messages_overhead(1).proof_size(), 0); // W::receive_messages_proof_outbound_lane_state_overhead().ref_time() may be zero because: // the outbound lane state processing code (`InboundLane::receive_state_update`) is minimal and // may not be accounted by our benchmarks @@ -86,6 +79,19 @@ pub fn ensure_weights_are_correct() { total_messages_in_delivery_proof_does_not_affect_proof_size::(); } +/// Ensure that we are able to dispatch maximal size messages. +pub fn ensure_maximal_message_dispatch( + max_incoming_message_size: u32, + max_incoming_message_dispatch_weight: Weight, +) { + let message_dispatch_weight = W::message_dispatch_weight(max_incoming_message_size); + assert!( + message_dispatch_weight.all_lte(max_incoming_message_dispatch_weight), + "Dispatch weight of maximal message {message_dispatch_weight:?} must be lower \ + than the hardcoded {max_incoming_message_dispatch_weight:?}", + ); +} + /// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. 
pub fn ensure_able_to_receive_message( max_extrinsic_size: u32, @@ -98,7 +104,8 @@ pub fn ensure_able_to_receive_message( max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); assert!( max_delivery_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery transaction {max_incoming_message_proof_size} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", + "Size of maximal message delivery transaction {max_incoming_message_proof_size} + \ + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", ); // verify that we're able to receive proof of maximal-size message with maximal dispatch weight @@ -297,13 +304,11 @@ pub trait WeightInfoExt: WeightInfo { dispatch_weight: Weight, ) -> Weight { // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_proof_overhead(); + let base_weight = Self::receive_n_messages_proof(messages_count); let transaction_overhead_from_runtime = Self::receive_messages_proof_overhead_from_runtime(); let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead(); - let messages_delivery_weight = - Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); let messages_dispatch_weight = dispatch_weight; // proof size overhead weight @@ -315,10 +320,9 @@ pub trait WeightInfoExt: WeightInfo { actual_proof_size.saturating_sub(expected_proof_size), ); - transaction_overhead + base_weight .saturating_add(transaction_overhead_from_runtime) .saturating_add(outbound_state_delivery_weight) - .saturating_add(messages_delivery_weight) .saturating_add(messages_dispatch_weight) .saturating_add(proof_size_overhead) } @@ -354,25 +358,6 @@ pub trait WeightInfoExt: WeightInfo { // Functions that are used by extrinsics weights formulas. - /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_single_message_proof().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving given a number of messages with - /// message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); - weight_of_two_messages_and_single_tx_overhead - .saturating_sub(weight_of_single_message_and_single_tx_overhead) - .saturating_mul(messages as _) - } - /// Returns weight that needs to be accounted when message delivery transaction /// (`receive_messages_proof`) is carrying outbound lane state proof. fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { @@ -426,9 +411,8 @@ pub trait WeightInfoExt: WeightInfo { /// is less than that cost). 
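The reworked `receive_messages_proof_weight` formula in `weights_ext.rs` above now starts from the benchmarked `receive_n_messages_proof(messages_count)` base and adds the runtime overhead, the outbound-lane-state overhead, the declared dispatch weight and the proof-size overhead. A minimal sketch of that composition, assuming only the `frame_support::weights::Weight` API; the `estimate_delivery_weight` helper and all numbers are illustrative and not part of this PR:

```rust
use frame_support::weights::Weight;

/// Illustrative helper mirroring the shape of the reworked formula: a benchmarked
/// base for `messages_count` messages plus the remaining overhead terms.
fn estimate_delivery_weight(
    base_weight: Weight,           // e.g. W::receive_n_messages_proof(messages_count)
    runtime_overhead: Weight,      // W::receive_messages_proof_overhead_from_runtime()
    outbound_state_weight: Weight, // W::receive_messages_proof_outbound_lane_state_overhead()
    dispatch_weight: Weight,       // declared dispatch weight of the messages
    proof_size_overhead: Weight,   // cost of proof bytes above the expected proof size
) -> Weight {
    base_weight
        .saturating_add(runtime_overhead)
        .saturating_add(outbound_state_weight)
        .saturating_add(dispatch_weight)
        .saturating_add(proof_size_overhead)
}

fn main() {
    // Purely made-up numbers, in (ref_time, proof_size) form.
    let total = estimate_delivery_weight(
        Weight::from_parts(40_000_000, 52_673),
        Weight::from_parts(1_000_000, 0),
        Weight::from_parts(5_000_000, 0),
        Weight::from_parts(10_000_000, 0),
        Weight::from_parts(2_000_000, 0),
    );
    assert_eq!(total.ref_time(), 58_000_000);
    assert_eq!(total.proof_size(), 52_673);
}
```

The lines that follow apply the same difference-based idea to the remaining components: the per-byte proof overhead becomes the gap between the `receive_single_n_bytes_message_proof(2)` and `receive_single_n_bytes_message_proof(1)` benchmark points, and `message_dispatch_weight` becomes the with-dispatch benchmark minus the without-dispatch benchmark at the same message size.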
fn storage_proof_size_overhead(proof_size: u32) -> Weight { let proof_size_in_bytes = proof_size; - let byte_weight = (Self::receive_single_message_proof_16_kb() - - Self::receive_single_message_proof_1_kb()) / - (15 * 1024); + let byte_weight = Self::receive_single_n_bytes_message_proof(2) - + Self::receive_single_n_bytes_message_proof(1); proof_size_in_bytes * byte_weight } @@ -440,11 +424,9 @@ pub trait WeightInfoExt: WeightInfo { /// `receive_single_message_proof_with_dispatch` benchmark. See its requirements for /// details. fn message_dispatch_weight(message_size: u32) -> Weight { - // There may be a tiny overweight/underweight here, because we don't account how message - // size affects all steps before dispatch. But the effect should be small enough and we - // may ignore it. - Self::receive_single_message_proof_with_dispatch(message_size) - .saturating_sub(Self::receive_single_message_proof()) + let message_size_in_bytes = message_size; + Self::receive_single_n_bytes_message_proof_with_dispatch(message_size_in_bytes) + .saturating_sub(Self::receive_single_n_bytes_message_proof(message_size_in_bytes)) } } @@ -479,7 +461,7 @@ impl WeightInfoExt for crate::weights::BridgeWeight #[cfg(test)] mod tests { use super::*; - use crate::{mock::TestRuntime, weights::BridgeWeight}; + use crate::{tests::mock::TestRuntime, weights::BridgeWeight}; #[test] fn ensure_default_weights_are_correct() { diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index d3152f8d0a4aa..cda0ee8106d54 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -11,32 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-grandpa = { path = "../grandpa", default-features = false } +bp-header-chain = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } +pallet-bridge-grandpa = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } -bp-test-utils = { path = 
"../../primitives/test-utils" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-header-chain = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -54,7 +53,6 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-std/std", - "sp-trie/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/bridges/modules/parachains/src/benchmarking.rs b/bridges/modules/parachains/src/benchmarking.rs index 27e06a12a1d93..92ece6d688cbe 100644 --- a/bridges/modules/parachains/src/benchmarking.rs +++ b/bridges/modules/parachains/src/benchmarking.rs @@ -22,7 +22,7 @@ use crate::{ }; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use bp_runtime::StorageProofSize; +use bp_runtime::UnverifiedStorageProofParams; use frame_benchmarking::{account, benchmarks_instance_pallet}; use frame_system::RawOrigin; use sp_std::prelude::*; @@ -38,7 +38,7 @@ pub trait Config: crate::Config { fn prepare_parachain_heads_proof( parachains: &[ParaId], parachain_head_size: u32, - proof_size: StorageProofSize, + proof_params: UnverifiedStorageProofParams, ) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>); } @@ -68,7 +68,7 @@ benchmarks_instance_pallet! { let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::Minimal(0), + UnverifiedStorageProofParams::default(), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) @@ -85,7 +85,7 @@ benchmarks_instance_pallet! { let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(1024), + UnverifiedStorageProofParams::from_db_size(1024), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) @@ -102,7 +102,7 @@ benchmarks_instance_pallet! 
{ let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( ¶chains, DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(16 * 1024), + UnverifiedStorageProofParams::from_db_size(16 * 1024), ); let at_relay_block = (relay_block_number, relay_block_hash); }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs index fe6b319205d41..0f77eaf2c5a93 100644 --- a/bridges/modules/parachains/src/call_ext.rs +++ b/bridges/modules/parachains/src/call_ext.rs @@ -289,7 +289,7 @@ mod tests { RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { at_relay_block: (num, [num as u8; 32].into()), parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: false, }) .check_obsolete_submit_parachain_heads() @@ -303,7 +303,7 @@ mod tests { RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { at_relay_block: (num, [num as u8; 32].into()), parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + parachain_heads_proof: ParaHeadsProof { storage_proof: Default::default() }, is_free_execution_expected: true, }) .check_obsolete_submit_parachain_heads() diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs index d323aef3b2207..e2c30ce9aecc1 100644 --- a/bridges/modules/parachains/src/lib.rs +++ b/bridges/modules/parachains/src/lib.rs @@ -28,11 +28,12 @@ pub use weights::WeightInfo; pub use weights_ext::WeightInfoExt; use bp_header_chain::{HeaderChain, HeaderChainError}; -use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHeaderData}; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError}; +use bp_parachains::{ParaInfo, ParaStoredHeaderData}; +use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; +use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain}; use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound}; use pallet_bridge_grandpa::SubmitFinalityProofHelper; +use proofs::{ParachainsStorageProofAdapter, StorageProofAdapter}; use sp_std::{marker::PhantomData, vec::Vec}; #[cfg(feature = "runtime-benchmarks")] @@ -55,6 +56,7 @@ pub mod benchmarking; mod call_ext; #[cfg(test)] mod mock; +mod proofs; /// The target that will be used when publishing logs related to this pallet. 
pub const LOG_TARGET: &str = "runtime::bridge-parachains"; @@ -448,15 +450,15 @@ pub mod pallet { parachains.len() as _, ); - let mut is_updated_something = false; - let mut storage = GrandpaPalletOf::::storage_proof_checker( - relay_block_hash, - parachain_heads_proof.storage_proof, - ) - .map_err(Error::::HeaderChainStorageProof)?; + let mut storage: ParachainsStorageProofAdapter = + ParachainsStorageProofAdapter::try_new_with_verified_storage_proof( + relay_block_hash, + parachain_heads_proof.storage_proof, + ) + .map_err(Error::::HeaderChainStorageProof)?; for (parachain, parachain_head_hash) in parachains { - let parachain_head = match Self::read_parachain_head(&mut storage, parachain) { + let parachain_head = match storage.read_parachain_head(parachain) { Ok(Some(parachain_head)) => parachain_head, Ok(None) => { log::trace!( @@ -541,7 +543,6 @@ pub mod pallet { parachain_head_hash, )?; - is_updated_something = true; if is_free { free_parachain_heads = free_parachain_heads + 1; } @@ -572,7 +573,7 @@ pub mod pallet { // => treat this as an error // // (we can throw error here, because now all our calls are transactional) - storage.ensure_no_unused_nodes().map_err(|e| { + storage.ensure_no_unused_keys().map_err(|e| { Error::::HeaderChainStorageProof(HeaderChainError::StorageProof(e)) })?; @@ -633,16 +634,6 @@ pub mod pallet { ImportedParaHeads::::get(parachain, hash).map(|h| h.into_inner()) } - /// Read parachain head from storage proof. - fn read_parachain_head( - storage: &mut bp_runtime::StorageProofChecker, - parachain: ParaId, - ) -> Result, StorageProofError> { - let parachain_head_key = - parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); - storage.read_and_decode_value(parachain_head_key.0.as_ref()) - } - /// Try to update parachain head. 
pub(super) fn update_parachain_head( parachain: ParaId, @@ -801,6 +792,7 @@ impl, I: 'static, C: Parachain> HeaderChain pub fn initialize_for_benchmarks, I: 'static, PC: Parachain>( header: HeaderOf, ) { + use bp_polkadot_core::parachains::ParaHead; use bp_runtime::HeaderIdProvider; use sp_runtime::traits::Header; @@ -844,9 +836,10 @@ pub(crate) mod tests { use bp_parachains::{ BestParaHeadHash, BridgeParachainCall, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider, }; + use bp_polkadot_core::parachains::ParaHead; use bp_runtime::{ BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, + StorageMapKeyProvider, StorageProofError, }; use bp_test_utils::{ authority_list, generate_owned_bridge_module_tests, make_default_justification, diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index dbb62845392d5..c49b5939093c5 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -23,7 +23,7 @@ use frame_support::{ use sp_runtime::{ testing::H256, traits::{BlakeTwo256, Header as HeaderT}, - MultiSignature, + MultiSignature, StateVersion, }; use crate as pallet_bridge_parachains; @@ -60,6 +60,8 @@ impl Chain for Parachain1 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -87,6 +89,8 @@ impl Chain for Parachain2 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -114,6 +118,8 @@ impl Chain for Parachain3 { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -142,6 +148,8 @@ impl Chain for BigParachain { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } @@ -222,7 +230,7 @@ impl pallet_bridge_parachains::benchmarking::Config<()> for TestRuntime { fn prepare_parachain_heads_proof( parachains: &[ParaId], _parachain_head_size: u32, - _proof_size: bp_runtime::StorageProofSize, + _proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( crate::RelayBlockNumber, crate::RelayBlockHash, @@ -256,38 +264,7 @@ impl Chain for TestBridgedChain { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; - fn max_extrinsic_size() -> u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithGrandpa for TestBridgedChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -#[derive(Debug)] -pub struct OtherBridgedChain; - -impl Chain for OtherBridgedChain { - const ID: ChainId = *b"obch"; - - type BlockNumber = u64; - type Hash = crate::RelayBlockHash; - type Hasher = crate::RelayBlockHasher; - type Header = sp_runtime::generic::Header; - - type AccountId = AccountId; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; fn max_extrinsic_size() -> u32 { unreachable!() @@ -298,7 +275,7 @@ impl Chain for OtherBridgedChain { } } -impl ChainWithGrandpa for OtherBridgedChain { +impl ChainWithGrandpa for TestBridgedChain { const 
WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; diff --git a/bridges/modules/parachains/src/proofs.rs b/bridges/modules/parachains/src/proofs.rs new file mode 100644 index 0000000000000..dcf22229f3423 --- /dev/null +++ b/bridges/modules/parachains/src/proofs.rs @@ -0,0 +1,81 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for parachain head proof verification. + +use crate::{Config, GrandpaPalletOf, RelayBlockHash, RelayBlockHasher}; +use bp_header_chain::{HeaderChain, HeaderChainError}; +use bp_parachains::parachain_head_storage_key_at_source; +use bp_polkadot_core::parachains::{ParaHead, ParaId}; +use bp_runtime::{RawStorageProof, StorageProofChecker, StorageProofError}; +use codec::Decode; +use frame_support::traits::Get; + +/// Abstraction over storage proof manipulation, hiding implementation details of actual storage +/// proofs. +pub trait StorageProofAdapter, I: 'static> { + /// Read and decode optional value from the proof. + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError>; + + /// Checks if each key was read. + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError>; + + /// Read parachain head from storage proof. + fn read_parachain_head( + &mut self, + parachain: ParaId, + ) -> Result, StorageProofError> { + let parachain_head_key = + parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); + self.read_and_decode_optional_value(¶chain_head_key) + } +} + +/// Actual storage proof adapter for parachain proofs. +pub type ParachainsStorageProofAdapter = RawStorageProofAdapter; + +/// A `StorageProofAdapter` implementation for raw storage proofs. +pub struct RawStorageProofAdapter, I: 'static> { + storage: StorageProofChecker, + _dummy: sp_std::marker::PhantomData<(T, I)>, +} + +impl, I: 'static> RawStorageProofAdapter { + /// Try to create a new instance of `RawStorageProofAdapter`. 
+ pub fn try_new_with_verified_storage_proof( + relay_block_hash: RelayBlockHash, + storage_proof: RawStorageProof, + ) -> Result { + GrandpaPalletOf::::verify_storage_proof(relay_block_hash, storage_proof) + .map(|storage| RawStorageProofAdapter:: { storage, _dummy: Default::default() }) + } +} + +impl, I: 'static> StorageProofAdapter for RawStorageProofAdapter { + fn read_and_decode_optional_value( + &mut self, + key: &impl AsRef<[u8]>, + ) -> Result, StorageProofError> { + self.storage.read_and_decode_opt_value(key.as_ref()) + } + + fn ensure_no_unused_keys(self) -> Result<(), StorageProofError> { + self.storage.ensure_no_unused_nodes() + } +} diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index abddc87689470..1f92b7ff763c3 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for pallet_bridge_parachains //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` +//! HOSTNAME: `serban-ROG-Zephyrus`, CPU: `12th Gen Intel(R) Core(TM) i7-12700H` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -86,14 +86,12 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { + fn submit_parachain_heads_with_n_parachains(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. - Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_211 nanoseconds. + Weight::from_parts(32_633_893, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -123,10 +121,10 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. - Weight::from_parts(39_252_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_830 nanoseconds. + Weight::from_parts(31_801_000, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -156,10 +154,10 @@ impl WeightInfo for BridgeWeight { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. - Weight::from_parts(63_581_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 44_736 nanoseconds. 
+ Weight::from_parts(45_296_000, 3038) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -193,14 +191,12 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { + fn submit_parachain_heads_with_n_parachains(_p: u32) -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. - Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_211 nanoseconds. + Weight::from_parts(32_633_893, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -230,10 +226,10 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. - Weight::from_parts(39_252_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 30_830 nanoseconds. + Weight::from_parts(31_801_000, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -263,10 +259,10 @@ impl WeightInfo for () { /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. - Weight::from_parts(63_581_000, 4648) + // Measured: `302` + // Estimated: `3038` + // Minimum execution time: 44_736 nanoseconds. 
+ Weight::from_parts(45_296_000, 3038) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 08e1438d4f194..27a28546afb48 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -11,31 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } +bp-messages = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +pallet-bridge-messages = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-runtime = { path = "../../primitives/runtime" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +bp-runtime = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index 7a3a0f9ea94cb..2c86ec01f5b91 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -63,7 +63,7 @@ pub mod pallet { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Type of relayer reward. - type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen; + type Reward: AtLeast32BitUnsigned + Copy + Member + Parameter + MaxEncodedLen; /// Pay rewards scheme. type PaymentProcedure: PaymentProcedure; /// Stake and slash scheme. 
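The `StorageProofAdapter` added in `bridges/modules/parachains/src/proofs.rs` above enforces a simple discipline: the pallet reads every parachain head it needs through the adapter, then rejects the proof if it still contains entries that were never read (`ensure_no_unused_keys`), presumably so relayers cannot pad proofs with trie nodes that are never used. A self-contained sketch of that discipline, using a plain `BTreeMap` as a stand-in for the real `StorageProofChecker`; all types below are illustrative, not the pallet's:

```rust
use std::collections::{BTreeMap, BTreeSet};

/// Toy stand-in for a verified storage proof: a key-value set plus a record of
/// which keys have not been read yet.
struct ToyProof {
    entries: BTreeMap<Vec<u8>, Vec<u8>>,
    unread: BTreeSet<Vec<u8>>,
}

impl ToyProof {
    fn new(entries: BTreeMap<Vec<u8>, Vec<u8>>) -> Self {
        let unread = entries.keys().cloned().collect();
        Self { entries, unread }
    }

    /// Mirrors `read_and_decode_optional_value`: reading a key marks it as used.
    fn read(&mut self, key: &[u8]) -> Option<Vec<u8>> {
        self.unread.remove(key);
        self.entries.get(key).cloned()
    }

    /// Mirrors `ensure_no_unused_keys`: fail if the proof still contains entries
    /// that nobody asked for.
    fn ensure_no_unused_keys(self) -> Result<(), &'static str> {
        if self.unread.is_empty() {
            Ok(())
        } else {
            Err("proof contains unused entries")
        }
    }
}

fn main() {
    let mut proof =
        ToyProof::new(BTreeMap::from([(b"para:2000".to_vec(), b"head".to_vec())]));
    // Read the single head we care about...
    assert_eq!(proof.read(b"para:2000"), Some(b"head".to_vec()));
    // ...and only then accept the proof, because nothing was left unread.
    assert!(proof.ensure_no_unused_keys().is_ok());
}
```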
diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs index b2d9c676bddc4..f75c409aca4f3 100644 --- a/bridges/modules/relayers/src/payment_adapter.rs +++ b/bridges/modules/relayers/src/payment_adapter.rs @@ -23,6 +23,7 @@ use bp_messages::{ LaneId, MessageNonce, }; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; +use bp_runtime::Chain; use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; use sp_arithmetic::traits::{Saturating, Zero}; use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeInclusive}; @@ -57,7 +58,7 @@ where relayers_rewards, RewardsAccountParams::new( lane_id, - T::BridgedChainId::get(), + T::BridgedChain::ID, RewardsAccountOwner::BridgedChain, ), DeliveryReward::get(), diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index b80240c974de9..ec7c3b5628327 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -11,31 +11,31 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } # Bridge dependencies -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { workspace = true } # Substrate Dependencies -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -sp-io = { path = "../../../substrate/primitives/io" } -sp-std = { path = "../../../substrate/primitives/std" } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 9b22770061a9a..092df477265fc 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -11,33 +11,33 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features 
= false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } -bridge-runtime-common = { path = "../../bin/runtime-common", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } +bp-xcm-bridge-hub = { workspace = true } +pallet-bridge-messages = { workspace = true } +bridge-runtime-common = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } [dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } -pallet-balances = { path = "../../../substrate/frame/balances" } -sp-io = { path = "../../../substrate/primitives/io" } +bp-header-chain = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs index 4c09bce56d73e..df72e7a3c4fcc 100644 --- a/bridges/modules/xcm-bridge-hub/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub/src/mock.rs @@ -20,23 +20,17 @@ use crate as pallet_xcm_bridge_hub; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - LaneId, -}; -use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, UnderlyingChainProvider}; -use bridge_runtime_common::{ - messages::{ - source::TargetHeaderChainAdapter, target::SourceHeaderChainAdapter, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, - }, - messages_xcm_extension::{SenderAndLane, XcmBlobHauler}, + ChainWithMessages, LaneId, MessageNonce, }; +use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, HashOf}; +use bridge_runtime_common::messages_xcm_extension::{SenderAndLane, XcmBlobHauler}; use codec::Encode; -use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::RuntimeDbWeight}; +use frame_support::{derive_impl, parameter_types, weights::RuntimeDbWeight}; use sp_core::H256; use sp_runtime::{ testing::Header as SubstrateHeader, traits::{BlakeTwo256, IdentityLookup}, - AccountId32, BuildStorage, + AccountId32, BuildStorage, StateVersion, 
}; use xcm::prelude::*; @@ -85,20 +79,17 @@ impl pallet_bridge_messages::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type WeightInfo = TestMessagesWeights; - type BridgedChainId = (); type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = (); - type MaxUnconfirmedMessagesAtInboundLane = (); - type MaximalOutboundPayloadSize = ConstU32<2048>; type OutboundPayload = Vec; type InboundPayload = Vec; - type InboundRelayer = (); type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = (); type OnMessagesDelivered = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = TestMessageDispatch; + + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgedHeaderChain; } pub struct TestMessagesWeights; @@ -107,34 +98,27 @@ impl pallet_bridge_messages::WeightInfo for TestMessagesWeights { fn receive_single_message_proof() -> Weight { Weight::zero() } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + fn receive_n_messages_proof(_: u32) -> Weight { Weight::zero() } - fn receive_delivery_proof_for_single_message() -> Weight { + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { Weight::zero() } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + fn receive_single_n_bytes_message_proof(_: u32) -> Weight { Weight::zero() } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + fn receive_delivery_proof_for_single_message() -> Weight { Weight::zero() } - - fn receive_two_messages_proof() -> Weight { + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { Weight::zero() } - - fn receive_single_message_proof_1_kb() -> Weight { + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { Weight::zero() } - - fn receive_single_message_proof_16_kb() -> Weight { + fn receive_single_n_bytes_message_proof_with_dispatch(_: u32) -> Weight { Weight::zero() } - - fn receive_single_message_proof_with_dispatch(_: u32) -> Weight { - Weight::from_parts(1, 0) - } } impl pallet_bridge_messages::WeightInfoExt for TestMessagesWeights { @@ -198,9 +182,9 @@ impl XcmBlobHauler for TestXcmBlobHauler { type UncongestedMessage = (); } -pub struct ThisChain; +pub struct ThisUnderlyingChain; -impl Chain for ThisChain { +impl Chain for ThisUnderlyingChain { const ID: ChainId = *b"tuch"; type BlockNumber = u64; type Hash = H256; @@ -211,6 +195,8 @@ impl Chain for ThisChain { type Nonce = u64; type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { u32::MAX } @@ -220,12 +206,19 @@ impl Chain for ThisChain { } } -pub struct BridgedChain; +impl ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + +pub struct BridgedUnderlyingChain; pub type BridgedHeaderHash = H256; pub type BridgedChainHeader = SubstrateHeader; -impl Chain for BridgedChain { - const ID: ChainId = *b"tuch"; +impl Chain for BridgedUnderlyingChain { + const ID: ChainId = *b"bgdc"; type BlockNumber = u64; type Hash = BridgedHeaderHash; type Hasher = BlakeTwo256; @@ -235,6 +228,8 @@ impl Chain for BridgedChain { type Nonce = u64; type Signature = sp_runtime::MultiSignature; 
+ const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 4096 } @@ -244,6 +239,12 @@ impl Chain for BridgedChain { } } +impl ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; +} + /// Test message dispatcher. pub struct TestMessageDispatch; @@ -272,42 +273,15 @@ impl MessageDispatch for TestMessageDispatch { } } -pub struct WrappedThisChain; -impl UnderlyingChainProvider for WrappedThisChain { - type Chain = ThisChain; -} -impl ThisChainWithMessages for WrappedThisChain { - type RuntimeOrigin = RuntimeOrigin; -} - -pub struct WrappedBridgedChain; -impl UnderlyingChainProvider for WrappedBridgedChain { - type Chain = BridgedChain; -} -impl BridgedChainWithMessages for WrappedBridgedChain {} - pub struct BridgedHeaderChain; -impl bp_header_chain::HeaderChain for BridgedHeaderChain { +impl bp_header_chain::HeaderChain for BridgedHeaderChain { fn finalized_header_state_root( - _hash: HashOf, - ) -> Option> { + _hash: HashOf, + ) -> Option> { unreachable!() } } -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = WrappedThisChain; - type BridgedChain = WrappedBridgedChain; - type BridgedHeaderChain = BridgedHeaderChain; -} - /// Run pallet test. pub fn run_test(test: impl FnOnce() -> T) -> T { sp_io::TestExternalities::new( diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index bd68076ca48fc..404acaff30af2 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -12,23 +12,23 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } -serde = { default-features = false, features = ["alloc", "derive"], workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge Dependencies -bp-runtime = { path = "../runtime", default-features = false } +bp-runtime = { workspace = true } # Substrate Dependencies -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +binary-merkle-tree = { workspace = true } +sp-consensus-beefy = { workspace = true } +frame-support = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-mmr = { workspace = true } +sp-runtime = { workspace = true 
} +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index def1f7ad4dfef..081bda479495f 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -11,27 +11,27 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +finality-grandpa = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-runtime = { path = "../runtime", default-features = false } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } [dev-dependencies] -bp-test-utils = { path = "../test-utils" } -hex = "0.4" -hex-literal = "0.4" +bp-test-utils = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index af2afb65a26a7..26295dee1801a 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -46,7 +46,7 @@ pub mod storage_keys; pub enum HeaderChainError { /// Header with given hash is missing from the chain. UnknownHeader, - /// Storage proof related error. + /// Error generated by the `storage_proof` module. StorageProof(StorageProofError), } @@ -78,8 +78,9 @@ impl StoredHeaderDataBuilder for H { pub trait HeaderChain { /// Returns state (storage) root of given finalized header. fn finalized_header_state_root(header_hash: HashOf) -> Option>; + /// Get storage proof checker using finalized header. 
- fn storage_proof_checker( + fn verify_storage_proof( header_hash: HashOf, storage_proof: RawStorageProof, ) -> Result>, HeaderChainError> { @@ -409,7 +410,9 @@ mod tests { use super::*; use bp_runtime::ChainId; use frame_support::weights::Weight; - use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature}; + use sp_runtime::{ + testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature, StateVersion, + }; struct TestChain; @@ -425,6 +428,8 @@ mod tests { type Nonce = u64; type Signature = MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 0 } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 20337873c2e6a..4a9037342bcea 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -11,24 +11,24 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-runtime = { path = "../runtime", default-features = false } -bp-header-chain = { path = "../header-chain", default-features = false } +bp-runtime = { workspace = true } +bp-header-chain = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" -hex-literal = "0.4" +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index c3f79b3ee388c..9984f8ac32227 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -38,6 +38,9 @@ pub mod source_chain; pub mod storage_keys; pub mod target_chain; +/// Hard limit on message size that can be sent over the bridge. +pub const HARD_MESSAGE_SIZE_LIMIT: u32 = 64 * 1024; + /// Substrate-based chain with messaging support. pub trait ChainWithMessages: Chain { /// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is @@ -48,11 +51,63 @@ pub trait ChainWithMessages: Chain { const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str; /// Maximal number of unrewarded relayers in a single confirmation transaction at this - /// `ChainWithMessages`. + /// `ChainWithMessages`. Unrewarded means that the relayer has delivered messages, but + /// either confirmations haven't been delivered back to the source chain, or we haven't + /// received reward confirmations yet. + /// + /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep + /// in mind that the same relayer account may take several (non-consecutive) entries in this + /// set. 
const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce; /// Maximal number of unconfirmed messages in a single confirmation transaction at this - /// `ChainWithMessages`. + /// `ChainWithMessages`. Unconfirmed means that the + /// message has been delivered, but either confirmations haven't been delivered back to the + /// source chain, or we haven't received reward confirmations for these messages yet. + /// + /// This constant limits the difference between the last message from the last entry of + /// `InboundLaneData::relayers` and the first message at the first entry. + /// + /// There is no point in making this parameter less than + /// `MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX`, because then the maximal number of relayer + /// entries will be limited by the maximal number of messages. + /// + /// This value also represents the maximal number of messages in a single delivery transaction. + /// A transaction that declares more messages than this value will be rejected, even if + /// these messages are from different lanes. const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce; + + /// Return maximal dispatch weight of the message we're able to receive. + fn maximal_incoming_message_dispatch_weight() -> Weight { + // we leave 1/2 of `max_extrinsic_weight` for the delivery transaction itself + Self::max_extrinsic_weight() / 2 + } + + /// Return maximal size of the message we're able to receive. + fn maximal_incoming_message_size() -> u32 { + maximal_incoming_message_size(Self::max_extrinsic_size()) + } +} + +/// Return maximal size of the message the chain with `max_extrinsic_size` is able to receive. +pub fn maximal_incoming_message_size(max_extrinsic_size: u32) -> u32 { + // The maximal size of extrinsic at Substrate-based chain depends on the + // `frame_system::Config::MaximumBlockLength` and + // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that + // the lane won't get stuck because a message is too large to fit into the delivery transaction. + // + // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not + // the message itself. The proof is always larger than the message. But unless chain state + // is enormously large, it should be several dozens/hundreds of bytes. The delivery + // transaction also contains signatures and signed extensions. Because of this, we reserve + // 1/3 of the maximal extrinsic size for this data. + // + // **ANOTHER IMPORTANT NOTE**: a large message means not only larger proofs and heavier + // proof verification, but also heavier message decoding and dispatch. So we have a hard + // limit of `64Kb`, which in practice limits the message size on all chains. Without this + // limit, the **weight** (not the size) of the message will be higher than the + // `Self::maximal_incoming_message_dispatch_weight()`. + + sp_std::cmp::min(max_extrinsic_size / 3 * 2, HARD_MESSAGE_SIZE_LIMIT) } impl ChainWithMessages for T @@ -112,7 +167,19 @@ impl OperatingMode for MessagesOperatingMode { /// Lane id which implements `TypeId`.
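The `maximal_incoming_message_size` helper added above reserves roughly one third of the extrinsic size for the storage proof, signatures and signed extensions, and caps the result at `HARD_MESSAGE_SIZE_LIMIT`. A minimal standalone sketch of that arithmetic, using illustrative numbers only:

```rust
/// Hard cap introduced in this diff: 64 KiB.
const HARD_MESSAGE_SIZE_LIMIT: u32 = 64 * 1024;

/// Mirror of the size formula above: keep 2/3 of the extrinsic size for the
/// message itself, never exceeding the hard limit.
fn maximal_incoming_message_size(max_extrinsic_size: u32) -> u32 {
    core::cmp::min(max_extrinsic_size / 3 * 2, HARD_MESSAGE_SIZE_LIMIT)
}

fn main() {
    // A hypothetical 5 MiB extrinsic limit is still capped at 64 KiB.
    assert_eq!(maximal_incoming_message_size(5 * 1024 * 1024), 64 * 1024);
    // A 30 KiB extrinsic limit leaves 20 KiB for the message payload.
    assert_eq!(maximal_incoming_message_size(30 * 1024), 20 * 1024);
}
```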
#[derive( - Clone, Copy, Decode, Default, Encode, Eq, Ord, PartialOrd, PartialEq, TypeInfo, MaxEncodedLen, + Clone, + Copy, + Decode, + Default, + Encode, + Eq, + Ord, + PartialOrd, + PartialEq, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, )] pub struct LaneId(pub [u8; 4]); @@ -435,7 +502,7 @@ where AccountId: sp_std::cmp::Ord, { // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain + // this loop is bounded by `T::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX` on the bridged chain let mut relayers_rewards = RelayersRewards::new(); for entry in messages_relayers { let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); @@ -486,11 +553,11 @@ pub enum VerificationError { InvalidMessageWeight, /// Declared messages count doesn't match actual value. MessagesCountMismatch, - /// Error returned while reading/decoding message data from the storage proof. + /// Error returned while reading/decoding message data from the `VerifiedStorageProof`. MessageStorage(StorageProofError), /// The message is too large. MessageTooLarge, - /// Error returned while reading/decoding outbound lane data from the storage proof. + /// Error returned while reading/decoding outbound lane data from the `VerifiedStorageProof`. OutboundLaneStorage(StorageProofError), /// Storage proof related error. StorageProof(StorageProofError), diff --git a/bridges/primitives/messages/src/source_chain.rs b/bridges/primitives/messages/src/source_chain.rs index f4aefd9735583..64f015bdb822e 100644 --- a/bridges/primitives/messages/src/source_chain.rs +++ b/bridges/primitives/messages/src/source_chain.rs @@ -16,11 +16,11 @@ //! Primitives of messages module, that are used on the source chain. -use crate::{InboundLaneData, LaneId, MessageNonce, VerificationError}; +use crate::{LaneId, MessageNonce, UnrewardedRelayer}; -use crate::UnrewardedRelayer; -use bp_runtime::Size; -use frame_support::Parameter; +use bp_runtime::{raw_storage_proof_size, RawStorageProof, Size}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_std::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, @@ -28,42 +28,36 @@ use sp_std::{ ops::RangeInclusive, }; -/// Number of messages, delivered by relayers. -pub type RelayersRewards = BTreeMap; - -/// Target chain API. Used by source chain to verify target chain proofs. +/// Messages delivery proof from the bridged chain. /// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. +/// It contains everything required to prove that our (this chain) messages have been +/// delivered to the bridged (target) chain: /// -/// The `Payload` type here means the payload of the message that is sent from the -/// source chain to the target chain. The `AccountId` type here means the account -/// type used by the source chain. -pub trait TargetHeaderChain { - /// Proof that messages have been received by target chain. - type MessagesDeliveryProof: Parameter + Size; - - /// Verify message payload before we accept it. - /// - /// **CAUTION**: this is very important function. Incorrect implementation may lead - /// to stuck lanes and/or relayers loses. 
- /// - /// The proper implementation must ensure that the delivery-transaction with this - /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully mined. The most obvious incorrect implementation - /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer - /// will be unable to craft valid transaction => this (and all subsequent) messages will - /// never be delivered. - fn verify_message(payload: &Payload) -> Result<(), VerificationError>; - - /// Verify messages delivery proof and return lane && nonce of the latest received message. - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError>; +/// - hash of finalized header; +/// +/// - storage proof of the inbound lane state; +/// +/// - lane id. +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct FromBridgedChainMessagesDeliveryProof { + /// Hash of the bridge header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// Storage trie proof generated for [`Self::bridged_header_hash`]. + pub storage_proof: RawStorageProof, + /// Lane id of which messages were delivered and the proof is for. + pub lane: LaneId, +} + +impl Size for FromBridgedChainMessagesDeliveryProof { + fn size(&self) -> u32 { + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() + } } +/// Number of messages, delivered by relayers. +pub type RelayersRewards = BTreeMap; + /// Manages payments that are happening at the source chain during delivery confirmation /// transaction. pub trait DeliveryConfirmationPayments { @@ -143,28 +137,10 @@ pub trait MessagesBridge { fn send_message(message: Self::SendMessageArgs) -> SendMessageArtifacts; } -/// Structure that may be used in place of `TargetHeaderChain` and -/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. +/// Structure that may be used in place `MessageDeliveryAndDispatchPayment` on chains, +/// where outbound messages are forbidden. pub struct ForbidOutboundMessages; -/// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_OUTBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all outbound messages"; - -impl TargetHeaderChain for ForbidOutboundMessages { - type MessagesDeliveryProof = (); - - fn verify_message(_payload: &Payload) -> Result<(), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } - - fn verify_messages_delivery_proof( - _proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } -} - impl DeliveryConfirmationPayments for ForbidOutboundMessages { type Error = &'static str; diff --git a/bridges/primitives/messages/src/target_chain.rs b/bridges/primitives/messages/src/target_chain.rs index 388ce16ccdc06..74fecb9d9f0d8 100644 --- a/bridges/primitives/messages/src/target_chain.rs +++ b/bridges/primitives/messages/src/target_chain.rs @@ -16,17 +16,48 @@ //! Primitives of messages module, that are used on the target chain. 
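The `Size` implementation for `FromBridgedChainMessagesDeliveryProof` above delegates to `raw_storage_proof_size`, which this diff introduces later in `bp-runtime`'s `storage_proof.rs`. A small standalone sketch of the same fold, assuming only that a raw proof is a list of encoded trie nodes:

```rust
/// A raw storage proof is just a collection of encoded trie nodes.
type RawStorageProof = Vec<Vec<u8>>;

/// Sketch of `raw_storage_proof_size`: the proof size is the sum of the
/// individual node lengths, using saturating addition as an overflow guard.
fn raw_storage_proof_size(proof: &RawStorageProof) -> usize {
    proof.iter().fold(0usize, |sum, node| sum.saturating_add(node.len()))
}

fn main() {
    let proof: RawStorageProof = vec![vec![0u8; 100], vec![0u8; 50], vec![0u8; 14]];
    assert_eq!(raw_storage_proof_size(&proof), 164);
}
```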
-use crate::{ - LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, VerificationError, -}; +use crate::{LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData}; -use bp_runtime::{messages::MessageDispatchResult, Size}; +use bp_runtime::{messages::MessageDispatchResult, raw_storage_proof_size, RawStorageProof, Size}; use codec::{Decode, Encode, Error as CodecError}; -use frame_support::{weights::Weight, Parameter}; +use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, prelude::*}; +/// Messages proof from bridged chain. +/// +/// It contains everything required to prove that bridged (source) chain has +/// sent us some messages: +/// +/// - hash of finalized header; +/// +/// - storage proof of messages and (optionally) outbound lane state; +/// +/// - lane id; +/// +/// - nonces (inclusive range) of messages which are included in this proof. +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct FromBridgedChainMessagesProof { + /// Hash of the finalized bridged header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// A storage trie proof of messages being delivered. + pub storage_proof: RawStorageProof, + /// Messages in this proof are sent over this lane. + pub lane: LaneId, + /// Nonce of the first message being delivered. + pub nonces_start: MessageNonce, + /// Nonce of the last message being delivered. + pub nonces_end: MessageNonce, +} + +impl Size for FromBridgedChainMessagesProof { + fn size(&self) -> u32 { + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() + } +} + /// Proved messages from the source chain. pub type ProvedMessages = BTreeMap>; @@ -55,33 +86,6 @@ pub struct DispatchMessage { pub data: DispatchMessageData, } -/// Source chain API. Used by target chain, to verify source chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait SourceHeaderChain { - /// Proof that messages are sent from source chain. This may also include proof - /// of corresponding outbound lane states. - type MessagesProof: Parameter + Size; - - /// Verify messages proof and return proved messages. - /// - /// Returns error if either proof is incorrect, or the number of messages in the proof - /// is not matching the `messages_count`. - /// - /// Messages vector is required to be sorted by nonce within each lane. Out-of-order - /// messages will be rejected. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError>; -} - /// Called when inbound message is received. pub trait MessageDispatch { /// Decoded message payload type. Valid message may contain invalid payload. In this case @@ -167,32 +171,11 @@ impl DeliveryPayments for () { } } -/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, +/// Structure that may be used in place of `MessageDispatch` on chains, /// where inbound messages are forbidden. 
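`FromBridgedChainMessagesProof` above carries an inclusive nonce range (`nonces_start..=nonces_end`) rather than an explicit count. A tiny sketch of the inclusive-range arithmetic a verifier could use to check that a proof declares the expected number of messages; the helper name is illustrative, not from this diff:

```rust
type MessageNonce = u64;

/// Number of nonces in the inclusive range [nonces_start, nonces_end].
/// Saturating math keeps a malformed range (end < start) from underflowing.
fn declared_messages_count(nonces_start: MessageNonce, nonces_end: MessageNonce) -> MessageNonce {
    nonces_end.saturating_sub(nonces_start).saturating_add(1)
}

fn main() {
    assert_eq!(declared_messages_count(10, 10), 1); // a single message
    assert_eq!(declared_messages_count(10, 14), 5); // five messages, inclusive
}
```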
-pub struct ForbidInboundMessages( - PhantomData<(MessagesProof, DispatchPayload)>, -); - -/// Error message that is used in `ForbidInboundMessages` implementation. -const ALL_INBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all inbound messages"; - -impl SourceHeaderChain - for ForbidInboundMessages -{ - type MessagesProof = MessagesProof; - - fn verify_messages_proof( - _proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - Err(VerificationError::Other(ALL_INBOUND_MESSAGES_REJECTED)) - } -} +pub struct ForbidInboundMessages(PhantomData); -impl MessageDispatch - for ForbidInboundMessages -{ +impl MessageDispatch for ForbidInboundMessages { type DispatchPayload = DispatchPayload; type DispatchLevelResult = (); diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index a6e71876cefbb..173380c8224d2 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -11,22 +11,22 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Bridge dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } # Substrate dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index d4b2f503e9e2c..acae2f431bf20 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -11,26 +11,26 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +parity-util-mem = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false 
} -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" +hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/polkadot-core/src/parachains.rs b/bridges/primitives/polkadot-core/src/parachains.rs index 433cd2845abd9..d54ee108386ed 100644 --- a/bridges/primitives/polkadot-core/src/parachains.rs +++ b/bridges/primitives/polkadot-core/src/parachains.rs @@ -22,7 +22,7 @@ //! parachains. Having pallets that are referencing polkadot, would mean that there may //! be two versions of polkadot crates included in the runtime. Which is bad. -use bp_runtime::{RawStorageProof, Size}; +use bp_runtime::{raw_storage_proof_size, RawStorageProof, Size}; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::Hasher; @@ -96,11 +96,7 @@ pub struct ParaHeadsProof { impl Size for ParaHeadsProof { fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) + use frame_support::sp_runtime::SaturatedConversion; + raw_storage_proof_size(&self.storage_proof).saturated_into() } } diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 5081dddce1e61..3448e8a409633 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -11,23 +11,23 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } # Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -hex = "0.4" -hex-literal = "0.4" +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index 2a9ef6a8e1e9a..436f33db40080 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -140,8 +140,8 @@ pub struct RelayerRewardsKeyProvider(PhantomData<(AccountId, impl StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider where - AccountId: Codec + EncodeLike, - Reward: Codec 
+ EncodeLike, + AccountId: 'static + Codec + EncodeLike + Send + Sync, + Reward: 'static + Codec + EncodeLike + Send + Sync, { const MAP_NAME: &'static str = "RelayerRewards"; diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index ac65ad538b498..117409b37b945 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -11,28 +11,28 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -hash-db = { version = "0.16.0", default-features = false } -impl-trait-for-tuples = "0.2.2" +codec = { workspace = true } +hash-db = { workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +num-traits = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -trie-db = { version = "0.29.0", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-state-machine = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +trie-db = { workspace = true } [dev-dependencies] -hex-literal = "0.4" +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] @@ -53,3 +53,4 @@ std = [ "sp-trie/std", "trie-db/std", ] +test-helpers = [] diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 369386e41b0cf..0db4eac79a750 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -24,7 +24,7 @@ use sp_runtime::{ AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify, }, - FixedPointOperand, + FixedPointOperand, StateVersion, }; use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; @@ -196,6 +196,10 @@ pub trait Chain: Send + Sync + 'static { /// Signature type, used on this chain. type Signature: Parameter + Verify; + /// Version of the state implementation used by this chain. This is directly related with the + /// `TrieLayout` configuration used by the storage. + const STATE_VERSION: StateVersion; + /// Get the maximum size (in bytes) of a Normal extrinsic at this chain. fn max_extrinsic_size() -> u32; /// Get the maximum weight (compute time) that a Normal extrinsic at this chain can use. 
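The new `Chain::STATE_VERSION` constant ties each chain definition to the trie layout (`V0` or `V1`) its storage proofs are built against; the test chain in this diff pins `StateVersion::V1`. A minimal sketch of the pattern on a hypothetical chain type (an inherent impl only, not the full `Chain` trait):

```rust
use sp_runtime::StateVersion;

/// Hypothetical chain definition, used only to illustrate the new constant.
pub struct ThisChain;

impl ThisChain {
    /// Chain definitions now declare which state/trie layout their storage
    /// proofs use, instead of leaving it implicit.
    pub const STATE_VERSION: StateVersion = StateVersion::V1;
}
```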
@@ -223,6 +227,8 @@ where type Nonce = ::Nonce; type Signature = ::Signature; + const STATE_VERSION: StateVersion = ::STATE_VERSION; + fn max_extrinsic_size() -> u32 { ::max_extrinsic_size() } diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 5daba0351ad48..8f5040ad9a1be 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -40,15 +40,18 @@ pub use chain::{ }; pub use frame_support::storage::storage_prefix as storage_value_final_key; use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; +#[cfg(feature = "std")] +pub use storage_proof::craft_valid_storage_proof; +#[cfg(feature = "test-helpers")] pub use storage_proof::{ - record_all_keys as record_all_trie_keys, Error as StorageProofError, - ProofSize as StorageProofSize, RawStorageProof, StorageProofChecker, + grow_storage_proof, grow_storage_value, record_all_keys as record_all_trie_keys, + UnverifiedStorageProofParams, +}; +pub use storage_proof::{ + raw_storage_proof_size, RawStorageProof, StorageProofChecker, StorageProofError, }; pub use storage_types::BoundedStorageValue; -#[cfg(feature = "std")] -pub use storage_proof::craft_valid_storage_proof; - pub mod extensions; pub mod messages; @@ -255,9 +258,9 @@ pub trait StorageMapKeyProvider { /// The same as `StorageMap::Hasher1`. type Hasher: StorageHasher; /// The same as `StorageMap::Key1`. - type Key: FullCodec; + type Key: FullCodec + Send + Sync; /// The same as `StorageMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageMap::storage_map_final_key`. @@ -277,13 +280,13 @@ pub trait StorageDoubleMapKeyProvider { /// The same as `StorageDoubleMap::Hasher1`. type Hasher1: StorageHasher; /// The same as `StorageDoubleMap::Key1`. - type Key1: FullCodec; + type Key1: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Hasher2`. type Hasher2: StorageHasher; /// The same as `StorageDoubleMap::Key2`. - type Key2: FullCodec; + type Key2: FullCodec + Send + Sync; /// The same as `StorageDoubleMap::Value`. - type Value: FullCodec; + type Value: 'static + FullCodec; /// This is a copy of the /// `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`. @@ -461,38 +464,6 @@ macro_rules! generate_static_str_provider { }; } -/// Error message that is only displayable in `std` environment. -#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct StrippableError { - _phantom_data: sp_std::marker::PhantomData, - #[codec(skip)] - #[cfg(feature = "std")] - message: String, -} - -impl From for StrippableError { - fn from(_err: T) -> Self { - Self { - _phantom_data: Default::default(), - #[cfg(feature = "std")] - message: format!("{:?}", _err), - } - } -} - -impl Debug for StrippableError { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str(&self.message) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str("Stripped error") - } -} - /// A trait defining helper methods for `RangeInclusive` (start..=end) pub trait RangeInclusiveExt { /// Computes the length of the `RangeInclusive`, checking for underflow and overflow. 
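The re-exports above are now split: proof-building helpers only exist behind the new `test-helpers` feature (added to `[features]` in `bp-runtime`'s `Cargo.toml` earlier in this diff), so production runtimes never compile them. A minimal sketch of that gating pattern, with a hypothetical helper:

```rust
/// Helpers compiled only when the crate is built with `--features test-helpers`,
/// e.g. for benchmarks and tests; release builds skip this module entirely.
#[cfg(feature = "test-helpers")]
pub mod proof_helpers {
    /// Hypothetical helper: pad a storage value up to `target` bytes.
    pub fn grow_value(mut value: Vec<u8>, target: usize) -> Vec<u8> {
        if value.len() < target {
            value.resize(target, 42u8);
        }
        value
    }
}
```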
diff --git a/bridges/primitives/runtime/src/storage_proof.rs b/bridges/primitives/runtime/src/storage_proof.rs index 1b706aa66c16f..7bfa0d6fde011 100644 --- a/bridges/primitives/runtime/src/storage_proof.rs +++ b/bridges/primitives/runtime/src/storage_proof.rs @@ -14,33 +14,91 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Logic for checking Substrate storage proofs. +//! Logic for working with storage proofs. -use crate::StrippableError; -use codec::{Decode, Encode}; use frame_support::PalletError; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use scale_info::TypeInfo; -use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; +use sp_core::RuntimeDebug; +use sp_std::{default::Default, vec::Vec}; use sp_trie::{ - read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration, - TrieDBBuilder, TrieError, TrieHash, + accessed_nodes_tracker::AccessedNodesTracker, read_trie_value, LayoutV1, MemoryDB, StorageProof, }; +use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use scale_info::TypeInfo; +#[cfg(feature = "test-helpers")] +use sp_trie::{recorder_ext::RecorderExt, Recorder, TrieDBBuilder, TrieError, TrieHash}; +#[cfg(feature = "test-helpers")] +use trie_db::{Trie, TrieConfiguration, TrieDBMut}; + +/// Errors that can occur when interacting with `UnverifiedStorageProof` and `VerifiedStorageProof`. +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] +pub enum StorageProofError { + /// Call to `generate_trie_proof()` failed. + UnableToGenerateTrieProof, + /// Call to `verify_trie_proof()` failed. + InvalidProof, + /// The `Vec` entries weren't sorted as expected. + UnsortedEntries, + /// The provided key wasn't found. + UnavailableKey, + /// The value associated to the provided key is `None`. + EmptyVal, + /// Error decoding value associated to a provided key. + DecodeError, + /// At least one key or node wasn't read. + UnusedKey, + + /// Expected storage root is missing from the proof. (for non-compact proofs) + StorageRootMismatch, + /// Unable to reach expected storage value using provided trie nodes. (for non-compact proofs) + StorageValueUnavailable, + /// The proof contains duplicate nodes. (for non-compact proofs) + DuplicateNodes, +} + +impl From for StorageProofError { + fn from(e: sp_trie::StorageProofError) -> Self { + match e { + sp_trie::StorageProofError::DuplicateNodes => StorageProofError::DuplicateNodes, + } + } +} + +impl From for StorageProofError { + fn from(e: sp_trie::accessed_nodes_tracker::Error) -> Self { + match e { + sp_trie::accessed_nodes_tracker::Error::UnusedNodes => StorageProofError::UnusedKey, + } + } +} + /// Raw storage proof type (just raw trie nodes). -pub type RawStorageProof = Vec>; +pub type RawStorageProof = sp_trie::RawStorageProof; + +/// Calculates size for `RawStorageProof`. +pub fn raw_storage_proof_size(raw_storage_proof: &RawStorageProof) -> usize { + raw_storage_proof + .iter() + .fold(0usize, |sum, node| sum.saturating_add(node.len())) +} -/// Storage proof size requirements. +/// Storage values size requirements. /// /// This is currently used by benchmarks when generating storage proofs. -#[derive(Clone, Copy, Debug)] -pub enum ProofSize { - /// The proof is expected to be minimal. If value size may be changed, then it is expected to - /// have given size. 
- Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing value that is - /// stored in the trie. - HasLargeLeaf(u32), +#[cfg(feature = "test-helpers")] +#[derive(Clone, Copy, Debug, Default)] +pub struct UnverifiedStorageProofParams { + /// Expected storage proof size in bytes. + pub db_size: Option, +} + +#[cfg(feature = "test-helpers")] +impl UnverifiedStorageProofParams { + /// Make storage proof parameters that require proof of at least `db_size` bytes. + pub fn from_db_size(db_size: u32) -> Self { + Self { db_size: Some(db_size) } + } } /// This struct is used to read storage values from a subset of a Merklized database. The "proof" @@ -51,10 +109,9 @@ pub struct StorageProofChecker where H: Hasher, { - proof_nodes_count: usize, root: H::Out, db: MemoryDB, - recorder: Recorder>, + accessed_nodes_tracker: AccessedNodesTracker, } impl StorageProofChecker @@ -64,99 +121,161 @@ where /// Constructs a new storage proof checker. /// /// This returns an error if the given proof is invalid with respect to the given root. - pub fn new(root: H::Out, proof: RawStorageProof) -> Result { - // 1. we don't want extra items in the storage proof - // 2. `StorageProof` is storing all trie nodes in the `BTreeSet` - // - // => someone could simply add duplicate items to the proof and we won't be - // able to detect that by just using `StorageProof` - // - // => let's check it when we are converting our "raw proof" into `StorageProof` - let proof_nodes_count = proof.len(); - let proof = StorageProof::new(proof); - if proof_nodes_count != proof.iter_nodes().count() { - return Err(Error::DuplicateNodesInProof) - } + pub fn new(root: H::Out, proof: RawStorageProof) -> Result { + let proof = StorageProof::new_with_duplicate_nodes_check(proof)?; + + let recorder = AccessedNodesTracker::new(proof.len()); let db = proof.into_memory_db(); if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch) + return Err(StorageProofError::StorageRootMismatch) } - let recorder = Recorder::default(); - let checker = StorageProofChecker { proof_nodes_count, root, db, recorder }; - Ok(checker) + Ok(StorageProofChecker { root, db, accessed_nodes_tracker: recorder }) } /// Returns error if the proof has some nodes that are left intact by previous `read_value` /// calls. - pub fn ensure_no_unused_nodes(mut self) -> Result<(), Error> { - let visited_nodes = self - .recorder - .drain() - .into_iter() - .map(|record| record.data) - .collect::>(); - let visited_nodes_count = visited_nodes.len(); - if self.proof_nodes_count == visited_nodes_count { - Ok(()) - } else { - Err(Error::UnusedNodesInTheProof) - } + pub fn ensure_no_unused_nodes(self) -> Result<(), StorageProofError> { + self.accessed_nodes_tracker.ensure_no_unused_nodes().map_err(Into::into) } /// Reads a value from the available subset of storage. If the value cannot be read due to an /// incomplete or otherwise invalid proof, this function returns an error. - pub fn read_value(&mut self, key: &[u8]) -> Result>, Error> { + pub fn read_value(&mut self, key: &[u8]) -> Result>, StorageProofError> { // LayoutV1 or LayoutV0 is identical for proof that only read values. 
- read_trie_value::, _>(&self.db, &self.root, key, Some(&mut self.recorder), None) - .map_err(|_| Error::StorageValueUnavailable) + read_trie_value::, _>( + &self.db, + &self.root, + key, + Some(&mut self.accessed_nodes_tracker), + None, + ) + .map_err(|_| StorageProofError::StorageValueUnavailable) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, this function returns an error. If value is /// read, but decoding fails, this function returns an error. - pub fn read_and_decode_value(&mut self, key: &[u8]) -> Result, Error> { + pub fn read_and_decode_value( + &mut self, + key: &[u8], + ) -> Result, StorageProofError> { self.read_value(key).and_then(|v| { - v.map(|v| T::decode(&mut &v[..]).map_err(|e| Error::StorageValueDecodeFailed(e.into()))) - .transpose() + v.map(|v| { + T::decode(&mut &v[..]).map_err(|e| { + log::warn!(target: "bridge-storage-proofs", "read_and_decode_value error: {e:?}"); + StorageProofError::DecodeError + }) + }) + .transpose() }) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, or if the value is `None`, this function /// returns an error. If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_mandatory_value(&mut self, key: &[u8]) -> Result { - self.read_and_decode_value(key)?.ok_or(Error::StorageValueEmpty) + pub fn read_and_decode_mandatory_value( + &mut self, + key: &[u8], + ) -> Result { + self.read_and_decode_value(key)?.ok_or(StorageProofError::EmptyVal) } /// Reads and decodes a value from the available subset of storage. If the value cannot be read /// due to an incomplete or otherwise invalid proof, this function returns `Ok(None)`. /// If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_opt_value(&mut self, key: &[u8]) -> Result, Error> { + pub fn read_and_decode_opt_value( + &mut self, + key: &[u8], + ) -> Result, StorageProofError> { match self.read_and_decode_value(key) { Ok(outbound_lane_data) => Ok(outbound_lane_data), - Err(Error::StorageValueUnavailable) => Ok(None), + Err(StorageProofError::StorageValueUnavailable) => Ok(None), Err(e) => Err(e), } } } -/// Storage proof related errors. -#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, Debug, TypeInfo)] -pub enum Error { - /// Duplicate trie nodes are found in the proof. - DuplicateNodesInProof, - /// Unused trie nodes are found in the proof. - UnusedNodesInTheProof, - /// Expected storage root is missing from the proof. - StorageRootMismatch, - /// Unable to reach expected storage value using provided trie nodes. - StorageValueUnavailable, - /// The storage value is `None`. - StorageValueEmpty, - /// Failed to decode storage value. - StorageValueDecodeFailed(StrippableError), +/// Add extra data to the storage value so that it'll be of given size. +#[cfg(feature = "test-helpers")] +pub fn grow_storage_value(mut value: Vec, params: &UnverifiedStorageProofParams) -> Vec { + if let Some(db_size) = params.db_size { + if db_size as usize > value.len() { + value.extend(sp_std::iter::repeat(42u8).take(db_size as usize - value.len())); + } + } + value +} + +/// Insert values in the provided trie at common-prefix keys in order to inflate the resulting +/// storage proof. +/// +/// This function can add at most 15 common-prefix keys per prefix nibble (4 bits). 
+/// Each such key adds about 33 bytes (a node) to the proof. +#[cfg(feature = "test-helpers")] +pub fn grow_storage_proof( + trie: &mut TrieDBMut, + prefix: Vec, + num_extra_nodes: usize, +) { + use sp_trie::TrieMut; + + let mut added_nodes = 0; + for i in 0..prefix.len() { + let mut prefix = prefix[0..=i].to_vec(); + // 1 byte has 2 nibbles (4 bits each) + let first_nibble = (prefix[i] & 0xf0) >> 4; + let second_nibble = prefix[i] & 0x0f; + + // create branches at the 1st nibble + for branch in 1..=15 { + if added_nodes >= num_extra_nodes { + return + } + + // create branches at the 1st nibble + prefix[i] = (first_nibble.wrapping_add(branch) % 16) << 4; + trie.insert(&prefix, &[0; 32]) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + added_nodes += 1; + } + + // create branches at the 2nd nibble + for branch in 1..=15 { + if added_nodes >= num_extra_nodes { + return + } + + prefix[i] = (first_nibble << 4) | (second_nibble.wrapping_add(branch) % 16); + trie.insert(&prefix, &[0; 32]) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + added_nodes += 1; + } + } + + assert_eq!(added_nodes, num_extra_nodes) +} + +/// Record all keys for a given root. +#[cfg(feature = "test-helpers")] +pub fn record_all_keys( + db: &DB, + root: &TrieHash, +) -> Result>> +where + DB: hash_db::HashDBRef, +{ + let mut recorder = Recorder::::new(); + let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); + for x in trie.iter()? { + let (key, _) = x?; + trie.get(&key)?; + } + + Ok(recorder.into_raw_storage_proof()) } /// Return valid storage proof and state root. @@ -170,7 +289,7 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) { // construct storage proof let backend = >::from(( - vec![ + sp_std::vec![ (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), @@ -180,41 +299,15 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) { ], state_version, )); - let root = backend.storage_root(std::iter::empty(), state_version).0; + let root = backend.storage_root(sp_std::iter::empty(), state_version).0; let proof = prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]]).unwrap(); (root, proof.into_nodes().into_iter().collect()) } -/// Record all keys for a given root. -pub fn record_all_keys( - db: &DB, - root: &TrieHash, -) -> Result>> -where - DB: hash_db::HashDBRef, -{ - let mut recorder = Recorder::::new(); - let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); - for x in trie.iter()? 
{ - let (key, _) = x?; - trie.get(&key)?; - } - - // recorder may record the same trie node multiple times and we don't want duplicate nodes - // in our proofs => let's deduplicate it by collecting to the BTreeSet first - Ok(recorder - .drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect::>() - .into_iter() - .collect()) -} - #[cfg(test)] -pub mod tests { +pub mod tests_for_storage_proof_checker { use super::*; use codec::Encode; @@ -228,29 +321,21 @@ pub mod tests { assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); assert_eq!(checker.read_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8).encode()))); - assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); + assert_eq!( + checker.read_value(b"key11111"), + Err(StorageProofError::StorageValueUnavailable) + ); assert_eq!(checker.read_value(b"key22"), Ok(None)); assert_eq!(checker.read_and_decode_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8))),); assert!(matches!( checker.read_and_decode_value::<[u8; 64]>(b"key4"), - Err(Error::StorageValueDecodeFailed(_)), + Err(StorageProofError::DecodeError), )); // checking proof against invalid commitment fails assert_eq!( >::new(sp_core::H256::random(), proof).err(), - Some(Error::StorageRootMismatch) - ); - } - - #[test] - fn proof_with_duplicate_items_is_rejected() { - let (root, mut proof) = craft_valid_storage_proof(); - proof.push(proof.first().unwrap().clone()); - - assert_eq!( - StorageProofChecker::::new(root, proof).map(drop), - Err(Error::DuplicateNodesInProof), + Some(StorageProofError::StorageRootMismatch) ); } @@ -260,13 +345,13 @@ pub mod tests { let mut checker = StorageProofChecker::::new(root, proof.clone()).unwrap(); - checker.read_value(b"key1").unwrap(); + checker.read_value(b"key1").unwrap().unwrap(); checker.read_value(b"key2").unwrap(); checker.read_value(b"key4").unwrap(); checker.read_value(b"key22").unwrap(); assert_eq!(checker.ensure_no_unused_nodes(), Ok(())); let checker = StorageProofChecker::::new(root, proof).unwrap(); - assert_eq!(checker.ensure_no_unused_nodes(), Err(Error::UnusedNodesInTheProof)); + assert_eq!(checker.ensure_no_unused_nodes(), Err(StorageProofError::UnusedKey)); } } diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 99f5ee0d1aee4..5e6e389339353 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -11,19 +11,19 @@ repository.workspace = true workspace = true [dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } -bp-parachains = { path = "../parachains", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -ed25519-dalek = { version = "2.1", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", 
default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +bp-header-chain = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +codec = { workspace = true } +ed25519-dalek = { workspace = true } +finality-grandpa = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index f4fe4a242e79c..9855c32a46895 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -177,6 +177,7 @@ pub fn prepare_parachain_heads_proof( let mut parachains = Vec::with_capacity(heads.len()); let mut root = Default::default(); let mut mdb = MemoryDB::default(); + let mut storage_keys = vec![]; { let mut trie = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (parachain, head) in heads { @@ -185,11 +186,12 @@ pub fn prepare_parachain_heads_proof( trie.insert(&storage_key.0, &head.encode()) .map_err(|_| "TrieMut::insert has failed") .expect("TrieMut::insert should not fail in tests"); + storage_keys.push(storage_key.0); parachains.push((ParaId(parachain), head.hash())); } } - // generate storage proof to be delivered to This chain + // generate storage proof to be delivered to this chain let storage_proof = record_all_trie_keys::, _>(&mdb, &root) .map_err(|_| "record_all_trie_keys has failed") .expect("record_all_trie_keys should not fail in benchmarks"); diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index b94e722024562..c3cf3356184be 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -11,12 +11,12 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive"], workspace = true } # Substrate Dependencies -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-runtime = { workspace = true } +sp-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index 27881bc99d1f8..932e9ade01974 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # Substrate Dependencies -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index cb7eae4f340c7..969cd73d6194f 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -11,50 +11,49 @@ publish = false workspace = true 
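Looking back at the `grow_storage_proof` helper added to `storage_proof.rs` earlier in this diff: its doc comment allows at most 15 extra branches per nibble of the prefix and estimates roughly 33 bytes per added node. A standalone sketch of that sizing estimate (the helper names here are illustrative):

```rust
/// Upper bound on extra nodes reachable from a prefix of `prefix_len` bytes:
/// two nibbles per byte, at most 15 sibling branches per nibble.
fn max_extra_nodes(prefix_len: usize) -> usize {
    prefix_len * 2 * 15
}

/// Rough proof growth, using the ~33 bytes/node estimate from the doc comment.
fn approx_extra_proof_bytes(prefix_len: usize) -> usize {
    max_extra_nodes(prefix_len) * 33
}

fn main() {
    assert_eq!(max_extra_nodes(4), 120);           // 4-byte prefix
    assert_eq!(approx_extra_proof_bytes(4), 3960); // ~3.9 KiB of extra proof
}
```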
[dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["macros", "ws-client"] } +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["macros", "ws-client"], workspace = true } log = { workspace = true } -num-traits = "0.2" -rand = "0.8.5" -scale-info = { version = "2.11.1", features = ["derive"] } -tokio = { version = "1.37", features = ["rt-multi-thread"] } +num-traits = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +serde_json = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } +tokio = { features = ["rt-multi-thread"], workspace = true, default-features = true } thiserror = { workspace = true } +quick_cache = { workspace = true } # Bridge dependencies -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -pallet-bridge-messages = { path = "../../modules/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } +bp-header-chain = { workspace = true, default-features = true } +bp-messages = { workspace = true, default-features = true } +bp-polkadot-core = { workspace = true, default-features = true } +bp-runtime = { workspace = true, default-features = true } +finality-relay = { workspace = true } +relay-utils = { workspace = true } # Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } -pallet-utility = { path = "../../../substrate/frame/utility" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-rpc = { path = "../../../substrate/primitives/rpc" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-trie = { path = "../../../substrate/primitives/trie" } -sp-version = { path = "../../../substrate/primitives/version" } +frame-support = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, 
default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot Dependencies -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm" } +xcm = { workspace = true, default-features = true } [features] default = [] diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 40269fe64c879..227e9c31c5bfc 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -36,6 +36,9 @@ use sp_runtime::{ }; use std::{fmt::Debug, time::Duration}; +/// Signed block type of given chain. +pub type SignedBlockOf = ::SignedBlock; + /// Substrate-based chain from minimal relay-client point of view. pub trait Chain: ChainBase + Clone { /// Chain name. diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs deleted file mode 100644 index 2e7cb7455f76c..0000000000000 --- a/bridges/relays/client-substrate/src/client.rs +++ /dev/null @@ -1,1032 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node client. 
- -use crate::{ - chain::{Chain, ChainWithTransactions}, - guard::Environment, - rpc::{ - SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, - SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - }, - transaction_stall_timeout, AccountKeyPairOf, ChainWithGrandpa, ConnectionParams, Error, HashOf, - HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, -}; - -use async_std::sync::{Arc, Mutex, RwLock}; -use async_trait::async_trait; -use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::DeserializeOwned, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Saturating, Zero}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, Pair, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::{cmp::Ordering, future::Future}; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = - "GrandpaApi_generate_key_ownership_proof"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// The difference between best block number and number of its ancestor, that is enough -/// for us to consider that ancestor an "ancient" block with dropped state. -/// -/// The relay does not assume that it is connected to the archive node, so it always tries -/// to use the best available chain state. But sometimes it still may use state of some -/// old block. If the state of that block is already dropped, relay will see errors when -/// e.g. it tries to prove something. -/// -/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use -/// half of this value. -pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; - -/// Returns `true` if we think that the state is already discarded for given block. -pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { - best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) -} - -/// Opaque justifications subscription type. -pub struct Subscription( - pub(crate) Mutex>>, - // The following field is not explicitly used by the code. But when it is dropped, - // the bakground task receives a shutdown signal. - #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, -); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. -#[derive(Copy, Clone, Debug)] -pub struct SimpleRuntimeVersion { - /// Version of the runtime specification. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. - pub transaction_version: u32, -} - -impl SimpleRuntimeVersion { - /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. 
- pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { - Self { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - } - } -} - -/// Chain runtime version in client -#[derive(Copy, Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. - Custom(SimpleRuntimeVersion), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation that only clones internal references. Different -/// clones of the same client are guaranteed to use the same references. -pub struct Client { - // Lock order: `submit_signed_extrinsic_lock`, `data` - /// Client connection params. - params: Arc, - /// Saved chain runtime version. - chain_runtime_version: ChainRuntimeVersion, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. - submit_signed_extrinsic_lock: Arc>, - /// Genesis block hash. - genesis_hash: HashOf, - /// Shared dynamic data. - data: Arc>, -} - -/// Client data, shared by all `Client` clones. -struct ClientData { - /// Tokio runtime handle. - tokio: Arc, - /// Substrate RPC client. - client: Arc, -} - -/// Already encoded value. -struct PreEncoded(Vec); - -impl Encode for PreEncoded { - fn encode(&self) -> Vec { - self.0.clone() - } -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let mut data = self.data.write().await; - let (tokio, client) = Self::build_client(&self.params).await?; - data.tokio = tokio; - data.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - chain_runtime_version: self.chain_runtime_version, - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - genesis_hash: self.genesis_hash, - data: self.data.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - let params = Arc::new(params); - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. 
- pub async fn try_connect(params: Arc) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(number)).await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version; - let mut client = Self { - params, - chain_runtime_version, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - genesis_hash, - data: Arc::new(RwLock::new(ClientData { tokio, client })), - }; - Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; - Ok(client) - } - - // Check runtime version to understand if we need are connected to expected version, or we - // need to wait for upgrade, we need to abort immediately. - async fn ensure_correct_runtime_version>( - env: &mut E, - expected: ChainRuntimeVersion, - ) -> Result<()> { - // we are only interested if version mode is bundled or passed using CLI - let expected = match expected { - ChainRuntimeVersion::Auto => return Ok(()), - ChainRuntimeVersion::Custom(expected) => expected, - }; - - // we need to wait if actual version is < than expected, we are OK of versions are the - // same and we need to abort if actual version is > than expected - let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); - match actual.spec_version.cmp(&expected.spec_version) { - Ordering::Less => - Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), - Ordering::Equal => Ok(()), - Ordering::Greater => { - log::error!( - target: "bridge", - "The {} client is configured to use runtime version {expected:?} and actual \ - version is {actual:?}. Aborting", - C::NAME, - ); - env.abort().await; - Err(Error::Custom("Aborted".into())) - }, - } - } - - /// Build client to use in connection. - async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - - let uri = match params.uri { - Some(ref uri) => uri.clone(), - None => { - format!( - "{}://{}:{}{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - match params.path { - Some(ref path) => format!("/{}", path), - None => String::new(), - }, - ) - }, - }; - log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); - - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. - pub async fn simple_runtime_version(&self) -> Result { - Ok(match &self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - SimpleRuntimeVersion::from_runtime_version(&runtime_version) - }, - ChainRuntimeVersion::Custom(version) => *version, - }) - } - - /// Returns true if client is connected to at least one peer and is in synced state. 
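Both the removed and the new `build_client` assemble the websocket endpoint the same way: use the explicitly configured URI if there is one, otherwise combine the TLS flag, host, port and optional path. The string assembly on its own, with a stand-in `ConnectionParams` struct rather than the crate's real type:

```rust
/// Stand-in for the connection parameters the client is built from.
struct ConnectionParams {
    uri: Option<String>,
    host: String,
    port: u16,
    path: Option<String>,
    secure: bool,
}

/// Build the websocket endpoint the RPC client should connect to.
fn node_uri(params: &ConnectionParams) -> String {
    match params.uri {
        Some(ref uri) => uri.clone(),
        None => format!(
            "{}://{}:{}{}",
            if params.secure { "wss" } else { "ws" },
            params.host,
            params.port,
            match params.path {
                Some(ref path) => format!("/{path}"),
                None => String::new(),
            },
        ),
    }
}

fn main() {
    let params = ConnectionParams {
        uri: None,
        host: "rpc.example.com".into(),
        port: 443,
        path: Some("relay".into()),
        secure: true,
    };
    assert_eq!(node_uri(&params), "wss://rpc.example.com:443/relay");
    println!("{}", node_uri(&params));
}
```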
- pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateSystemClient::::health(&*client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::finalized_head(&*client).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestFinalizedHeaderHash { - chain: C::NAME.into(), - error: e.boxed(), - }) - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.best_finalized_header().await?.number()) - } - - /// Return header of the best finalized block. - pub async fn best_finalized_header(&self) -> Result { - self.header_by_hash(self.best_finalized_header_hash().await?).await - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::header(&*client, None).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }) - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block(&*client, block_hash).await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::header(&*client, Some(block_hash)).await?) - }) - .await - .map_err(|e| Error::FailedToReadHeaderByHash { - chain: C::NAME.into(), - hash: format!("{block_hash}"), - error: e.boxed(), - }) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) - }) - .await - } - - /// Get a Substrate header by its number. - pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::runtime_version(&*client).await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `MapStorage` value from runtime storage. 
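`ensure_synced` considers the node usable when it is not syncing and either does not expect peers (e.g. a single-node dev chain) or has at least one. The predicate is small enough to restate as a std-only sketch; `Health` is a stand-in for the `system_health` response type:

```rust
/// Stand-in for the `system_health` RPC response.
struct Health {
    is_syncing: bool,
    should_have_peers: bool,
    peers: usize,
}

/// Mirror of the sync check performed before the relay talks to the node.
fn is_synced(health: &Health) -> bool {
    !health.is_syncing && (!health.should_have_peers || health.peers > 0)
}

fn main() {
    // Dev node: no peers expected, not syncing => usable.
    assert!(is_synced(&Health { is_syncing: false, should_have_peers: false, peers: 0 }));
    // Live node that is still syncing => not usable yet.
    assert!(!is_synced(&Health { is_syncing: true, should_have_peers: true, peers: 5 }));
    // Live node with peers and fully synced => usable.
    assert!(is_synced(&Health { is_syncing: false, should_have_peers: true, peers: 5 }));
    println!("health predicate behaves as expected");
}
```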
- pub async fn storage_map_value( - &self, - pallet_prefix: &str, - key: &T::Key, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `DoubleMapStorage` value from runtime storage. - pub async fn storage_double_map_value( - &self, - pallet_prefix: &str, - key1: &T::Key1, - key2: &T::Key2, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key1, key2); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. - pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - let cloned_storage_key = storage_key.clone(); - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::storage(&*client, storage_key.clone(), block_hash) - .await?) - }) - .await - .map_err(|e| Error::FailedToReadRuntimeStorageValue { - chain: C::NAME.into(), - key: cloned_storage_key, - error: e.boxed(), - }) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - let best_header_hash = self.best_header().await?.hash(); - self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> - where - C: ChainWithTransactions, - { - let runtime_version = self.simple_runtime_version().await?; - Ok(SignParam:: { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - genesis_hash: self.genesis_hash, - signer, - }) - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
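All of the storage getters above share the same two-step shape: fetch the raw `StorageData` bytes, then SCALE-decode them into the expected type, turning a decode failure into `ResponseParseFailed`. A small sketch of the decode step using `parity-scale-codec` directly, under its usual `codec` alias (assumes the crate's `derive` feature; `AccountInfo` and the simplified error handling are illustrative):

```rust
use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
struct AccountInfo {
    nonce: u32,
    free_balance: u128,
}

/// Decode an optional raw storage value into a typed one, as the client getters do.
fn decode_storage_value<T: Decode>(raw: Option<Vec<u8>>) -> Result<Option<T>, codec::Error> {
    raw.map(|encoded| T::decode(&mut &encoded[..])).transpose()
}

fn main() {
    let original = AccountInfo { nonce: 7, free_balance: 1_000 };
    let raw = Some(original.encode());

    let decoded: Option<AccountInfo> = decode_storage_value(raw).expect("valid encoding");
    assert_eq!(decoded, Some(original));

    // A missing storage entry simply decodes to `None`.
    let missing: Option<AccountInfo> = decode_storage_value(None).unwrap();
    assert!(missing.is_none());
}
```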
- pub async fn submit_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let signing_data = self.build_sign_params(signer.clone()).await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it - // has been changed to `B[num=100]`. Hash of `A` has been included into transaction - // signature payload. So when signature will be checked, the check will fail and transaction - // will be dropped from the pool. - let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = - SubstrateAuthorClient::::submit_extrinsic(&*client, Bytes(signed_extrinsic)) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result> - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let self_clone = self.clone(); - let signing_data = self.build_sign_params(signer.clone()).await?; - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let best_header_id = best_header.id(); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let stall_timeout = transaction_stall_timeout( - extrinsic.era.mortality_period(), - C::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
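The `submit_signed_extrinsic_lock` exists purely to make the read-nonce/sign/submit sequence atomic across concurrent relay tasks; without it, two tasks could read the same `next_account_index` and one of the transactions would be dropped by the pool. A toy, std-only illustration of why the guard is held across the whole sequence (the shared counter stands in for the node's nonce query):

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Shared "chain view" of the next nonce and the lock that serializes submissions.
    let next_nonce = Arc::new(Mutex::new(0u32));
    let submit_lock = Arc::new(Mutex::new(()));
    let submitted = Arc::new(Mutex::new(Vec::new()));

    let mut handles = Vec::new();
    for _ in 0..8 {
        let (next_nonce, submit_lock, submitted) =
            (next_nonce.clone(), submit_lock.clone(), submitted.clone());
        handles.push(thread::spawn(move || {
            // Hold the guard for the whole read-nonce + "sign" + "submit" sequence.
            let _guard = submit_lock.lock().unwrap();
            let nonce = {
                let mut n = next_nonce.lock().unwrap();
                let nonce = *n;
                *n += 1; // the real client learns the next value from the node instead
                nonce
            };
            submitted.lock().unwrap().push(nonce);
        }));
    }
    for h in handles {
        h.join().unwrap();
    }

    let mut nonces = submitted.lock().unwrap().clone();
    nonces.sort_unstable();
    // Every submission got a distinct nonce, so none would be dropped from the pool.
    assert_eq!(nonces, (0..8u32).collect::<Vec<_>>());
    println!("all nonces unique: {:?}", nonces);
}
```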
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - let (tracker, subscription) = self - .jsonrpsee_execute(move |client| async move { - let tx_hash = C::Hasher::hash(&signed_extrinsic); - let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( - &*client, - Bytes(signed_extrinsic), - ) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - let tracker = TransactionTracker::new( - self_clone, - stall_timeout, - tx_hash, - Subscription(Mutex::new(receiver), cancel_sender), - ); - Ok((tracker, subscription)) - }) - .await?; - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(tracker) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) - }) - .await - } - - /// Validate transaction at given block state. - pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(at_block)).await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Returns weight of the given transaction. - pub async fn extimate_extrinsic_weight( - &self, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction_len = transaction.encoded_size() as u32; - - let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); - let data = Bytes((transaction, transaction_len).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, None).await?; - let dispatch_info = - RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(dispatch_info.weight) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block, provided the input and output types. - /// It also performs the input encode and output decode. 
- pub async fn typed_state_call( - &self, - method_name: String, - input: Input, - at_block: Option, - ) -> Result { - let encoded_output = self - .state_call(method_name.clone(), Bytes(input.encode()), at_block) - .await - .map_err(|e| Error::ErrorExecutingRuntimeCall { - chain: C::NAME.into(), - method: method_name, - error: e.boxed(), - })?; - Output::decode(&mut &encoded_output.0[..]).map_err(Error::ResponseParseFailed) - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. - pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateSystemClient::::properties(&*client).await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new finality justifications stream. - pub async fn subscribe_finality_justifications>( - &self, - ) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(FC::subscribe_justifications(&client).await?) - }) - .await?; - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(Subscription(Mutex::new(receiver), cancel_sender)) - } - - /// Generates a proof of key ownership for the given authority in the given set. - pub async fn generate_grandpa_key_ownership_proof( - &self, - at: HashOf, - set_id: sp_consensus_grandpa::SetId, - authority_id: sp_consensus_grandpa::AuthorityId, - ) -> Result> - where - C: ChainWithGrandpa, - { - self.typed_state_call( - SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), - (set_id, authority_id), - Some(at), - ) - .await - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send + 'static, - T: Send + 'static, - { - let data = self.data.read().await; - let client = data.client.clone(); - data.tokio.spawn(make_jsonrpsee_future(client)).await? - } - - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - pub fn can_start_version_guard(&self) -> bool { - !matches!(self.chain_runtime_version, ChainRuntimeVersion::Auto) - } -} - -impl Subscription { - /// Consumes subscription and returns future statuses stream. 
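`jsonrpsee_execute` exists because the relayer runs on `async-std` while `jsonrpsee` expects a tokio reactor: each call is spawned onto a tokio `Runtime` owned by the client and the resulting `JoinHandle` is awaited. A minimal, synchronous sketch of that "run it on my own runtime" pattern (assumes the `tokio` crate with its multi-threaded runtime feature; the arithmetic stands in for an RPC request):

```rust
use std::sync::Arc;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The client keeps one runtime alive for all RPC calls.
    let tokio = Arc::new(tokio::runtime::Runtime::new()?);

    // "RPC call": spawn the future onto the runtime and wait for its JoinHandle.
    let handle = tokio.spawn(async {
        // In the real client this would be a jsonrpsee request.
        40 + 2
    });
    let answer: i32 = tokio.block_on(handle)?;

    assert_eq!(answer, 42);
    println!("spawned call returned {answer}");
    Ok(())
}
```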
- pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(Some(self), |mut this| async move { - let Some(this) = this.take() else { return None }; - let item = this.0.lock().await.next().await.unwrap_or(None); - match item { - Some(item) => Some((item, Some(this))), - None => { - // let's make it explicit here - let _ = this.1.send(()); - None - }, - } - }) - } - - /// Return next item from the subscription. - pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - cancel_receiver: futures::channel::oneshot::Receiver<()>, - ) { - log::trace!( - target: "bridge", - "Starting background worker for {} {} subscription stream.", - chain_name, - item_type, - ); - - futures::pin_mut!(subscription, cancel_receiver); - loop { - match futures::future::select(subscription.next(), &mut cancel_receiver).await { - futures::future::Either::Left((Some(Ok(item)), _)) => - if sender.send(Some(item)).await.is_err() { - log::trace!( - target: "bridge", - "{} {} subscription stream: no listener. Stopping background worker.", - chain_name, - item_type, - ); - - break - }, - futures::future::Either::Left((Some(Err(e)), _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Left((None, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Right((_, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream: listener has been dropped. 
Stopping background worker.", - chain_name, - item_type, - ); - break; - }, - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; - use futures::{channel::mpsc::unbounded, FutureExt}; - - async fn run_ensure_correct_runtime_version( - expected: ChainRuntimeVersion, - actual: RuntimeVersion, - ) -> Result<()> { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, _slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - runtime_version_tx.send(actual).await.unwrap(); - let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; - - let ensure_correct_runtime_version = - Client::::ensure_correct_runtime_version(&mut env, expected).boxed(); - let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); - futures::pin_mut!(ensure_correct_runtime_version, aborted); - futures::future::select(ensure_correct_runtime_version, aborted) - .await - .into_inner() - .0 - } - - #[async_std::test] - async fn ensure_correct_runtime_version_works() { - // when we are configured to use auto version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Auto, - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual == expected - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual spec version < expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, - ) - .await, - Err(Error::WaitingForRuntimeUpgrade { - expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, - actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, - .. - }), - )); - // when actual spec version > expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 101, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Err(Error::Custom(_)), - )); - } -} diff --git a/bridges/relays/client-substrate/src/client/caching.rs b/bridges/relays/client-substrate/src/client/caching.rs new file mode 100644 index 0000000000000..a574e5985bc82 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/caching.rs @@ -0,0 +1,472 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that is caching (whenever possible) results of its backend +//! method calls. + +use crate::{ + client::{Client, SubscriptionBroadcaster}, + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, ANCIENT_BLOCK_THRESHOLD, +}; +use std::{cmp::Ordering, future::Future, task::Poll}; + +use async_std::{ + sync::{Arc, Mutex, RwLock}, + task::JoinHandle, +}; +use async_trait::async_trait; +use codec::Encode; +use frame_support::weights::Weight; +use futures::{FutureExt, StreamExt}; +use quick_cache::unsync::Cache; +use sp_consensus_grandpa::{AuthorityId, OpaqueKeyOwnershipProof, SetId}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; + +/// `quick_cache::unsync::Cache` wrapped in async-aware synchronization primitives. +type SyncCache = Arc>>; + +/// Client implementation that is caching (whenever possible) results of its backend +/// method calls. Apart from caching call results, it also supports some (at the +/// moment: justifications) subscription sharing, meaning that the single server +/// subscription may be shared by multiple subscribers at the client side. +#[derive(Clone)] +pub struct CachingClient> { + backend: B, + data: Arc>, +} + +/// Client data, shared by all `CachingClient` clones. +struct ClientData { + grandpa_justifications: Arc>>>, + beefy_justifications: Arc>>>, + background_task_handle: Arc>>>, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + // `quick_cache::sync::Cache` has the `get_or_insert_async` method, which fits our needs, + // but it uses synchronization primitives that are not aware of async execution. They + // can block the executor threads and cause deadlocks => let's use primitives from + // `async_std` crate around `quick_cache::unsync::Cache` + header_hash_by_number_cache: SyncCache, HashOf>, + header_by_hash_cache: SyncCache, HeaderOf>, + block_by_hash_cache: SyncCache, SignedBlockOf>, + raw_storage_value_cache: SyncCache<(HashOf, StorageKey), Option>, + state_call_cache: SyncCache<(HashOf, String, Bytes), Bytes>, +} + +impl> CachingClient { + /// Creates new `CachingClient` on top of given `backend`. 
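The new `CachingClient` is a decorator: it implements the same `Client` trait as its backend, answers what it can from caches and shared subscriptions, and forwards everything else. A compact, std-only sketch of that layering (the `Backend` trait and its single method are simplified stand-ins, not the real `Client` trait):

```rust
use std::{cell::RefCell, collections::HashMap};

/// Simplified stand-in for the layered client interface.
trait Backend {
    fn header_by_number(&self, number: u32) -> String;
}

/// "RPC" backend: every call would hit the node.
struct Rpc;
impl Backend for Rpc {
    fn header_by_number(&self, number: u32) -> String {
        format!("header #{number} fetched from the node")
    }
}

/// Caching layer that implements the same trait and delegates on a miss.
struct Caching<B: Backend> {
    backend: B,
    cache: RefCell<HashMap<u32, String>>,
}

impl<B: Backend> Backend for Caching<B> {
    fn header_by_number(&self, number: u32) -> String {
        self.cache
            .borrow_mut()
            .entry(number)
            .or_insert_with(|| self.backend.header_by_number(number))
            .clone()
    }
}

fn main() {
    let client = Caching { backend: Rpc, cache: RefCell::new(HashMap::new()) };
    // First call goes to the backend, second one is served from the cache.
    println!("{}", client.header_by_number(100));
    println!("{}", client.header_by_number(100));
}
```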
+ pub async fn new(backend: B) -> Self { + // most of relayer operations will never touch more than `ANCIENT_BLOCK_THRESHOLD` + // headers, so we'll use this as a cache capacity for all chain-related caches + let chain_state_capacity = ANCIENT_BLOCK_THRESHOLD as usize; + let best_header = Arc::new(RwLock::new(None)); + let best_finalized_header = Arc::new(RwLock::new(None)); + let header_by_hash_cache = Arc::new(RwLock::new(Cache::new(chain_state_capacity))); + let background_task_handle = Self::start_background_task( + backend.clone(), + best_header.clone(), + best_finalized_header.clone(), + header_by_hash_cache.clone(), + ) + .await; + CachingClient { + backend, + data: Arc::new(ClientData { + grandpa_justifications: Arc::new(Mutex::new(None)), + beefy_justifications: Arc::new(Mutex::new(None)), + background_task_handle: Arc::new(Mutex::new(background_task_handle)), + best_header, + best_finalized_header, + header_hash_by_number_cache: Arc::new(RwLock::new(Cache::new( + chain_state_capacity, + ))), + header_by_hash_cache, + block_by_hash_cache: Arc::new(RwLock::new(Cache::new(chain_state_capacity))), + raw_storage_value_cache: Arc::new(RwLock::new(Cache::new(1_024))), + state_call_cache: Arc::new(RwLock::new(Cache::new(1_024))), + }), + } + } + + /// Try to get value from the cache, or compute and insert it using given future. + async fn get_or_insert_async( + &self, + cache: &Arc>>, + key: &K, + with: impl std::future::Future>, + ) -> Result { + // try to get cached value first using read lock + { + let cache = cache.read().await; + if let Some(value) = cache.get(key) { + return Ok(value.clone()) + } + } + + // let's compute the value without holding any locks - it may cause additional misses and + // double insertions, but that's better than holding a lock for a while + let value = with.await?; + + // insert/update the value in the cache + cache.write().await.insert(key.clone(), value.clone()); + Ok(value) + } + + /// Subscribe to finality justifications, trying to reuse existing subscription. + async fn subscribe_finality_justifications<'a>( + &'a self, + maybe_broadcaster: &Mutex>>, + do_subscribe: impl Future>> + 'a, + ) -> Result> { + let mut maybe_broadcaster = maybe_broadcaster.lock().await; + let broadcaster = match maybe_broadcaster.as_ref() { + Some(justifications) => justifications, + None => { + let broadcaster = match SubscriptionBroadcaster::new(do_subscribe.await?) { + Ok(broadcaster) => broadcaster, + Err(subscription) => return Ok(subscription), + }; + maybe_broadcaster.get_or_insert(broadcaster) + }, + }; + + broadcaster.subscribe().await + } + + /// Start background task that reads best (and best finalized) headers from subscriptions. + async fn start_background_task( + backend: B, + best_header: Arc>>>, + best_finalized_header: Arc>>>, + header_by_hash_cache: SyncCache, HeaderOf>, + ) -> JoinHandle> { + async_std::task::spawn(async move { + // initialize by reading headers directly from backend to avoid doing that in the + // high-level code + let mut last_finalized_header = + backend.header_by_hash(backend.best_finalized_header_hash().await?).await?; + *best_header.write().await = Some(backend.best_header().await?); + *best_finalized_header.write().await = Some(last_finalized_header.clone()); + + // ...and then continue with subscriptions + let mut best_headers = backend.subscribe_best_headers().await?; + let mut finalized_headers = backend.subscribe_finalized_headers().await?; + loop { + futures::select! 
{ + new_best_header = best_headers.next().fuse() => { + // we assume that the best header is always the actual best header, even if its + // number is lower than the number of previous-best-header (chain may use its own + // best header selection algorithms) + let new_best_header = new_best_header + .ok_or_else(|| Error::ChannelError(format!("Mandatory best headers subscription for {} has finished", C::NAME)))?; + let new_best_header_hash = new_best_header.hash(); + header_by_hash_cache.write().await.insert(new_best_header_hash, new_best_header.clone()); + *best_header.write().await = Some(new_best_header); + }, + new_finalized_header = finalized_headers.next().fuse() => { + // in theory we'll always get finalized headers in order, but let's double check + let new_finalized_header = new_finalized_header. + ok_or_else(|| Error::ChannelError(format!("Finalized headers subscription for {} has finished", C::NAME)))?; + let new_finalized_header_number = *new_finalized_header.number(); + let last_finalized_header_number = *last_finalized_header.number(); + match new_finalized_header_number.cmp(&last_finalized_header_number) { + Ordering::Greater => { + let new_finalized_header_hash = new_finalized_header.hash(); + header_by_hash_cache.write().await.insert(new_finalized_header_hash, new_finalized_header.clone()); + *best_finalized_header.write().await = Some(new_finalized_header.clone()); + last_finalized_header = new_finalized_header; + }, + Ordering::Less => { + return Err(Error::unordered_finalized_headers::( + new_finalized_header_number, + last_finalized_header_number, + )); + }, + _ => (), + } + }, + } + } + }) + } + + /// Ensure that the background task is active. + async fn ensure_background_task_active(&self) -> Result<()> { + let mut background_task_handle = self.data.background_task_handle.lock().await; + if let Poll::Ready(result) = futures::poll!(&mut *background_task_handle) { + return Err(Error::ChannelError(format!( + "Background task of {} client has exited with result: {:?}", + C::NAME, + result + ))) + } + + Ok(()) + } + + /// Try to get header, read elsewhere by background task through subscription. 
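The background task above accepts best headers unconditionally but insists that finalized headers arrive with non-decreasing numbers, treating a lower number as a fatal error that forces a restart. That ordering rule in isolation, as a std-only sketch with simplified header and error types:

```rust
use std::cmp::Ordering;

#[derive(Debug)]
struct UnorderedFinalizedHeaders {
    new: u32,
    last: u32,
}

/// Apply one finalized-header notification to the tracked state.
fn on_finalized_header(last_finalized: &mut u32, new: u32) -> Result<(), UnorderedFinalizedHeaders> {
    match new.cmp(last_finalized) {
        // Progress: remember the new finalized header (the real task also caches it).
        Ordering::Greater => {
            *last_finalized = new;
            Ok(())
        },
        // Duplicate notification: nothing to do.
        Ordering::Equal => Ok(()),
        // Finality went "backwards": the subscription is broken, restart the client.
        Ordering::Less => Err(UnorderedFinalizedHeaders { new, last: *last_finalized }),
    }
}

fn main() {
    let mut last = 100;
    assert!(on_finalized_header(&mut last, 101).is_ok());
    assert!(on_finalized_header(&mut last, 101).is_ok()); // duplicate is fine
    assert!(on_finalized_header(&mut last, 99).is_err()); // out of order => error
    println!("last finalized header: {last}");
}
```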
+ async fn read_header_from_background<'a>( + &'a self, + header: &Arc>>>, + read_header_from_backend: impl Future>> + 'a, + ) -> Result> { + // ensure that the background task is active + self.ensure_background_task_active().await?; + + // now we know that the background task is active, so we could trust that the + // `header` has the most recent updates from it + match header.read().await.clone() { + Some(header) => Ok(header), + None => { + // header has not yet been read from the subscription, which means that + // we are just starting - let's read header directly from backend this time + read_header_from_backend.await + }, + } + } +} + +impl> std::fmt::Debug for CachingClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("CachingClient<{:?}>", self.backend)) + } +} + +#[async_trait] +impl> Client for CachingClient { + async fn ensure_synced(&self) -> Result<()> { + self.backend.ensure_synced().await + } + + async fn reconnect(&self) -> Result<()> { + self.backend.reconnect().await?; + // since we have new underlying client, we need to restart subscriptions too + *self.data.grandpa_justifications.lock().await = None; + *self.data.beefy_justifications.lock().await = None; + // also restart background task too + *self.data.best_header.write().await = None; + *self.data.best_finalized_header.write().await = None; + *self.data.background_task_handle.lock().await = Self::start_background_task( + self.backend.clone(), + self.data.best_header.clone(), + self.data.best_finalized_header.clone(), + self.data.header_by_hash_cache.clone(), + ) + .await; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.backend.genesis_hash() + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.get_or_insert_async( + &self.data.header_hash_by_number_cache, + &number, + self.backend.header_hash_by_number(number), + ) + .await + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.header_by_hash_cache, + &hash, + self.backend.header_by_hash(hash), + ) + .await + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.block_by_hash_cache, + &hash, + self.backend.block_by_hash(hash), + ) + .await + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.read_header_from_background( + &self.data.best_finalized_header, + self.backend.best_finalized_header(), + ) + .await + .map(|h| h.hash()) + } + + async fn best_header(&self) -> Result> { + self.read_header_from_background(&self.data.best_header, self.backend.best_header()) + .await + } + + async fn subscribe_best_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_best_headers().await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + // we may share the sunbscription here, but atm there's no callers of this method + self.backend.subscribe_finalized_headers().await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications( + &self.data.grandpa_justifications, + self.backend.subscribe_grandpa_finality_justifications(), + ) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: SetId, + authority_id: AuthorityId, + ) -> Result> { + self.backend + .generate_grandpa_key_ownership_proof(at, set_id, authority_id) + .await + } + + 
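The header, block and storage getters above all go through `get_or_insert_async`, which deliberately releases the read lock before computing a missing value: an occasional duplicate fetch is accepted rather than holding a lock across an RPC call. A sketch of that probe-under-read-lock, compute-unlocked, insert-under-write-lock pattern over a plain `HashMap` behind `async_std::sync::RwLock` (the real code uses `quick_cache::unsync::Cache`):

```rust
use async_std::sync::{Arc, RwLock};
use std::collections::HashMap;

async fn get_or_insert_async<F>(
    cache: &Arc<RwLock<HashMap<u32, String>>>,
    key: u32,
    compute: F,
) -> String
where
    F: std::future::Future<Output = String>,
{
    // Fast path: only a read lock is taken to probe the cache.
    if let Some(value) = cache.read().await.get(&key) {
        return value.clone();
    }
    // Slow path: compute with no lock held. Two tasks may race here and both compute
    // the value; that is accepted instead of holding a lock across an RPC call.
    let value = compute.await;
    cache.write().await.insert(key, value.clone());
    value
}

fn main() {
    async_std::task::block_on(async {
        let cache = Arc::new(RwLock::new(HashMap::new()));
        let first =
            get_or_insert_async(&cache, 7, async { "fetched from backend".to_string() }).await;
        // The second call never runs its `compute` future: the value is already cached.
        let second = get_or_insert_async(&cache, 7, async { "recomputed".to_string() }).await;
        assert_eq!(first, second);
        println!("cached value: {first}");
    });
}
```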
async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications( + &self.data.beefy_justifications, + self.backend.subscribe_beefy_finality_justifications(), + ) + .await + } + + async fn token_decimals(&self) -> Result> { + self.backend.token_decimals().await + } + + async fn runtime_version(&self) -> Result { + self.backend.runtime_version().await + } + + async fn simple_runtime_version(&self) -> Result { + self.backend.simple_runtime_version().await + } + + fn can_start_version_guard(&self) -> bool { + self.backend.can_start_version_guard() + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.get_or_insert_async( + &self.data.raw_storage_value_cache, + &(at, storage_key.clone()), + self.backend.raw_storage_value(at, storage_key), + ) + .await + } + + async fn pending_extrinsics(&self) -> Result> { + self.backend.pending_extrinsics().await + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + self.backend.submit_unsigned_extrinsic(transaction).await + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend.submit_signed_extrinsic(signer, prepare_extrinsic).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend + .submit_and_watch_signed_extrinsic(signer, prepare_extrinsic) + .await + .map(|t| t.switch_environment(self.clone())) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.validate_transaction(at, transaction).await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.estimate_extrinsic_weight(at, transaction).await + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = Bytes(arguments.encode()); + self.get_or_insert_async( + &self.data.state_call_cache, + &(at, method.clone(), encoded_arguments), + self.backend.raw_state_call(at, method, arguments), + ) + .await + } + + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)> { + self.backend.prove_storage(at, keys).await + } +} diff --git a/bridges/relays/client-substrate/src/client/mod.rs b/bridges/relays/client-substrate/src/client/mod.rs new file mode 100644 index 0000000000000..62a1119d718ff --- /dev/null +++ b/bridges/relays/client-substrate/src/client/mod.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Layered Substrate client implementation. + +use crate::{Chain, ConnectionParams}; + +use caching::CachingClient; +use num_traits::Saturating; +use rpc::RpcClient; +use sp_version::RuntimeVersion; + +pub mod caching; +pub mod rpc; + +mod rpc_api; +mod subscription; +mod traits; + +pub use subscription::{StreamDescription, Subscription, SubscriptionBroadcaster}; +pub use traits::Client; + +/// Type of RPC client with caching support. +pub type RpcWithCachingClient = CachingClient>; + +/// Creates new RPC client with caching support. +pub async fn rpc_with_caching(params: ConnectionParams) -> RpcWithCachingClient { + let rpc = rpc::RpcClient::::new(params).await; + caching::CachingClient::new(rpc).await +} + +/// The difference between best block number and number of its ancestor, that is enough +/// for us to consider that ancestor an "ancient" block with dropped state. +/// +/// The relay does not assume that it is connected to the archive node, so it always tries +/// to use the best available chain state. But sometimes it still may use state of some +/// old block. If the state of that block is already dropped, relay will see errors when +/// e.g. it tries to prove something. +/// +/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use +/// half of this value. +pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; + +/// Returns `true` if we think that the state is already discarded for given block. +pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { + best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) +} + +/// Opaque GRANDPA authorities set. +pub type OpaqueGrandpaAuthoritiesSet = Vec; + +/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. +#[derive(Copy, Clone, Debug)] +pub struct SimpleRuntimeVersion { + /// Version of the runtime specification. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. + pub transaction_version: u32, +} + +impl SimpleRuntimeVersion { + /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. + pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { + Self { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + } + } +} + +/// Chain runtime version in client +#[derive(Copy, Clone, Debug)] +pub enum ChainRuntimeVersion { + /// Auto query from chain. + Auto, + /// Custom runtime version, defined by user. + Custom(SimpleRuntimeVersion), +} diff --git a/bridges/relays/client-substrate/src/client/rpc.rs b/bridges/relays/client-substrate/src/client/rpc.rs new file mode 100644 index 0000000000000..9c7f769462e56 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/rpc.rs @@ -0,0 +1,755 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
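`is_ancient_block` is how the relay decides whether a non-archive node is still likely to hold state for a block: anything at least `ANCIENT_BLOCK_THRESHOLD` blocks (half of the 256 blocks a default node retains) behind the best block is treated as pruned. Restated as a tiny std-only example over `u32` block numbers:

```rust
const ANCIENT_BLOCK_THRESHOLD: u32 = 128;

/// Same check as the client helper: is `block` far enough behind `best`
/// that its state has probably been pruned?
fn is_ancient_block(block: u32, best: u32) -> bool {
    best.saturating_sub(block) >= ANCIENT_BLOCK_THRESHOLD
}

fn main() {
    let best = 1_000;
    assert!(!is_ancient_block(900, best)); // 100 blocks behind: state should still exist
    assert!(is_ancient_block(872, best)); // exactly 128 behind: already treated as ancient
    assert!(is_ancient_block(0, best)); // genesis state is long gone on a pruned node
    assert!(!is_ancient_block(1_200, best)); // "future" block saturates to 0, never ancient
    println!("threshold = {ANCIENT_BLOCK_THRESHOLD} blocks");
}
```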
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that connects to the Substrate node over `ws`/`wss` connection +//! and is using RPC methods to get required data and submit transactions. + +use crate::{ + client::{ + rpc_api::{ + SubstrateAuthorClient, SubstrateBeefyClient, SubstrateChainClient, + SubstrateFrameSystemClient, SubstrateGrandpaClient, SubstrateStateClient, + SubstrateSystemClient, + }, + subscription::{StreamDescription, Subscription}, + Client, + }, + error::{Error, Result}, + guard::Environment, + transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BalanceOf, BlockNumberOf, Chain, + ChainRuntimeVersion, ChainWithGrandpa, ChainWithTransactions, ConnectionParams, HashOf, + HeaderIdOf, HeaderOf, NonceOf, SignParam, SignedBlockOf, SimpleRuntimeVersion, + TransactionTracker, UnsignedTransaction, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; +use async_trait::async_trait; +use bp_runtime::HeaderIdProvider; +use codec::Encode; +use frame_support::weights::Weight; +use futures::TryFutureExt; +use jsonrpsee::{ + core::{client::Subscription as RpcSubscription, ClientError}, + ws_client::{WsClient, WsClientBuilder}, +}; +use num_traits::Zero; +use pallet_transaction_payment::RuntimeDispatchInfo; +use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Hasher, Pair, +}; +use sp_runtime::{ + traits::Header, + transaction_validity::{TransactionSource, TransactionValidity}, +}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::{cmp::Ordering, future::Future, marker::PhantomData}; + +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; + +const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; +const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; +const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = + "GrandpaApi_generate_key_ownership_proof"; + +/// Client implementation that connects to the Substrate node over `ws`/`wss` connection +/// and is using RPC methods to get required data and submit transactions. +pub struct RpcClient { + // Lock order: `submit_signed_extrinsic_lock`, `data` + /// Client connection params. + params: Arc, + /// If several tasks are submitting their transactions simultaneously using + /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of + /// transactions will be rejected from the pool. This lock is here to prevent situations like + /// that. + submit_signed_extrinsic_lock: Arc>, + /// Genesis block hash. + genesis_hash: HashOf, + /// Shared dynamic data. + data: Arc>, + /// Generic arguments dump. + _phantom: PhantomData, +} + +/// Client data, shared by all `RpcClient` clones. +struct ClientData { + /// Tokio runtime handle. + tokio: Arc, + /// Substrate RPC client. + client: Arc, +} + +/// Already encoded value. 
+struct PreEncoded(Vec); + +impl Encode for PreEncoded { + fn encode(&self) -> Vec { + self.0.clone() + } +} + +impl std::fmt::Debug for RpcClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("RpcClient<{}>", C::NAME)) + } +} + +impl RpcClient { + /// Returns client that is able to call RPCs on Substrate node over websocket connection. + /// + /// This function will keep connecting to given Substrate node until connection is established + /// and is functional. If an attempt fails, it will wait for `RECONNECT_DELAY` and retry again. + pub async fn new(params: ConnectionParams) -> Self { + let params = Arc::new(params); + loop { + match Self::try_connect(params.clone()).await { + Ok(client) => return client, + Err(error) => log::error!( + target: "bridge", + "Failed to connect to {} node: {:?}. Going to retry in {}s", + C::NAME, + error, + RECONNECT_DELAY.as_secs(), + ), + } + + async_std::task::sleep(RECONNECT_DELAY).await; + } + } + + /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection + /// has been established or error otherwise. + async fn try_connect(params: Arc) -> Result { + let (tokio, client) = Self::build_client(&params).await?; + + let genesis_hash_client = client.clone(); + let genesis_hash = tokio + .spawn(async move { + SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(Zero::zero())) + .await + }) + .await??; + + let chain_runtime_version = params.chain_runtime_version; + let mut client = Self { + params, + submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), + genesis_hash, + data: Arc::new(RwLock::new(ClientData { tokio, client })), + _phantom: PhantomData, + }; + Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; + Ok(client) + } + + // Check the runtime version to understand whether we are connected to the expected version, + // whether we need to wait for a runtime upgrade or whether we need to abort immediately. + async fn ensure_correct_runtime_version>( + env: &mut E, + expected: ChainRuntimeVersion, + ) -> Result<()> { + // we are only interested if version mode is bundled or passed using CLI + let expected = match expected { + ChainRuntimeVersion::Auto => return Ok(()), + ChainRuntimeVersion::Custom(expected) => expected, + }; + + // we need to wait if the actual version is < than expected, we are OK if versions are the + // same and we need to abort if the actual version is > than expected + let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); + match actual.spec_version.cmp(&expected.spec_version) { + Ordering::Less => + Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), + Ordering::Equal => Ok(()), + Ordering::Greater => { + log::error!( + target: "bridge", + "The {} client is configured to use runtime version {expected:?} and actual \ + version is {actual:?}. Aborting", + C::NAME, + ); + env.abort().await; + Err(Error::Custom("Aborted".into())) + }, + } + } + + /// Build client to use in connection.
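The runtime-version guard reduces to comparing the on-chain `spec_version` with the configured one: lower means wait for the runtime upgrade, equal means proceed, higher means the relay itself is outdated and must abort. The decision table as a small std-only sketch (the `VersionCheck` enum is a simplified stand-in for the client's error variants):

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq)]
enum VersionCheck {
    Proceed,
    WaitForRuntimeUpgrade { expected: u32, actual: u32 },
    AbortOutdatedRelay { expected: u32, actual: u32 },
}

fn check_spec_version(expected: u32, actual: u32) -> VersionCheck {
    match actual.cmp(&expected) {
        Ordering::Less => VersionCheck::WaitForRuntimeUpgrade { expected, actual },
        Ordering::Equal => VersionCheck::Proceed,
        Ordering::Greater => VersionCheck::AbortOutdatedRelay { expected, actual },
    }
}

fn main() {
    assert_eq!(check_spec_version(100, 100), VersionCheck::Proceed);
    assert_eq!(
        check_spec_version(100, 99),
        VersionCheck::WaitForRuntimeUpgrade { expected: 100, actual: 99 }
    );
    assert_eq!(
        check_spec_version(100, 101),
        VersionCheck::AbortOutdatedRelay { expected: 100, actual: 101 }
    );
    println!("version checks behave as expected");
}
```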
+ async fn build_client( + params: &ConnectionParams, + ) -> Result<(Arc, Arc)> { + let tokio = tokio::runtime::Runtime::new()?; + let uri = match params.uri { + Some(ref uri) => uri.clone(), + None => { + format!( + "{}://{}:{}{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + match params.path { + Some(ref path) => format!("/{}", path), + None => String::new(), + }, + ) + }, + }; + log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); + + let client = tokio + .spawn(async move { + WsClientBuilder::default() + .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await + }) + .await??; + + Ok((Arc::new(tokio), Arc::new(client))) + } + + /// Execute jsonrpsee future in tokio context. + async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result + where + MF: FnOnce(Arc) -> F + Send + 'static, + F: Future> + Send + 'static, + T: Send + 'static, + { + let data = self.data.read().await; + let client = data.client.clone(); + data.tokio.spawn(make_jsonrpsee_future(client)).await? + } + + /// Prepare parameters used to sign chain transactions. + async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> + where + C: ChainWithTransactions, + { + let runtime_version = self.simple_runtime_version().await?; + Ok(SignParam:: { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + genesis_hash: self.genesis_hash, + signer, + }) + } + + /// Get the nonce of the given Substrate account. + pub async fn next_account_index(&self, account: AccountIdOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) + }) + .await + } + + /// Subscribe to finality justifications. + async fn subscribe_finality_justifications( + &self, + gadget_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + ) -> Result> + where + Fut: Future, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(|e| Error::failed_to_subscribe_justification::(e)) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} justifications", gadget_name), C::NAME.into()), + subscription, + )) + } + + /// Subscribe to headers stream. + async fn subscribe_headers( + &self, + stream_name: &str, + do_subscribe: impl FnOnce(Arc) -> Fut + Send + 'static, + map_err: impl FnOnce(Error) -> Error, + ) -> Result>> + where + Fut: Future>, ClientError>> + Send, + { + let subscription = self + .jsonrpsee_execute(move |client| async move { Ok(do_subscribe(client).await?) }) + .map_err(map_err) + .await?; + + Ok(Subscription::new_forwarded( + StreamDescription::new(format!("{} headers", stream_name), C::NAME.into()), + subscription, + )) + } +} + +impl Clone for RpcClient { + fn clone(&self) -> Self { + RpcClient { + params: self.params.clone(), + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), + genesis_hash: self.genesis_hash, + data: self.data.clone(), + _phantom: PhantomData, + } + } +} + +#[async_trait] +impl Client for RpcClient { + async fn ensure_synced(&self) -> Result<()> { + let health = self + .jsonrpsee_execute(|client| async move { + Ok(SubstrateSystemClient::::health(&*client).await?) 
+ }) + .await + .map_err(|e| Error::failed_to_get_system_health::(e))?; + + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + } + + async fn reconnect(&self) -> Result<()> { + let mut data = self.data.write().await; + let (tokio, client) = Self::build_client(&self.params).await?; + data.tokio = tokio; + data.client = client; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.genesis_hash + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_hash_by_number::(number, e)) + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::header(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_by_hash::(hash, e)) + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_block_by_hash::(hash, e)) + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::finalized_head(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_finalized_header_hash::(e)) + } + + async fn best_header(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::header(&*client, None).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_header::(e)) + } + + async fn subscribe_best_headers(&self) -> Result>> { + self.subscribe_headers( + "best headers", + move |client| async move { SubstrateChainClient::::subscribe_new_heads(&*client).await }, + |e| Error::failed_to_subscribe_best_headers::(e), + ) + .await + } + + async fn subscribe_finalized_headers(&self) -> Result>> { + self.subscribe_headers( + "best finalized headers", + move |client| async move { + SubstrateChainClient::::subscribe_finalized_heads(&*client).await + }, + |e| Error::failed_to_subscribe_finalized_headers::(e), + ) + .await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + self.subscribe_finality_justifications("GRANDPA", move |client| async move { + SubstrateGrandpaClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result> { + self.state_call( + at, + SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), + (set_id, authority_id), + ) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + self.subscribe_finality_justifications("BEEFY", move |client| async move { + SubstrateBeefyClient::::subscribe_justifications(&*client).await + }) + .await + } + + async fn token_decimals(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let system_properties = SubstrateSystemClient::::properties(&*client).await?; + Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) + }) + .await + } + + async fn runtime_version(&self) -> Result { + self.jsonrpsee_execute(move |client| async move { + 
Ok(SubstrateStateClient::::runtime_version(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_runtime_version::(e)) + } + + async fn simple_runtime_version(&self) -> Result { + Ok(match self.params.chain_runtime_version { + ChainRuntimeVersion::Auto => { + let runtime_version = self.runtime_version().await?; + SimpleRuntimeVersion::from_runtime_version(&runtime_version) + }, + ChainRuntimeVersion::Custom(ref version) => *version, + }) + } + + fn can_start_version_guard(&self) -> bool { + !matches!(self.params.chain_runtime_version, ChainRuntimeVersion::Auto) + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + let cloned_storage_key = storage_key.clone(); + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateStateClient::::storage(&*client, cloned_storage_key, Some(at)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_storage_value::(at, storage_key, e)) + } + + async fn pending_extrinsics(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_get_pending_extrinsics::(e)) + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + // one last check that the transaction is valid. Most of checks happen in the relay loop and + // it is the "final" check before submission. + let best_header_hash = self.best_header_hash().await?; + self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e))? + .map_err(|e| Error::failed_to_submit_transaction::(Error::TransactionInvalid(e)))?; + + self.jsonrpsee_execute(move |client| async move { + let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let signing_data = self.build_sign_params(signer.clone()).await?; + + // By using parent of best block here, we are protecting again best-block reorganizations. + // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it + // has been changed to `B[num=100]`. Hash of `A` has been included into transaction + // signature payload. So when signature will be checked, the check will fail and transaction + // will be dropped from the pool. 
+ let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + self.submit_unsigned_extrinsic(Bytes(signed_extrinsic)).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let self_clone = self.clone(); + let signing_data = self.build_sign_params(signer.clone()).await?; + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let best_header_id = best_header.id(); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let stall_timeout = transaction_stall_timeout( + extrinsic.era.mortality_period(), + C::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + + // one last check that the transaction is valid. Most of checks happen in the relay loop and + // it is the "final" check before submission. + self.validate_transaction(best_header_id.hash(), PreEncoded(signed_extrinsic.clone())) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e))? + .map_err(|e| Error::failed_to_submit_transaction::(Error::TransactionInvalid(e)))?; + + self.jsonrpsee_execute(move |client| async move { + let tx_hash = C::Hasher::hash(&signed_extrinsic); + let subscription: jsonrpsee::core::client::Subscription<_> = + SubstrateAuthorClient::::submit_and_watch_extrinsic( + &*client, + Bytes(signed_extrinsic), + ) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(TransactionTracker::new( + self_clone, + stall_timeout, + tx_hash, + Subscription::new_forwarded( + StreamDescription::new("transaction events".into(), C::NAME.into()), + subscription, + ), + )) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.state_call( + at, + SUB_API_TXPOOL_VALIDATE_TRANSACTION.into(), + (TransactionSource::External, transaction, at), + ) + .await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + let transaction_len = transaction.encoded_size() as u32; + let dispatch_info: RuntimeDispatchInfo> = self + .state_call(at, SUB_API_TX_PAYMENT_QUERY_INFO.into(), (transaction, transaction_len)) + .await?; + + Ok(dispatch_info.weight) + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let arguments = Bytes(arguments.encode()); + let arguments_clone = arguments.clone(); + let method_clone = method.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::call(&*client, method, arguments, Some(at)) + .await + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_state_call::(at, method_clone, arguments_clone, e)) + } + + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)> { + let state_root = 
*self.header_by_hash(at).await?.state_root(); + + let keys_clone = keys.clone(); + let read_proof = self + .jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::prove_storage(&*client, keys_clone, Some(at)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0))) + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_to_prove_storage::(at, keys.clone(), e))?; + + Ok((read_proof, state_root)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; + use futures::{channel::mpsc::unbounded, FutureExt, SinkExt, StreamExt}; + + async fn run_ensure_correct_runtime_version( + expected: ChainRuntimeVersion, + actual: RuntimeVersion, + ) -> Result<()> { + let ( + (mut runtime_version_tx, runtime_version_rx), + (slept_tx, _slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded()); + runtime_version_tx.send(actual).await.unwrap(); + let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; + + let ensure_correct_runtime_version = + RpcClient::::ensure_correct_runtime_version(&mut env, expected).boxed(); + let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); + futures::pin_mut!(ensure_correct_runtime_version, aborted); + futures::future::select(ensure_correct_runtime_version, aborted) + .await + .into_inner() + .0 + } + + #[async_std::test] + async fn ensure_correct_runtime_version_works() { + // when we are configured to use auto version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Auto, + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual == expected + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual spec version < expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, + ) + .await, + Err(Error::WaitingForRuntimeUpgrade { + expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, + actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, + .. + }), + )); + // when actual spec version > expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 101, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Err(Error::Custom(_)), + )); + } +} diff --git a/bridges/relays/client-substrate/src/rpc.rs b/bridges/relays/client-substrate/src/client/rpc_api.rs similarity index 80% rename from bridges/relays/client-substrate/src/rpc.rs rename to bridges/relays/client-substrate/src/client/rpc_api.rs index 60c29cdeb5c77..9cac69f7a13d0 100644 --- a/bridges/relays/client-substrate/src/rpc.rs +++ b/bridges/relays/client-substrate/src/client/rpc_api.rs @@ -16,15 +16,9 @@ //! The most generic Substrate node RPC interface. 
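Stepping back to the `prove_storage` implementation earlier in this hunk: it returns the storage proof together with the state root of the block the proof was generated at, so the proof can later be verified without asking the node again. A minimal, hypothetical usage sketch (imports, the `C: Chain` bound and the calling context are assumed; `read_proof_for_key` is not a function from this patch):

```rust
use sp_core::storage::StorageKey;
use sp_trie::StorageProof;

// Hypothetical helper, for illustration only: fetch a proof for one key at a known
// block and keep the state root returned alongside it for later verification.
async fn read_proof_for_key<C: Chain>(
	client: &impl Client<C>,
	at: HashOf<C>,
	key: StorageKey,
) -> Result<(StorageProof, HashOf<C>)> {
	client.prove_storage(at, vec![key]).await
}
```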
-use async_trait::async_trait; - use crate::{Chain, ChainWithGrandpa, TransactionStatusOf}; -use jsonrpsee::{ - core::{client::Subscription, ClientError}, - proc_macros::rpc, - ws_client::WsClient, -}; +use jsonrpsee::proc_macros::rpc; use pallet_transaction_payment_rpc_runtime_api::FeeDetails; use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ @@ -60,6 +54,20 @@ pub(crate) trait SubstrateChain { /// Return signed block (with justifications) by its hash. #[method(name = "getBlock")] async fn block(&self, block_hash: Option) -> RpcResult; + /// Subscribe to best headers. + #[subscription( + name = "subscribeNewHeads" => "newHead", + unsubscribe = "unsubscribeNewHeads", + item = C::Header + )] + async fn subscribe_new_heads(&self); + /// Subscribe to finalized headers. + #[subscription( + name = "subscribeFinalizedHeads" => "finalizedHead", + unsubscribe = "unsubscribeFinalizedHeads", + item = C::Header + )] + async fn subscribe_finalized_heads(&self); } /// RPC methods of Substrate `author` namespace, that we are using. @@ -106,15 +114,6 @@ pub(crate) trait SubstrateState { ) -> RpcResult>; } -/// RPC methods that we are using for a certain finality gadget. -#[async_trait] -pub trait SubstrateFinalityClient { - /// Subscribe to finality justifications. - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError>; -} - /// RPC methods of Substrate `grandpa` namespace, that we are using. #[rpc(client, client_bounds(C: ChainWithGrandpa), namespace = "grandpa")] pub(crate) trait SubstrateGrandpa { @@ -123,17 +122,6 @@ pub(crate) trait SubstrateGrandpa { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `grandpa` namespace, that we are using. -pub struct SubstrateGrandpaFinalityClient; -#[async_trait] -impl SubstrateFinalityClient for SubstrateGrandpaFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateGrandpaClient::::subscribe_justifications(client).await - } -} - // TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged /// RPC methods of Substrate `beefy` namespace, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "beefy")] @@ -143,18 +131,6 @@ pub(crate) trait SubstrateBeefy { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `beefy` namespace, that we are using. -pub struct SubstrateBeefyFinalityClient; -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -#[async_trait] -impl SubstrateFinalityClient for SubstrateBeefyFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateBeefyClient::::subscribe_justifications(client).await - } -} - /// RPC methods of Substrate `system` frame pallet, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "system")] pub(crate) trait SubstrateFrameSystem { diff --git a/bridges/relays/client-substrate/src/client/subscription.rs b/bridges/relays/client-substrate/src/client/subscription.rs new file mode 100644 index 0000000000000..9f08097cb583a --- /dev/null +++ b/bridges/relays/client-substrate/src/client/subscription.rs @@ -0,0 +1,238 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Result as ClientResult; + +use async_std::{ + channel::{bounded, Receiver, Sender}, + stream::StreamExt, +}; +use futures::{FutureExt, Stream}; +use sp_runtime::DeserializeOwned; +use std::{ + fmt::Debug, + pin::Pin, + result::Result as StdResult, + task::{Context, Poll}, +}; + +/// Once channel reaches this capacity, the subscription breaks. +const CHANNEL_CAPACITY: usize = 128; + +/// Structure describing a stream. +#[derive(Clone)] +pub struct StreamDescription { + stream_name: String, + chain_name: String, +} + +impl StreamDescription { + /// Create a new instance of `StreamDescription`. + pub fn new(stream_name: String, chain_name: String) -> Self { + Self { stream_name, chain_name } + } + + /// Get a stream description. + fn get(&self) -> String { + format!("{} stream of {}", self.stream_name, self.chain_name) + } +} + +/// Chainable stream that transforms items of type `Result` to items of type `T`. +/// +/// If it encounters an item of type `Err`, it returns `Poll::Ready(None)` +/// and terminates the underlying stream. +struct Unwrap>, T, E> { + desc: StreamDescription, + stream: Option, +} + +impl>, T, E> Unwrap { + /// Create a new instance of `Unwrap`. + pub fn new(desc: StreamDescription, stream: S) -> Self { + Self { desc, stream: Some(stream) } + } +} + +impl> + Unpin, T: DeserializeOwned, E: Debug> Stream + for Unwrap +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(match self.stream.as_mut() { + Some(subscription) => match futures::ready!(Pin::new(subscription).poll_next(cx)) { + Some(Ok(item)) => Some(item), + Some(Err(e)) => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned error: {:?}. It may need to be restarted", + self.desc.get(), + e, + ); + None + }, + None => { + self.stream.take(); + log::debug!( + target: "bridge", + "{} has returned `None`. It may need to be restarted", + self.desc.get() + ); + None + }, + }, + None => None, + }) + } +} + +/// Subscription factory that produces subscriptions, sharing the same background thread. +#[derive(Clone)] +pub struct SubscriptionBroadcaster { + desc: StreamDescription, + subscribers_sender: Sender>, +} + +impl SubscriptionBroadcaster { + /// Create new subscription factory. + pub fn new(subscription: Subscription) -> StdResult> { + // It doesn't make sense to further broadcast a broadcasted subscription. + if subscription.is_broadcasted { + return Err(subscription) + } + + let desc = subscription.desc().clone(); + let (subscribers_sender, subscribers_receiver) = bounded(CHANNEL_CAPACITY); + async_std::task::spawn(background_worker(subscription, subscribers_receiver)); + Ok(Self { desc, subscribers_sender }) + } + + /// Produce new subscription. 
+ pub async fn subscribe(&self) -> ClientResult> { + let (items_sender, items_receiver) = bounded(CHANNEL_CAPACITY); + self.subscribers_sender.try_send(items_sender)?; + + Ok(Subscription::new_broadcasted(self.desc.clone(), items_receiver)) + } +} + +/// Subscription to some chain events. +pub struct Subscription { + desc: StreamDescription, + subscription: Box + Unpin + Send>, + is_broadcasted: bool, +} + +impl Subscription { + /// Create new forwarded subscription. + pub fn new_forwarded( + desc: StreamDescription, + subscription: impl Stream> + Unpin + Send + 'static, + ) -> Self { + Self { + desc: desc.clone(), + subscription: Box::new(Unwrap::new(desc, subscription)), + is_broadcasted: false, + } + } + + /// Create new broadcasted subscription. + pub fn new_broadcasted( + desc: StreamDescription, + subscription: impl Stream + Unpin + Send + 'static, + ) -> Self { + Self { desc, subscription: Box::new(subscription), is_broadcasted: true } + } + + /// Get the description of the underlying stream + pub fn desc(&self) -> &StreamDescription { + &self.desc + } +} + +impl Stream for Subscription { + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(futures::ready!(Pin::new(&mut self.subscription).poll_next(cx))) + } +} + +/// Background worker that is executed in tokio context as `jsonrpsee` requires. +/// +/// This task may exit under some circumstances. It'll send the correspondent +/// message (`Err` or `None`) to all known listeners. Also, when it stops, all +/// subsequent reads and new subscribers will get the connection error (`ChannelError`). +async fn background_worker( + mut subscription: Subscription, + mut subscribers_receiver: Receiver>, +) { + fn log_task_exit(desc: &StreamDescription, reason: &str) { + log::debug!( + target: "bridge", + "Background task of subscription broadcaster for {} has stopped: {}", + desc.get(), + reason, + ); + } + + // wait for first subscriber until actually starting subscription + let subscriber = match subscribers_receiver.next().await { + Some(subscriber) => subscriber, + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + }; + + // actually subscribe + let mut subscribers = vec![subscriber]; + + // start listening for new items and receivers + loop { + futures::select! { + subscriber = subscribers_receiver.next().fuse() => { + match subscriber { + Some(subscriber) => subscribers.push(subscriber), + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(subscription.desc(), "client has stopped") + }, + } + }, + maybe_item = subscription.subscription.next().fuse() => { + match maybe_item { + Some(item) => { + // notify subscribers + subscribers.retain(|subscriber| { + let send_result = subscriber.try_send(item.clone()); + send_result.is_ok() + }); + } + None => { + // The underlying client has dropped, so we can't do anything here + // and need to stop the task. + return log_task_exit(subscription.desc(), "stream has finished"); + } + } + }, + } + } +} diff --git a/bridges/relays/client-substrate/src/client/traits.rs b/bridges/relays/client-substrate/src/client/traits.rs new file mode 100644 index 0000000000000..6f4ef5aa95106 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/traits.rs @@ -0,0 +1,234 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
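A small usage sketch of the `SubscriptionBroadcaster` defined above; the variable names are hypothetical and the snippet assumes an async context that can propagate the crate's `ClientResult`:

```rust
// `headers` is assumed to be a freshly forwarded `Subscription` of headers obtained
// from the RPC client; wrapping it lets several relay tasks follow the same stream.
let broadcaster = match SubscriptionBroadcaster::new(headers) {
	Ok(broadcaster) => broadcaster,
	// `new()` refuses to wrap a subscription that is already broadcasted and hands
	// it back to the caller instead
	Err(_already_broadcasted) => panic!("freshly forwarded subscription expected"),
};
// every `subscribe()` call gets its own bounded channel, all fed by the single
// background task that polls the original RPC subscription
let headers_for_finality_loop = broadcaster.subscribe().await?;
let headers_for_metrics = broadcaster.subscribe().await?;
```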
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::{ + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, +}; + +use async_trait::async_trait; +use bp_runtime::{StorageDoubleMapKeyProvider, StorageMapKeyProvider}; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::fmt::Debug; + +/// Relay uses the `Client` to communicate with the node, connected to Substrate +/// chain `C`. +#[async_trait] +pub trait Client: 'static + Send + Sync + Clone + Debug { + /// Returns error if client has no connected peers or it believes it is far + /// behind the chain tip. + async fn ensure_synced(&self) -> Result<()>; + /// Reconnects the client. + async fn reconnect(&self) -> Result<()>; + + /// Return hash of the genesis block. + fn genesis_hash(&self) -> HashOf; + /// Get header hash by number. + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result>; + /// Get header by hash. + async fn header_by_hash(&self, hash: HashOf) -> Result>; + /// Get header by number. + async fn header_by_number(&self, number: BlockNumberOf) -> Result> { + self.header_by_hash(self.header_hash_by_number(number).await?).await + } + /// Get block by hash. + async fn block_by_hash(&self, hash: HashOf) -> Result>; + + /// Get best finalized header hash. + async fn best_finalized_header_hash(&self) -> Result>; + /// Get best finalized header number. + async fn best_finalized_header_number(&self) -> Result> { + Ok(*self.best_finalized_header().await?.number()) + } + /// Get best finalized header. + async fn best_finalized_header(&self) -> Result> { + self.header_by_hash(self.best_finalized_header_hash().await?).await + } + + /// Get best header. + async fn best_header(&self) -> Result>; + /// Get best header hash. + async fn best_header_hash(&self) -> Result> { + Ok(self.best_header().await?.hash()) + } + + /// Subscribe to new best headers. + async fn subscribe_best_headers(&self) -> Result>>; + /// Subscribe to new finalized headers. + async fn subscribe_finalized_headers(&self) -> Result>>; + + /// Subscribe to GRANDPA finality justifications. + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa; + /// Generates a proof of key ownership for the given authority in the given set. 
+ async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result>; + + /// Subscribe to BEEFY finality justifications. + async fn subscribe_beefy_finality_justifications(&self) -> Result>; + + /// Return `tokenDecimals` property from the set of chain properties. + async fn token_decimals(&self) -> Result>; + /// Get runtime version of the connected chain. + async fn runtime_version(&self) -> Result; + /// Get partial runtime version, to use when signing transactions. + async fn simple_runtime_version(&self) -> Result; + /// Returns `true` if version guard can be started. + /// + /// There's no reason to run version guard when version mode is set to `Auto`. It can + /// lead to relay shutdown when chain is upgraded, even though we have explicitly + /// said that we don't want to shutdown. + fn can_start_version_guard(&self) -> bool; + + /// Read raw value from runtime storage. + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result>; + /// Read and decode value from runtime storage. + async fn storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.raw_storage_value(at, storage_key.clone()) + .await? + .map(|encoded_value| { + T::decode(&mut &encoded_value.0[..]).map_err(|e| { + Error::failed_to_read_storage_value::(at, storage_key, e.into()) + }) + }) + .transpose() + } + /// Read and decode value from runtime storage map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage map. + async fn storage_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + storage_key: &T::Key, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, storage_key)).await + } + /// Read and decode value from runtime storage double map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage double map. + async fn storage_double_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + key1: &T::Key1, + key2: &T::Key2, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, key1, key2)).await + } + + /// Returns pending extrinsics from transaction pool. + async fn pending_extrinsics(&self) -> Result>; + /// Submit unsigned extrinsic for inclusion in a block. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result>; + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status + /// after submission. 
+ async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Validate transaction at given block. + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + /// Returns weight of the given transaction. + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + + /// Execute runtime call at given block. + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result; + /// Execute runtime call at given block, provided the input and output types. + /// It also performs the input encode and output decode. + async fn state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = arguments.encode(); + let encoded_output = self.raw_state_call(at, method.clone(), arguments).await?; + Ret::decode(&mut &encoded_output.0[..]).map_err(|e| { + Error::failed_state_call::(at, method, Bytes(encoded_arguments), e.into()) + }) + } + + /// Returns storage proof of given storage keys and state root. + async fn prove_storage( + &self, + at: HashOf, + keys: Vec, + ) -> Result<(StorageProof, HashOf)>; +} diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 2133c18887846..ee3c73f806e65 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -16,13 +16,13 @@ //! Substrate node RPC errors. -use crate::SimpleRuntimeVersion; +use crate::{BlockNumberOf, Chain, HashOf, SimpleRuntimeVersion}; use bp_header_chain::SubmitFinalityProofCallExtras; use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; -use sp_core::storage::StorageKey; +use sp_core::{storage::StorageKey, Bytes}; use sp_runtime::transaction_validity::TransactionValidityError; use thiserror::Error; @@ -43,12 +43,10 @@ pub enum Error { /// The response from the server could not be SCALE decoded. #[error("Response parse failed: {0}")] ResponseParseFailed(#[from] codec::Error), - /// Account does not exist on the chain. - #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing some mandatory value. - #[error("Mandatory storage value is missing from the runtime storage.")] - MissingMandatoryStorageValue, + /// Internal channel error - communication channel is either closed, or full. + /// It can be solved with reconnect. + #[error("Internal communication channel error: {0:?}.")] + ChannelError(String), /// Required parachain head is not present at the relay chain. #[error("Parachain {0:?} head {1} is missing from the relay chain storage.")] MissingRequiredParachainHead(ParaId, u64), @@ -58,6 +56,14 @@ pub enum Error { /// The client we're connected to is not synced, so we can't rely on its state. #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), + /// Failed to get system health. + #[error("Failed to get system health of {chain} node: {error:?}.")] + FailedToGetSystemHealth { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, /// Failed to read best finalized header hash from given chain. 
#[error("Failed to read best finalized header hash of {chain}: {error:?}.")] FailedToReadBestFinalizedHeaderHash { @@ -74,6 +80,16 @@ pub enum Error { /// Underlying error. error: Box, }, + /// Failed to read header hash by number from given chain. + #[error("Failed to read header hash by number {number} of {chain}: {error:?}.")] + FailedToReadHeaderHashByNumber { + /// Name of the chain where the error has happened. + chain: String, + /// Number of the header we've tried to read. + number: String, + /// Underlying error. + error: Box, + }, /// Failed to read header by hash from given chain. #[error("Failed to read header {hash} of {chain}: {error:?}.")] FailedToReadHeaderByHash { @@ -84,38 +100,119 @@ pub enum Error { /// Underlying error. error: Box, }, - /// Failed to execute runtime call at given chain. - #[error("Failed to execute runtime call {method} at {chain}: {error:?}.")] - ErrorExecutingRuntimeCall { + /// Failed to read block by hash from given chain. + #[error("Failed to read block {hash} of {chain}: {error:?}.")] + FailedToReadBlockByHash { /// Name of the chain where the error has happened. chain: String, - /// Runtime method name. - method: String, + /// Hash of the header we've tried to read. + hash: String, /// Underlying error. error: Box, }, /// Failed to read sotrage value at given chain. #[error("Failed to read storage value {key:?} at {chain}: {error:?}.")] - FailedToReadRuntimeStorageValue { + FailedToReadStorageValue { /// Name of the chain where the error has happened. chain: String, + /// Hash of the block we've tried to read value from. + hash: String, /// Runtime storage key key: StorageKey, /// Underlying error. error: Box, }, + /// Failed to read runtime version of given chain. + #[error("Failed to read runtime version of {chain}: {error:?}.")] + FailedToReadRuntimeVersion { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to get pending extrinsics. + #[error("Failed to get pending extrinsics of {chain}: {error:?}.")] + FailedToGetPendingExtrinsics { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to submit transaction. + #[error("Failed to submit {chain} transaction: {error:?}.")] + FailedToSubmitTransaction { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Runtime call has failed. + #[error("Runtime call {method} with arguments {arguments:?} of chain {chain} at {hash} has failed: {error:?}.")] + FailedStateCall { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to call at. + hash: String, + /// Runtime API method. + method: String, + /// Encoded method arguments. + arguments: Bytes, + /// Underlying error. + error: Box, + }, + /// Failed to prove storage keys. + #[error("Failed to prove storage keys {storage_keys:?} of {chain} at {hash}: {error:?}.")] + FailedToProveStorage { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to prove keys at. + hash: String, + /// Storage keys we have tried to prove. + storage_keys: Vec, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} best headers: {error:?}.")] + FailedToSubscribeBestHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. 
+ error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} finalized headers: {error:?}.")] + FailedToSubscribeFinalizedHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} justifications: {error:?}.")] + FailedToSubscribeJustifications { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Headers of the chain are finalized out of order. Maybe chain has been + /// restarted? + #[error("Finalized headers of {chain} are unordered: previously finalized {prev_number} vs new {next_number}")] + UnorderedFinalizedHeaders { + /// Name of the chain where the error has happened. + chain: String, + /// Previously finalized header number. + prev_number: String, + /// New finalized header number. + next_number: String, + }, /// The bridge pallet is halted and all transactions will be rejected. #[error("Bridge pallet is halted.")] BridgePalletIsHalted, /// The bridge pallet is not yet initialized and all transactions will be rejected. #[error("Bridge pallet is not initialized.")] BridgePalletIsNotInitialized, - /// There's no best head of the parachain at the `pallet-bridge-parachains` at the target side. - #[error("No head of the ParaId({0}) at the bridge parachains pallet at {1}.")] - NoParachainHeadAtTarget(u32, String), - /// An error has happened when we have tried to parse storage proof. - #[error("Error when parsing storage proof: {0:?}.")] - StorageProofError(bp_runtime::StorageProofError), /// The Substrate transaction is invalid. #[error("Substrate transaction is invalid: {0:?}")] TransactionInvalid(#[from] TransactionValidityError), @@ -143,7 +240,19 @@ pub enum Error { impl From for Error { fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {error}")) + Error::ChannelError(format!("failed to wait tokio task: {error}")) + } +} + +impl From> for Error { + fn from(error: async_std::channel::TrySendError) -> Self { + Error::ChannelError(format!("`try_send` has failed: {error:?}")) + } +} + +impl From for Error { + fn from(error: async_std::channel::RecvError) -> Self { + Error::ChannelError(format!("`recv` has failed: {error:?}")) } } @@ -152,21 +261,170 @@ impl Error { pub fn boxed(self) -> Box { Box::new(self) } + + /// Returns nested error reference. + pub fn nested(&self) -> Option<&Self> { + match *self { + Self::FailedToReadBestFinalizedHeaderHash { ref error, .. } => Some(&**error), + Self::FailedToReadBestHeader { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderHashByNumber { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderByHash { ref error, .. } => Some(&**error), + Self::FailedToReadBlockByHash { ref error, .. } => Some(&**error), + Self::FailedToReadStorageValue { ref error, .. } => Some(&**error), + Self::FailedToReadRuntimeVersion { ref error, .. } => Some(&**error), + Self::FailedToGetPendingExtrinsics { ref error, .. } => Some(&**error), + Self::FailedToSubmitTransaction { ref error, .. } => Some(&**error), + Self::FailedStateCall { ref error, .. } => Some(&**error), + Self::FailedToProveStorage { ref error, .. } => Some(&**error), + Self::FailedToGetSystemHealth { ref error, .. } => Some(&**error), + Self::FailedToSubscribeBestHeaders { ref error, .. 
} => Some(&**error), + Self::FailedToSubscribeFinalizedHeaders { ref error, .. } => Some(&**error), + Self::FailedToSubscribeJustifications { ref error, .. } => Some(&**error), + _ => None, + } + } + + /// Constructs `FailedToReadHeaderHashByNumber` variant. + pub fn failed_to_read_header_hash_by_number( + number: BlockNumberOf, + e: Error, + ) -> Self { + Error::FailedToReadHeaderHashByNumber { + chain: C::NAME.into(), + number: format!("{number}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadHeaderByHash` variant. + pub fn failed_to_read_header_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBlockByHash` variant. + pub fn failed_to_read_block_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBestFinalizedHeaderHash` variant. + pub fn failed_to_read_best_finalized_header_hash(e: Error) -> Self { + Error::FailedToReadBestFinalizedHeaderHash { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadBestHeader` variant. + pub fn failed_to_read_best_header(e: Error) -> Self { + Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadRuntimeVersion` variant. + pub fn failed_to_read_runtime_version(e: Error) -> Self { + Error::FailedToReadRuntimeVersion { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadStorageValue` variant. + pub fn failed_to_read_storage_value( + at: HashOf, + key: StorageKey, + e: Error, + ) -> Self { + Error::FailedToReadStorageValue { + chain: C::NAME.into(), + hash: format!("{at}"), + key, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetPendingExtrinsics` variant. + pub fn failed_to_get_pending_extrinsics(e: Error) -> Self { + Error::FailedToGetPendingExtrinsics { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubmitTransaction` variant. + pub fn failed_to_submit_transaction(e: Error) -> Self { + Error::FailedToSubmitTransaction { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedStateCall` variant. + pub fn failed_state_call( + at: HashOf, + method: String, + arguments: Bytes, + e: Error, + ) -> Self { + Error::FailedStateCall { + chain: C::NAME.into(), + hash: format!("{at}"), + method, + arguments, + error: e.boxed(), + } + } + + /// Constructs `FailedToProveStorage` variant. + pub fn failed_to_prove_storage( + at: HashOf, + storage_keys: Vec, + e: Error, + ) -> Self { + Error::FailedToProveStorage { + chain: C::NAME.into(), + hash: format!("{at}"), + storage_keys, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetSystemHealth` variant. + pub fn failed_to_get_system_health(e: Error) -> Self { + Error::FailedToGetSystemHealth { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeBestHeaders` variant. + pub fn failed_to_subscribe_best_headers(e: Error) -> Self { + Error::FailedToSubscribeBestHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeFinalizedHeaders` variant. + pub fn failed_to_subscribe_finalized_headers(e: Error) -> Self { + Error::FailedToSubscribeFinalizedHeaders { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeJustifications` variant. 
+ pub fn failed_to_subscribe_justification(e: Error) -> Self { + Error::FailedToSubscribeJustifications { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `Un` + pub fn unordered_finalized_headers( + prev_number: BlockNumberOf, + next_number: BlockNumberOf, + ) -> Self { + Error::UnorderedFinalizedHeaders { + chain: C::NAME.into(), + prev_number: format!("{}", prev_number), + next_number: format!("{}", next_number), + } + } } impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { match *self { - Error::RpcError(RpcError::Transport(_)) | - Error::RpcError(RpcError::RestartNeeded(_)) | + Error::ChannelError(_) => true, + Error::RpcError(ref e) => + matches!(*e, RpcError::Transport(_) | RpcError::RestartNeeded(_),), Error::ClientNotSynced(_) => true, - Error::FailedToReadBestFinalizedHeaderHash { ref error, .. } => - error.is_connection_error(), - Error::FailedToReadBestHeader { ref error, .. } => error.is_connection_error(), - Error::FailedToReadHeaderByHash { ref error, .. } => error.is_connection_error(), - Error::ErrorExecutingRuntimeCall { ref error, .. } => error.is_connection_error(), - Error::FailedToReadRuntimeStorageValue { ref error, .. } => error.is_connection_error(), - _ => false, + Error::UnorderedFinalizedHeaders { .. } => true, + _ => self.nested().map(|e| e.is_connection_error()).unwrap_or(false), } } } diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs index 47454892cd039..3dbf95bff8e10 100644 --- a/bridges/relays/client-substrate/src/guard.rs +++ b/bridges/relays/client-substrate/src/guard.rs @@ -98,7 +98,7 @@ fn conditions_check_delay() -> Duration { } #[async_trait] -impl Environment for Client { +impl> Environment for Clnt { type Error = Error; async fn runtime_version(&mut self) -> Result { diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs index d5b8d4dcced2d..12a1c48c09c7a 100644 --- a/bridges/relays/client-substrate/src/lib.rs +++ b/bridges/relays/client-substrate/src/lib.rs @@ -21,7 +21,6 @@ mod chain; mod client; mod error; -mod rpc; mod sync_header; mod transaction_tracker; @@ -37,14 +36,15 @@ pub use crate::{ AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, FullRuntimeUtilityPallet, MockedRuntimeUtilityPallet, Parachain, - RelayChain, SignParam, TransactionStatusOf, UnsignedTransaction, UtilityPallet, + RelayChain, SignParam, SignedBlockOf, TransactionStatusOf, UnsignedTransaction, + UtilityPallet, }, client::{ - is_ancient_block, ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, - SimpleRuntimeVersion, Subscription, ANCIENT_BLOCK_THRESHOLD, + is_ancient_block, rpc_with_caching as new, ChainRuntimeVersion, Client, + OpaqueGrandpaAuthoritiesSet, RpcWithCachingClient, SimpleRuntimeVersion, StreamDescription, + Subscription, ANCIENT_BLOCK_THRESHOLD, }, error::{Error, Result}, - rpc::{SubstrateBeefyFinalityClient, SubstrateFinalityClient, SubstrateGrandpaFinalityClient}, sync_header::SyncHeader, transaction_tracker::TransactionTracker, }; diff --git a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs index 7bb92693b38d2..27c9d8cd7a8b6 100644 --- a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -14,7 +14,7 
@@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::{chain::Chain, client::Client, Error as SubstrateError}; +use crate::{Chain, Client, Error as SubstrateError}; use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; @@ -66,20 +66,20 @@ impl FloatStorageValue for FixedU128OrOne { /// Metric that represents fixed-point runtime storage value as float gauge. #[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { +pub struct FloatStorageValueMetric { value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, metric: Gauge, shared_value_ref: F64SharedRef, - _phantom: PhantomData, + _phantom: PhantomData<(C, V)>, } -impl FloatStorageValueMetric { +impl FloatStorageValueMetric { /// Create new metric. pub fn new( value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, name: String, help: String, @@ -101,32 +101,39 @@ impl FloatStorageValueMetric { } } -impl Metric for FloatStorageValueMetric { +impl, V: FloatStorageValue> Metric + for FloatStorageValueMetric +{ fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { register(self.metric.clone(), registry).map(drop) } } #[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { +impl, V: FloatStorageValue> StandaloneMetric + for FloatStorageValueMetric +{ fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS } async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) + let value = async move { + let best_header_hash = self.client.best_header_hash().await?; + let maybe_storage_value = self + .client + .raw_storage_value(best_header_hash, self.storage_key.clone()) + .await?; + self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { + maybe_fixed_point_value.map(|fixed_point_value| { + fixed_point_value.into_inner().unique_saturated_into() as f64 / + V::Value::DIV.unique_saturated_into() as f64 }) }) - .map_err(|e| e.to_string()); + } + .await + .map_err(|e| e.to_string()); + relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); *self.shared_value_ref.write().await = value.ok().and_then(|x| x); } diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index cfd241c022a26..991202e9874c7 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -24,7 +24,7 @@ use crate::{Chain, ChainWithBalances, ChainWithMessages}; use bp_messages::{ChainWithMessages as ChainWithMessagesBase, MessageNonce}; use bp_runtime::ChainId; -use frame_support::weights::Weight; +use frame_support::{sp_runtime::StateVersion, weights::Weight}; use std::time::Duration; /// Chain that may be used in tests. 
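Circling back to the reworked error type above: `is_connection_error()` now recurses through the new `nested()` helper, so a transport failure is still recognized as a connection problem after it has been wrapped into a chain-specific variant. A hypothetical sketch (the helper name and calling context are not part of this patch):

```rust
use relay_utils::MaybeConnectionError;

// Illustration only: wrap a low-level error the same way the client code does and
// check whether a reconnect might help.
fn needs_reconnect<C: Chain>(at: HashOf<C>, transport_error: Error) -> bool {
	let wrapped = Error::failed_to_read_header_by_hash::<C>(at, transport_error);
	// `nested()` lets `is_connection_error()` look through the wrapper variant
	wrapped.is_connection_error()
}
```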
@@ -44,6 +44,8 @@ impl bp_runtime::Chain for TestChain { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { 100000 } @@ -100,6 +102,8 @@ impl bp_runtime::Chain for TestParachainBase { type Nonce = u32; type Signature = sp_runtime::testing::TestSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { unreachable!() } diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index b181a945c2c15..b4801c89f51e1 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -16,7 +16,7 @@ //! Helper for tracking transaction invalidation events. -use crate::{Chain, Client, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; +use crate::{Chain, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; use async_trait::async_trait; use futures::{future::Either, Future, FutureExt, Stream, StreamExt}; @@ -31,8 +31,10 @@ pub trait Environment: Send + Sync { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error>; } +// TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove `Environment` trait +// after test client is implemented #[async_trait] -impl Environment for Client { +impl> Environment for T { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error> { self.header_by_hash(hash).await.map(|h| HeaderId(*h.number(), hash)) } @@ -76,6 +78,21 @@ impl> TransactionTracker { Self { environment, stall_timeout, transaction_hash, subscription } } + // TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove me after + // test client is implemented + /// Converts self into tracker with different environment. + pub fn switch_environment>( + self, + environment: NewE, + ) -> TransactionTracker { + TransactionTracker { + environment, + stall_timeout: self.stall_timeout, + transaction_hash: self.transaction_hash, + subscription: self.subscription, + } + } + /// Wait for final transaction status and return it along with last known internal invalidation /// status. 
async fn do_wait( @@ -88,7 +105,7 @@ impl> TransactionTracker { let wait_for_invalidation = watch_transaction_status::<_, C, _>( self.environment, self.transaction_hash, - self.subscription.into_stream(), + self.subscription, ); futures::pin_mut!(wait_for_stall_timeout, wait_for_invalidation); @@ -284,7 +301,7 @@ async fn watch_transaction_status< #[cfg(test)] mod tests { use super::*; - use crate::test_chain::TestChain; + use crate::{test_chain::TestChain, StreamDescription}; use futures::{FutureExt, SinkExt}; use sc_transaction_pool_api::TransactionStatus; @@ -306,22 +323,27 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); - let wait_for_stall_timeout = futures::future::pending(); + // we can't do `.now_or_never()` on `do_wait()` call, because `Subscription` has its own + // background thread, which may cause additional async task switches => let's leave some + // relatively small timeout here + let wait_for_stall_timeout = async_std::task::sleep(std::time::Duration::from_millis(100)); let wait_for_stall_timeout_rest = futures::future::ready(()); - sender.send(Some(status)).await.unwrap(); - tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never() - .map(|(ts, is)| (ts, is.unwrap())) + sender.send(Ok(status)).await.unwrap(); + + let (ts, is) = + tx_tracker.do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest).await; + is.map(|is| (ts, is)) } #[async_std::test] @@ -429,13 +451,15 @@ mod tests { #[async_std::test] async fn lost_on_timeout_when_waiting_for_invalidation_status() { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new_forwarded( + StreamDescription::new("test".into(), "test".into()), + receiver, + ), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); diff --git a/bridges/relays/equivocation/Cargo.toml b/bridges/relays/equivocation/Cargo.toml index 5a067b62e0774..09bdda23f2c25 100644 --- a/bridges/relays/equivocation/Cargo.toml +++ b/bridges/relays/equivocation/Cargo.toml @@ -12,12 +12,12 @@ publish = false workspace = true [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -bp-header-chain = { path = "../../primitives/header-chain" } -finality-relay = { path = "../finality" } -frame-support = { path = "../../../substrate/frame/support" } -futures = "0.3.30" +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +bp-header-chain = { workspace = true, default-features = true } +finality-relay = { workspace = true } +frame-support = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } +num-traits = { workspace = true, default-features 
= true } +relay-utils = { workspace = true } diff --git a/bridges/relays/finality/Cargo.toml b/bridges/relays/finality/Cargo.toml index 5ee4b10fa638f..06c4a5dcc43e0 100644 --- a/bridges/relays/finality/Cargo.toml +++ b/bridges/relays/finality/Cargo.toml @@ -12,14 +12,14 @@ publish = false workspace = true [dependencies] -async-std = "1.9.0" -async-trait = "0.1.79" -backoff = "0.4" -bp-header-chain = { path = "../../primitives/header-chain" } -futures = "0.3.30" +async-std = { workspace = true } +async-trait = { workspace = true } +backoff = { workspace = true } +bp-header-chain = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } +num-traits = { workspace = true, default-features = true } +relay-utils = { workspace = true } [dev-dependencies] -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } diff --git a/bridges/relays/finality/src/base.rs b/bridges/relays/finality/src/base.rs index 4253468eaace1..8704bff95494a 100644 --- a/bridges/relays/finality/src/base.rs +++ b/bridges/relays/finality/src/base.rs @@ -45,7 +45,3 @@ pub trait SourceClientBase: RelayClient { /// Subscribe to new finality proofs. async fn finality_proofs(&self) -> Result; } - -/// Target client used in finality related loops. -#[async_trait] -pub trait TargetClientBase: RelayClient {} diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index 077d1b1ff356a..b0f93e5b5485f 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -11,52 +11,50 @@ publish = false workspace = true [dependencies] -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -hex = "0.4" +anyhow = { workspace = true } +async-std = { workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +hex = { workspace = true, default-features = true } log = { workspace = true } -num-traits = "0.2" -rbtag = "0.3" -structopt = "0.3" -strum = { version = "0.26.2", features = ["derive"] } +num-traits = { workspace = true, default-features = true } +rbtag = { workspace = true } +structopt = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } # Bridge dependencies +bp-header-chain = { workspace = true, default-features = true } +bp-parachains = { workspace = true, default-features = true } +bp-polkadot-core = { workspace = true, default-features = true } +bp-relayers = { workspace = true, default-features = true } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-relayers = { path = "../../primitives/relayers" } -bridge-runtime-common = { path = "../../bin/runtime-common" } +equivocation-detector = { workspace = true } +finality-relay = { workspace = true } +parachains-relay = { workspace = true } +relay-utils = { workspace = true } +messages-relay = { workspace = true } +relay-substrate-client = { workspace = true } -equivocation-detector = { path = "../equivocation" } -finality-grandpa = { version = "0.16.2" } -finality-relay = { path = "../finality" } -parachains-relay = { path = "../parachains" } -relay-utils = { path = 
"../utils" } -messages-relay = { path = "../messages" } -relay-substrate-client = { path = "../client-substrate" } +pallet-bridge-grandpa = { workspace = true, default-features = true } +pallet-bridge-messages = { workspace = true, default-features = true } +pallet-bridge-parachains = { workspace = true, default-features = true } -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -pallet-bridge-messages = { path = "../../modules/messages" } -pallet-bridge-parachains = { path = "../../modules/parachains" } - -bp-runtime = { path = "../../primitives/runtime" } -bp-messages = { path = "../../primitives/messages" } +bp-runtime = { workspace = true, default-features = true } +bp-messages = { workspace = true, default-features = true } # Substrate Dependencies - -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-grandpa = { path = "../../../substrate/frame/grandpa" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true } [dev-dependencies] -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } +scale-info = { features = ["derive"], workspace = true } +pallet-transaction-payment = { workspace = true, default-features = true } +relay-substrate-client = { features = ["test-helpers"], workspace = true } diff --git a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs index 316f59a2b0c86..5631285b3c544 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs @@ -19,7 +19,7 @@ use crate::{ equivocation::SubstrateEquivocationDetectionPipeline, finality::SubstrateFinalitySyncPipeline, - messages_lane::{MessagesRelayLimits, SubstrateMessageLane}, + messages::{MessagesRelayLimits, SubstrateMessageLane}, parachains::SubstrateParachainsPipeline, }; use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; diff --git a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs index 6246bdbf01515..d985d35c9e802 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs @@ -123,11 +123,11 @@ macro_rules! 
declare_chain_connection_params_cli_schema { #[allow(dead_code)] pub async fn into_client( self, - ) -> anyhow::Result> { + ) -> anyhow::Result<$crate::cli::DefaultClient> { let chain_runtime_version = self .[<$chain_prefix _runtime_version>] .into_runtime_version(Chain::RUNTIME_VERSION)?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + Ok(relay_substrate_client::new(relay_substrate_client::ConnectionParams { uri: self.[<$chain_prefix _uri>], host: self.[<$chain_prefix _host>], port: self.[<$chain_prefix _port>], diff --git a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs index b98e41b2a43e4..3921685d9e8ad 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs @@ -23,7 +23,7 @@ use crate::{ }; use async_trait::async_trait; -use relay_substrate_client::ChainWithTransactions; +use relay_substrate_client::{ChainWithTransactions, Client}; use structopt::StructOpt; /// Start equivocation detection loop. diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index 270608bf6ed8e..ddb3e416dc326 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -35,6 +35,11 @@ pub mod relay_parachains; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "bridge"; +/// Default Substrate client type that we are using. We'll use it all over the glue CLI code +/// to avoid multiple level generic arguments and constraints. We still allow usage of other +/// clients in the **core logic code**. +pub type DefaultClient = relay_substrate_client::RpcWithCachingClient; + /// Lane id. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HexLaneId(pub [u8; 4]); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 093f98ef21ed2..ea92a0c9acce1 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -29,6 +29,7 @@ use crate::{ finality::SubstrateFinalitySyncPipeline, HeadersToRelay, }; +use relay_substrate_client::Client; /// Chain headers relaying params. 
#[derive(StructOpt)] diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index a796df6721b8c..338dda3c63309 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -37,8 +37,8 @@ use structopt::StructOpt; use futures::{FutureExt, TryFutureExt}; use crate::{ - cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, - messages_lane::{MessagesRelayLimits, MessagesRelayParams}, + cli::{bridge::MessagesCliBridge, DefaultClient, HexLaneId, PrometheusParams}, + messages::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, HeadersToRelay, TaggedAccount, TransactionParams, }; @@ -46,7 +46,7 @@ use bp_messages::LaneId; use bp_runtime::BalanceOf; use relay_substrate_client::{ AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, - ChainWithRuntimeVersion, ChainWithTransactions, Client, + ChainWithRuntimeVersion, ChainWithTransactions, }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; @@ -118,7 +118,7 @@ impl< /// Parameters that are associated with one side of the bridge. pub struct BridgeEndCommonParams { /// Chain client. - pub client: Client, + pub client: DefaultClient, /// Params used for sending transactions to the chain. pub tx_params: TransactionParams>, /// Accounts, which balances are exposed as metrics by the relay process. @@ -165,7 +165,7 @@ where target_to_source_headers_relay: Arc>, lane_id: LaneId, maybe_limits: Option, - ) -> MessagesRelayParams { + ) -> MessagesRelayParams, DefaultClient> { MessagesRelayParams { source_client: self.source.client.clone(), source_transaction_params: self.source.tx_params.clone(), @@ -298,14 +298,14 @@ where .collect::>(); { let common = self.mut_base().mut_common(); - crate::messages_metrics::add_relay_balances_metrics::<_, Self::Right>( + crate::messages::metrics::add_relay_balances_metrics::<_, Self::Right>( common.left.client.clone(), &common.metrics_params, &common.left.accounts, &lanes, ) .await?; - crate::messages_metrics::add_relay_balances_metrics::<_, Self::Left>( + crate::messages::metrics::add_relay_balances_metrics::<_, Self::Left>( common.right.client.clone(), &common.metrics_params, &common.right.accounts, @@ -317,28 +317,30 @@ where // Need 2x capacity since we consider both directions for each lane let mut message_relays = Vec::with_capacity(lanes.len() * 2); for lane in lanes { - let left_to_right_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.left_to_right().messages_relay_params( - left_to_right_on_demand_headers.clone(), - right_to_left_on_demand_headers.clone(), - lane, - Self::L2R::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let left_to_right_messages = + crate::messages::run::<::MessagesLane, _, _>( + self.left_to_right().messages_relay_params( + left_to_right_on_demand_headers.clone(), + right_to_left_on_demand_headers.clone(), + lane, + Self::L2R::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(left_to_right_messages); - let right_to_left_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.right_to_left().messages_relay_params( - right_to_left_on_demand_headers.clone(), - left_to_right_on_demand_headers.clone(), - lane, - Self::R2L::maybe_messages_limits(), - )) - .map_err(|e| 
anyhow::format_err!("{}", e)) - .boxed(); + let right_to_left_messages = + crate::messages::run::<::MessagesLane, _, _>( + self.right_to_left().messages_relay_params( + right_to_left_on_demand_headers.clone(), + left_to_right_on_demand_headers.clone(), + lane, + Self::R2L::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(right_to_left_messages); } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 7f6f407778236..8104be7af807a 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -23,6 +23,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -52,9 +53,9 @@ pub struct ParachainToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the left relay chain. - pub left_relay: Client<::SourceRelay>, + pub left_relay: DefaultClient<::SourceRelay>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to parachain-to-parachain relayer. @@ -175,25 +176,33 @@ where ) .await?; - let left_relay_to_right_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let left_relay_to_right_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.left_relay.clone(), + self.common.right.client.clone(), + self.common.right.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let left_to_right_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.left_relay.clone(), self.common.right.client.clone(), @@ -202,6 +211,8 @@ where ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index 5911fe49df4ad..6c078973fedc0 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ 
b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -26,6 +26,7 @@ use crate::{ RelayToRelayHeadersCliBridge, }, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -54,7 +55,7 @@ pub struct RelayToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to relay-to-parachain relayer. @@ -167,23 +168,28 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), self.common.shared.headers_to_relay(), None, ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index 832df4ae4003c..3f8c8bb40c99c 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -32,7 +32,7 @@ use crate::{ on_demand::{headers::OnDemandHeadersRelay, OnDemandRelay}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, Client, }; use sp_core::Pair; @@ -148,7 +148,7 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), @@ -156,7 +156,7 @@ where None, ); let right_to_left_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index 943feba072e40..68bbe71ae599c 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -18,7 +18,7 @@ use crate::{ cli::{bridge::*, chain_schema::*, HexLaneId, PrometheusParams}, - messages_lane::MessagesRelayParams, + messages::MessagesRelayParams, TransactionParams, }; @@ -29,7 +29,8 @@ use structopt::StructOpt; use bp_messages::MessageNonce; 
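For orientation while reading the hunks below: the relay entry points and adapter types now carry extra generic parameters for the source and target client types, and call sites typically spell the pipeline explicitly while leaving the client types as `_` for the compiler to infer (for example `crate::messages::run::<…::MessagesLane, _, _>(…)` earlier in this patch). The following self-contained sketch uses toy types, not the real `relay_substrate_client` API, to show that turbofish-with-inference pattern:

// Toy stand-ins; the real code constrains the client parameters with the `Client` trait.
use std::marker::PhantomData;

struct Pipeline;

struct Relay<P, SourceClnt, TargetClnt> {
    _pipeline: PhantomData<P>,
    _source: SourceClnt,
    _target: TargetClnt,
}

impl<P, SourceClnt, TargetClnt> Relay<P, SourceClnt, TargetClnt> {
    fn new(source: SourceClnt, target: TargetClnt) -> Self {
        Relay { _pipeline: PhantomData, _source: source, _target: target }
    }
}

fn main() {
    // The pipeline type stays explicit, the client types are inferred from the arguments.
    let _relay = Relay::<Pipeline, _, _>::new("source-rpc-client", "target-rpc-client");
}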
use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, + ChainWithTransactions, Client, }; use relay_utils::UniqueSaturatedInto; @@ -116,7 +117,7 @@ where let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; - crate::messages_lane::run::(MessagesRelayParams { + crate::messages::run::(MessagesRelayParams { source_client, source_transaction_params: TransactionParams { signer: source_sign, @@ -160,7 +161,7 @@ where })? .id(); - crate::messages_lane::relay_messages_range::( + crate::messages::relay_messages_range::( source_client, target_client, TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, @@ -196,7 +197,7 @@ where })? .id(); - crate::messages_lane::relay_messages_delivery_confirmation::( + crate::messages::relay_messages_delivery_confirmation::( source_client, target_client, TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index 00f8cf79ef1fb..77cd395ff7225 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -21,7 +21,7 @@ use async_trait::async_trait; use bp_polkadot_core::BlockNumber as RelayBlockNumber; use bp_runtime::HeaderIdProvider; use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::Parachain; +use relay_substrate_client::{Client, Parachain}; use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use std::sync::Arc; use structopt::StructOpt; @@ -30,7 +30,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, ParachainToRelayHeadersCliBridge}, chain_schema::*, - PrometheusParams, + DefaultClient, PrometheusParams, }, parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, TransactionParams, @@ -72,16 +72,19 @@ pub struct RelayParachainHeadParams { #[async_trait] pub trait ParachainsRelayer: ParachainToRelayHeadersCliBridge where - ParachainsSource: + ParachainsSource>: SourceClient>, - ParachainsTarget: - TargetClient>, + ParachainsTarget< + Self::ParachainFinality, + DefaultClient, + DefaultClient, + >: TargetClient>, ::Source: Parachain, { /// Start relaying parachains finality. async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { let source_chain_client = data.source.into_client::().await?; - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -91,7 +94,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, @@ -121,7 +124,7 @@ where .map_err(|e| anyhow::format_err!("{}", e))? 
.id(); - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -131,7 +134,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs index f6d58cbaa4ab4..f8077923b8202 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs @@ -69,7 +69,7 @@ pub trait SubstrateEquivocationDetectionPipeline: /// Add relay guards if required. async fn start_relay_guards( - source_client: &Client, + source_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -199,8 +199,8 @@ macro_rules! generate_report_equivocation_call_builder { /// Run Substrate-to-Substrate equivocations detection loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { @@ -212,8 +212,8 @@ pub async fn run( ); equivocation_detector::run( - SubstrateEquivocationSource::
<P>
::new(source_client, source_transaction_params), - SubstrateEquivocationTarget::
<P>
::new(target_client), + SubstrateEquivocationSource::::new(source_client, source_transaction_params), + SubstrateEquivocationTarget::::new(target_client), P::TargetChain::AVERAGE_BLOCK_INTERVAL, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs index a0c7dcf5cbc32..66d651600a1ec 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs @@ -35,29 +35,35 @@ use relay_substrate_client::{ use relay_utils::relay_loop::Client as RelayClient; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationSource { - client: Client, +pub struct SubstrateEquivocationSource { + client: SourceClnt, transaction_params: TransactionParams>, } -impl SubstrateEquivocationSource

{ +impl> + SubstrateEquivocationSource +{ /// Create new instance of `SubstrateEquivocationSource`. pub fn new( - client: Client, + client: SourceClnt, transaction_params: TransactionParams>, ) -> Self { Self { client, transaction_params } } } -impl Clone for SubstrateEquivocationSource

{ +impl> Clone + for SubstrateEquivocationSource +{ fn clone(&self) -> Self { Self { client: self.client.clone(), transaction_params: self.transaction_params.clone() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationSource

{ +impl> RelayClient + for SubstrateEquivocationSource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -66,8 +72,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - SourceClientBase> for SubstrateEquivocationSource

+impl> + SourceClientBase> + for SubstrateEquivocationSource { type FinalityProofsStream = SubstrateFinalityProofsStream

; @@ -77,10 +84,11 @@ impl } #[async_trait] -impl - SourceClient> for SubstrateEquivocationSource

+impl> + SourceClient> + for SubstrateEquivocationSource { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn report_equivocation( &self, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs index 6eee2ab91d45b..7d054e843d0db 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs @@ -34,27 +34,33 @@ use sp_runtime::traits::Header; use std::marker::PhantomData; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationTarget { - client: Client, +pub struct SubstrateEquivocationTarget { + client: TargetClnt, _phantom: PhantomData

, } -impl SubstrateEquivocationTarget

{ +impl> + SubstrateEquivocationTarget +{ /// Create new instance of `SubstrateEquivocationTarget`. - pub fn new(client: Client) -> Self { + pub fn new(client: TargetClnt) -> Self { Self { client, _phantom: Default::default() } } } -impl Clone for SubstrateEquivocationTarget

{ +impl> Clone + for SubstrateEquivocationTarget +{ fn clone(&self) -> Self { Self { client: self.client.clone(), _phantom: Default::default() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationTarget

{ +impl> RelayClient + for SubstrateEquivocationTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -63,8 +69,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - TargetClient> for SubstrateEquivocationTarget

+impl> + TargetClient> + for SubstrateEquivocationTarget { async fn best_finalized_header_number( &self, diff --git a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs index 5dde46c39dd67..a972f743e117c 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs @@ -39,8 +39,8 @@ pub async fn initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, @@ -101,8 +101,8 @@ async fn do_initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 0293e1da224a6..a2379eb4812e2 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -77,7 +77,7 @@ pub trait SubstrateFinalitySyncPipeline: BaseSubstrateFinalitySyncPipeline { /// Add relay guards if required. async fn start_relay_guards( - target_client: &Client, + target_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -240,8 +240,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { /// Run Substrate-to-Substrate finality sync loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, @@ -255,8 +255,8 @@ pub async fn run( ); finality_relay::run( - SubstrateFinalitySource::
<P>
::new(source_client, None), - SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params.clone()), + SubstrateFinalitySource::::new(source_client, None), + SubstrateFinalityTarget::::new(target_client, transaction_params.clone()), finality_relay::FinalitySyncParams { tick: std::cmp::max( P::SourceChain::AVERAGE_BLOCK_INTERVAL, @@ -279,12 +279,12 @@ pub async fn run( /// Relay single header. No checks are made to ensure that transaction will succeed. pub async fn relay_single_header( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, transaction_params: TransactionParams>, header_number: BlockNumberOf, ) -> anyhow::Result<()> { - let finality_source = SubstrateFinalitySource::
<P>
::new(source_client, None); + let finality_source = SubstrateFinalitySource::::new(source_client, None); let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; let Some(proof) = proof else { return Err(anyhow::format_err!( @@ -295,7 +295,7 @@ pub async fn relay_single_header( )); }; - let finality_target = SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params); + let finality_target = SubstrateFinalityTarget::::new(target_client, transaction_params); let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; match tx_tracker.wait().await { TrackedTransactionStatus::Finalized(_) => Ok(()), diff --git a/bridges/relays/lib-substrate-relay/src/finality/source.rs b/bridges/relays/lib-substrate-relay/src/finality/source.rs index c94af6108957a..f6fa5c24add50 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/source.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/source.rs @@ -40,22 +40,24 @@ use relay_utils::{relay_loop::Client as RelayClient, UniqueSaturatedInto}; pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; /// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, +pub struct SubstrateFinalitySource { + client: SourceClnt, maximal_header_number: Option>, } -impl SubstrateFinalitySource

{ +impl> + SubstrateFinalitySource +{ /// Create new headers source using given client. pub fn new( - client: Client, + client: SourceClnt, maximal_header_number: Option>, ) -> Self { SubstrateFinalitySource { client, maximal_header_number } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceClnt { &self.client } @@ -174,7 +176,9 @@ impl SubstrateFinalitySource

{ } } -impl Clone for SubstrateFinalitySource

{ +impl Clone + for SubstrateFinalitySource +{ fn clone(&self) -> Self { SubstrateFinalitySource { client: self.client.clone(), @@ -184,7 +188,9 @@ impl Clone for SubstrateFinalitySource

{ } #[async_trait] -impl RelayClient for SubstrateFinalitySource

{ +impl> RelayClient + for SubstrateFinalitySource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -193,8 +199,8 @@ impl RelayClient for SubstrateFinalitySource

SourceClientBase> - for SubstrateFinalitySource

+impl> + SourceClientBase> for SubstrateFinalitySource { type FinalityProofsStream = SubstrateFinalityProofsStream

; @@ -204,8 +210,8 @@ impl SourceClientBase SourceClient> - for SubstrateFinalitySource

+impl> + SourceClient> for SubstrateFinalitySource { async fn best_finalized_block_number(&self) -> Result, Error> { let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; @@ -235,7 +241,7 @@ impl SourceClient( - client: &Client, + client: &impl Client, number: BlockNumberOf, ) -> Result< ( @@ -244,8 +250,8 @@ async fn header_and_finality_proof( ), Error, > { - let header_hash = client.block_hash_by_number(number).await?; - let signed_block = client.get_block(Some(header_hash)).await?; + let header_hash = client.header_hash_by_number(number).await?; + let signed_block = client.block_by_hash(header_hash).await?; let justification = signed_block .justification(P::FinalityEngine::ID) diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 52ab2462c62c4..18b696685dd4e 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -28,22 +28,25 @@ use async_trait::async_trait; use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, - TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, + TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; +use sp_core::Pair; use sp_runtime::traits::Header; /// Substrate client as Substrate finality target. -pub struct SubstrateFinalityTarget { - client: Client, +pub struct SubstrateFinalityTarget { + client: TargetClnt, transaction_params: TransactionParams>, } -impl SubstrateFinalityTarget

{ +impl> + SubstrateFinalityTarget +{ /// Create new Substrate headers target. pub fn new( - client: Client, + client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { SubstrateFinalityTarget { client, transaction_params } @@ -65,7 +68,9 @@ impl SubstrateFinalityTarget

{ } } -impl Clone for SubstrateFinalityTarget

{ +impl Clone + for SubstrateFinalityTarget +{ fn clone(&self) -> Self { SubstrateFinalityTarget { client: self.client.clone(), @@ -75,7 +80,9 @@ impl Clone for SubstrateFinalityTarget

{ } #[async_trait] -impl RelayClient for SubstrateFinalityTarget

{ +impl> RelayClient + for SubstrateFinalityTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -84,10 +91,12 @@ impl RelayClient for SubstrateFinalityTarget

TargetClient> - for SubstrateFinalityTarget

+impl> + TargetClient> for SubstrateFinalityTarget +where + AccountIdOf: From< as Pair>::Public>, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_finalized_source_block_id(&self) -> Result, Error> { // we can't continue to relay finality if target node is out of sync, because @@ -109,10 +118,10 @@ impl TargetClient Result>, Self::Error> { Ok(self .client - .typed_state_call( + .state_call( + self.client.best_header().await?.hash(), P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), - Some(self.client.best_header().await?.hash()), ) .await .unwrap_or_else(|e| { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs index 5a9ec42fde5a3..4f15d68771940 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs @@ -28,10 +28,11 @@ use bp_header_chain::{ }; use bp_runtime::{BasicOperatingMode, HeaderIdProvider, OperatingMode}; use codec::{Decode, Encode}; +use futures::stream::StreamExt; use num_traits::{One, Zero}; use relay_substrate_client::{ BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, HeaderOf, - Subscription, SubstrateFinalityClient, SubstrateGrandpaFinalityClient, + Subscription, }; use sp_consensus_grandpa::{AuthorityList as GrandpaAuthoritiesSet, GRANDPA_ENGINE_ID}; use sp_core::{storage::StorageKey, Bytes}; @@ -45,8 +46,6 @@ pub trait Engine: Send { const ID: ConsensusEngineId; /// A reader that can extract the consensus log from the header digest and interpret it. type ConsensusLogReader: ConsensusLogReader; - /// Type of Finality RPC client used by this engine. - type FinalityClient: SubstrateFinalityClient; /// Type of finality proofs, used by consensus engine. type FinalityProof: FinalityProof, BlockNumberOf> + Decode + Encode; /// The context needed for verifying finality proofs. @@ -74,10 +73,10 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain has already been initialized. async fn is_initialized( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .raw_storage_value(Self::is_initialized_key(), None) + .raw_storage_value(target_client.best_header_hash().await?, Self::is_initialized_key()) .await? .is_some()) } @@ -88,10 +87,13 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain is halted. async fn is_halted( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .storage_value::(Self::pallet_operating_mode_key(), None) + .storage_value::( + target_client.best_header_hash().await?, + Self::pallet_operating_mode_key(), + ) .await? .map(|operating_mode| operating_mode.is_halted()) .unwrap_or(false)) @@ -99,17 +101,15 @@ pub trait Engine: Send { /// A method to subscribe to encoded finality proofs, given source client. async fn source_finality_proofs( - source_client: &Client, - ) -> Result, SubstrateError> { - source_client.subscribe_finality_justifications::().await - } + source_client: &impl Client, + ) -> Result, SubstrateError>; /// Verify and optimize finality proof before sending it to the target node. /// /// Apart from optimization, we expect this method to perform all required checks /// that the `header` and `proof` are valid at the current state of the target chain. 
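A recurring mechanical change in the hunks around this point is the calling convention of the client query helpers: the old `typed_state_call(method, args, Some(at))` and `storage_value(key, Some(at))` forms become `state_call(at, method, args)` and `storage_value(at, key)`, i.e. the block hash to query at is now mandatory and passed first. A minimal stand-in sketch of that convention (synchronous toy trait, not the real `relay_substrate_client::Client` trait, which is async and SCALE-typed):

// Toy types; hashing, SCALE encoding, async and error handling are omitted.
type Hash = [u8; 32];

trait ClientSketch {
    // New convention: the block to query at comes first and is always explicit.
    fn state_call(&self, at: Hash, method: String, args: Vec<u8>) -> Vec<u8>;
    fn storage_value(&self, at: Hash, key: Vec<u8>) -> Option<Vec<u8>>;
}

struct DummyClient;

impl ClientSketch for DummyClient {
    fn state_call(&self, _at: Hash, _method: String, _args: Vec<u8>) -> Vec<u8> {
        Vec::new()
    }
    fn storage_value(&self, _at: Hash, _key: Vec<u8>) -> Option<Vec<u8>> {
        None
    }
}

fn main() {
    let client = DummyClient;
    let best = [0u8; 32]; // in the real code this comes from `client.best_header().await?.hash()`
    let _mode = client.storage_value(best, b"pallet-operating-mode-key".to_vec());
    let _raw = client.state_call(best, "SomeRuntimeApi_some_method".to_string(), Vec::new());
}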
async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result; @@ -123,19 +123,19 @@ pub trait Engine: Send { /// Prepare initialization data for the finality bridge pallet. async fn prepare_initialization_data( - client: Client, + client: impl Client, ) -> Result, BlockNumberOf>>; /// Get the context needed for validating a finality proof. async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result; /// Returns the finality info associated to the source headers synced with the target /// at the provided block. async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result< Vec>, @@ -144,7 +144,7 @@ pub trait Engine: Send { /// Generate key ownership proof for the provided equivocation. async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result; @@ -156,7 +156,7 @@ pub struct Grandpa(PhantomData); impl Grandpa { /// Read header by hash from the source client. async fn source_header( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { source_client @@ -167,15 +167,15 @@ impl Grandpa { /// Read GRANDPA authorities set at given header. async fn source_authorities_set( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) + const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; + + source_client + .state_call(header_hash, SUB_API_GRANDPA_AUTHORITIES.to_string(), ()) .await - .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) + .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err)) } } @@ -183,7 +183,6 @@ impl Grandpa { impl Engine for Grandpa { const ID: ConsensusEngineId = GRANDPA_ENGINE_ID; type ConsensusLogReader = GrandpaConsensusLogReader<::Number>; - type FinalityClient = SubstrateGrandpaFinalityClient; type FinalityProof = GrandpaJustification>; type FinalityVerificationContext = JustificationVerificationContext; type EquivocationProof = sp_consensus_grandpa::EquivocationProof, BlockNumberOf>; @@ -200,8 +199,14 @@ impl Engine for Grandpa { bp_header_chain::storage_keys::pallet_operating_mode_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) } + async fn source_finality_proofs( + client: &impl Client, + ) -> Result, SubstrateError> { + client.subscribe_grandpa_finality_justifications().await + } + async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result { @@ -239,7 +244,7 @@ impl Engine for Grandpa { /// Prepare initialization data for the GRANDPA verifier pallet. 
async fn prepare_initialization_data( - source_client: Client, + source_client: impl Client, ) -> Result, BlockNumberOf>> { // In ideal world we just need to get best finalized header and then to read GRANDPA // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at @@ -248,17 +253,14 @@ impl Engine for Grandpa { // But now there are problems with this approach - `CurrentSetId` may return invalid value. // So here we're waiting for the next justification, read the authorities set and then try // to figure out the set id with bruteforce. - let justifications = Self::source_finality_proofs(&source_client) + let mut justifications = Self::source_finality_proofs(&source_client) .await .map_err(|err| Error::Subscribe(C::NAME, err))?; // Read next justification - the header that it finalizes will be used as initial header. let justification = justifications .next() .await - .map_err(|e| Error::ReadJustification(C::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(C::NAME)) - })?; + .ok_or(Error::ReadJustificationStreamEnded(C::NAME))?; // Read initial header. let justification: GrandpaJustification = @@ -359,14 +361,14 @@ impl Engine for Grandpa { } async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result { let current_authority_set_key = bp_header_chain::storage_keys::current_authority_set_key( C::WITH_CHAIN_GRANDPA_PALLET_NAME, ); let authority_set: AuthoritySet = target_client - .storage_value(current_authority_set_key, Some(at)) + .storage_value(at, current_authority_set_key) .await? .map(Ok) .unwrap_or(Err(SubstrateError::Custom(format!( @@ -385,11 +387,11 @@ impl Engine for Grandpa { } async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result>>, SubstrateError> { let stored_headers_grandpa_info: Vec>> = target_client - .typed_state_call(C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), (), Some(at)) + .state_call(at, C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), ()) .await?; let mut headers_grandpa_info = vec![]; @@ -407,7 +409,7 @@ impl Engine for Grandpa { } async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs index 825960b1b3ef2..71d15ca3868e0 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs @@ -50,11 +50,11 @@ pub type SubstrateFinalityProofsStream
<P>
= /// Subscribe to new finality proofs. pub async fn finality_proofs( - client: &Client, + client: &impl Client, ) -> Result, Error> { Ok(unfold( P::FinalityEngine::source_finality_proofs(client).await?, - move |subscription| async move { + move |mut subscription| async move { loop { let log_error = |err| { log::error!( @@ -65,8 +65,7 @@ pub async fn finality_proofs( ); }; - let next_justification = - subscription.next().await.map_err(|err| log_error(err.to_string())).ok()??; + let next_justification = subscription.next().await?; let decoded_justification = >::FinalityProof::decode( @@ -93,7 +92,7 @@ pub async fn finality_proofs( /// /// The runtime API method should be `FinalityApi::best_finalized()`. pub async fn best_synced_header_id( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result>, Error> where @@ -102,6 +101,6 @@ where { // now let's read id of best finalized peer header at our best finalized block target_client - .typed_state_call(SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), Some(at)) + .state_call(at, SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), ()) .await } diff --git a/bridges/relays/lib-substrate-relay/src/lib.rs b/bridges/relays/lib-substrate-relay/src/lib.rs index b3e8e7ed9a205..c004540a9f495 100644 --- a/bridges/relays/lib-substrate-relay/src/lib.rs +++ b/bridges/relays/lib-substrate-relay/src/lib.rs @@ -30,10 +30,7 @@ pub mod equivocation; pub mod error; pub mod finality; pub mod finality_base; -pub mod messages_lane; -pub mod messages_metrics; -pub mod messages_source; -pub mod messages_target; +pub mod messages; pub mod on_demand; pub mod parachains; @@ -130,3 +127,17 @@ impl BatchCallBuilder for () { unreachable!("never called, because ()::new_builder() returns None; qed") } } + +/// Module for handling storage proofs compatibility. +pub mod proofs { + use bp_runtime::{HashOf, RawStorageProof}; + use relay_substrate_client::Chain; + use sp_trie::StorageProof; + + /// Converts proof to `RawStorageProof` type. + pub fn to_raw_storage_proof( + proof: (StorageProof, HashOf), + ) -> RawStorageProof { + proof.0.into_iter_nodes().collect() + } +} diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs similarity index 99% rename from bridges/relays/lib-substrate-relay/src/messages_metrics.rs rename to bridges/relays/lib-substrate-relay/src/messages/metrics.rs index b30e75bd8bacb..8845f43dcb62a 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs @@ -36,7 +36,7 @@ use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( - client: Client, + client: impl Client, metrics: &MetricsParams, relay_accounts: &Vec>>, lanes: &[LaneId], diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages/mod.rs similarity index 59% rename from bridges/relays/lib-substrate-relay/src/messages_lane.rs rename to bridges/relays/lib-substrate-relay/src/messages/mod.rs index 08550d19bae03..e52b702066694 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/mod.rs @@ -17,20 +17,21 @@ //! Tools for supporting message lanes between two Substrate-based chains. 
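The renames above fold the former top-level `messages_lane`, `messages_metrics`, `messages_source` and `messages_target` modules into a single `messages` module with `metrics`, `source` and `target` submodules, which is why every `crate::messages_*::…` path in the rest of the patch becomes a `crate::messages::…` path. A small layout sketch (module bodies elided):

// Sketch of bridges/relays/lib-substrate-relay/src/messages/ after this patch.
mod messages {
    pub mod metrics {} // was src/messages_metrics.rs
    pub mod source {}  // was src/messages_source.rs
    pub mod target {}  // was src/messages_target.rs
    // items from the former src/messages_lane.rs now live directly in src/messages/mod.rs
}

fn main() {
    // Old path                          New path
    // crate::messages_lane::run         crate::messages::run
    // crate::messages_source::*         crate::messages::source::*
    // crate::messages_target::*         crate::messages::target::*
    // crate::messages_metrics::*        crate::messages::metrics::*
}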
use crate::{ - messages_source::{SubstrateMessagesProof, SubstrateMessagesSource}, - messages_target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, + messages::{ + source::{SubstrateMessagesProof, SubstrateMessagesSource}, + target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, + }, on_demand::OnDemandRelay, BatchCallBuilder, BatchCallBuilderConstructor, TransactionParams, }; use async_std::sync::Arc; -use bp_messages::{ChainWithMessages as _, LaneId, MessageNonce}; +use bp_messages::{ + target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, LaneId, MessageNonce, +}; use bp_runtime::{ AccountIdOf, Chain as _, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps, }; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; use codec::Encode; use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransaction}; @@ -48,6 +49,10 @@ use sp_core::Pair; use sp_runtime::traits::Zero; use std::{fmt::Debug, marker::PhantomData, ops::RangeInclusive}; +pub mod metrics; +pub mod source; +pub mod target; + /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { /// Messages of this chain are relayed to the `TargetChain`. @@ -88,13 +93,13 @@ impl MessageLane for MessageLaneAdapter
<P>
{ } /// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { +pub struct MessagesRelayParams { /// Messages source client. - pub source_client: Client, + pub source_client: SourceClnt, /// Source transaction params. pub source_transaction_params: TransactionParams>, /// Messages target client. - pub target_client: Client, + pub target_client: TargetClnt, /// Target transaction params. pub target_transaction_params: TransactionParams>, /// Optional on-demand source to target headers relay. @@ -179,8 +184,13 @@ impl>> } /// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams
<P>
) -> anyhow::Result<()> +pub async fn run( + params: MessagesRelayParams, +) -> anyhow::Result<()> where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, @@ -190,7 +200,7 @@ where let limits = match params.limits { Some(limits) => limits, None => - select_delivery_transaction_limits_rpc::
<P>
( + select_delivery_transaction_limits_rpc( ¶ms, P::TargetChain::max_extrinsic_weight(), P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, @@ -250,14 +260,14 @@ where max_messages_size_in_single_batch, }, }, - SubstrateMessagesSource::
<P>
::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), params.lane_id, params.source_transaction_params, params.target_to_source_headers_relay, ), - SubstrateMessagesTarget::
<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, params.lane_id, @@ -278,8 +288,8 @@ where /// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that transaction /// will succeed. pub async fn relay_messages_range( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, target_transaction_params: TransactionParams>, at_source_block: HeaderIdOf, @@ -295,14 +305,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_range( - SubstrateMessagesSource::
<P>
::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ), - SubstrateMessagesTarget::
<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -321,8 +331,8 @@ where /// Relay messages delivery confirmation of Substrate-to-Substrate messages. /// No checks are made to ensure that transaction will succeed. pub async fn relay_messages_delivery_confirmation( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, at_target_block: HeaderIdOf, lane_id: LaneId, @@ -335,14 +345,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_delivery_confirmation( - SubstrateMessagesSource::
<P>
::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ), - SubstrateMessagesTarget::
<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -378,11 +388,10 @@ pub struct DirectReceiveMessagesProofCallBuilder { impl ReceiveMessagesProofCallBuilder
<P>
for DirectReceiveMessagesProofCallBuilder where P: SubstrateMessageLane, - R: BridgeMessagesConfig>, + R: BridgeMessagesConfig, I: 'static, - R::SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>, - >, + R::BridgedChain: + bp_runtime::Chain, Hash = HashOf>, CallOf: From> + GetDispatchInfo, { fn build_receive_messages_proof_call( @@ -394,7 +403,7 @@ where ) -> CallOf { let call: CallOf = BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, + proof: proof.1.into(), messages_count, dispatch_weight, } @@ -427,26 +436,26 @@ macro_rules! generate_receive_message_proof_call_builder { ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_proof:path) => { pub struct $mocked_builder; - impl $crate::messages_lane::ReceiveMessagesProofCallBuilder<$pipeline> + impl $crate::messages::ReceiveMessagesProofCallBuilder<$pipeline> for $mocked_builder { fn build_receive_messages_proof_call( relayer_id_at_source: relay_substrate_client::AccountIdOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain >, - proof: $crate::messages_source::SubstrateMessagesProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + proof: $crate::messages::source::SubstrateMessagesProof< + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain >, messages_count: u32, dispatch_weight: bp_messages::Weight, _trace_call: bool, ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain + <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain > { bp_runtime::paste::item! { $bridge_messages($receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, + proof: proof.1.into(), messages_count: messages_count, dispatch_weight: dispatch_weight, }) @@ -478,11 +487,7 @@ where P: SubstrateMessageLane, R: BridgeMessagesConfig, I: 'static, - R::TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - R::OutboundPayload, - R::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>, - >, + R::BridgedChain: bp_runtime::Chain>, CallOf: From> + GetDispatchInfo, { fn build_receive_messages_delivery_proof_call( @@ -491,7 +496,7 @@ where ) -> CallOf { let call: CallOf = BridgeMessagesCall::::receive_messages_delivery_proof { - proof: proof.1, + proof: proof.1.into(), relayers_state: proof.0, } .into(); @@ -523,16 +528,16 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_delivery_proof:path) => { pub struct $mocked_builder; - impl $crate::messages_lane::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> + impl $crate::messages::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> for $mocked_builder { fn build_receive_messages_delivery_proof_call( - proof: $crate::messages_target::SubstrateMessagesDeliveryProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain + proof: $crate::messages::target::SubstrateMessagesDeliveryProof< + <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain >, _trace_call: bool, ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain > { bp_runtime::paste::item! 
{ $bridge_messages($receive_messages_delivery_proof { @@ -546,12 +551,15 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { } /// Returns maximal number of messages and their maximal cumulative dispatch weight. -async fn select_delivery_transaction_limits_rpc( - params: &MessagesRelayParams
<P>
, +async fn select_delivery_transaction_limits_rpc( + params: &MessagesRelayParams, max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, ) -> anyhow::Result where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, { // We may try to guess accurate value, based on maximal number of messages and per-message @@ -567,20 +575,21 @@ where let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; // weight of empty message delivery with outbound lane state - let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::
<P>
(params, 0)?; + let best_target_block_hash = params.target_client.best_header_hash().await?; + let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::(params, 0)?; let delivery_tx_with_zero_messages_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_zero_messages) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_zero_messages) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) })?; // weight of single message delivery with outbound lane state - let delivery_tx_with_one_message = dummy_messages_delivery_transaction::
<P>
(params, 1)?; + let delivery_tx_with_one_message = dummy_messages_delivery_transaction::(params, 1)?; let delivery_tx_with_one_message_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_one_message) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_one_message) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) @@ -615,8 +624,8 @@ where } /// Returns dummy message delivery transaction with zero messages and `1kb` proof. -fn dummy_messages_delivery_transaction( - params: &MessagesRelayParams
<P>
, +fn dummy_messages_delivery_transaction( + params: &MessagesRelayParams, messages: u32, ) -> anyhow::Result<::SignedTransaction> where @@ -634,13 +643,7 @@ where Weight::zero(), FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), - // we may use per-chain `EXTRA_STORAGE_PROOF_SIZE`, but since we don't need - // exact values, this global estimation is fine - storage_proof: vec![vec![ - 42u8; - pallet_bridge_messages::EXTRA_STORAGE_PROOF_SIZE - as usize - ]], + storage_proof: Default::default(), lane: Default::default(), nonces_start: 1, nonces_end: messages as u64, @@ -666,3 +669,362 @@ where ) .map_err(Into::into) } + +#[cfg(test)] +mod tests { + use super::*; + use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, UnrewardedRelayersState, + }; + use relay_substrate_client::calls::{UtilityCall as MockUtilityCall, UtilityCall}; + + #[derive(codec::Decode, codec::Encode, Clone, Debug, PartialEq)] + pub enum RuntimeCall { + #[codec(index = 53)] + BridgeMessages(CodegenBridgeMessagesCall), + #[codec(index = 123)] + Utility(UtilityCall), + } + pub type CodegenBridgeMessagesCall = bp_messages::BridgeMessagesCall< + u64, + Box>, + FromBridgedChainMessagesDeliveryProof, + >; + + impl From> for RuntimeCall { + fn from(value: MockUtilityCall) -> RuntimeCall { + match value { + MockUtilityCall::batch_all(calls) => + RuntimeCall::Utility(UtilityCall::::batch_all(calls)), + } + } + } + + #[test] + fn ensure_macro_compatibility_for_generate_receive_message_proof_call_builder() { + // data + let receive_messages_proof = FromBridgedChainMessagesProof { + bridged_header_hash: Default::default(), + storage_proof: Default::default(), + lane: LaneId([0, 0, 0, 0]), + nonces_start: 0, + nonces_end: 0, + }; + let account = 1234; + let messages_count = 0; + let dispatch_weight = Default::default(); + + // construct pallet Call directly + let pallet_receive_messages_proof = + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: account, + proof: receive_messages_proof.clone().into(), + messages_count, + dispatch_weight, + }; + + // construct mock enum Call + let mock_enum_receive_messages_proof = CodegenBridgeMessagesCall::receive_messages_proof { + relayer_id_at_bridged_chain: account, + proof: receive_messages_proof.clone().into(), + messages_count, + dispatch_weight, + }; + + // now we should be able to use macro `generate_receive_message_proof_call_builder` + let relayer_call_builder_receive_messages_proof = relayer::ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( + account, + (Default::default(), receive_messages_proof), + messages_count, + dispatch_weight, + false, + ); + + // ensure they are all equal + assert_eq!( + pallet_receive_messages_proof.encode(), + mock_enum_receive_messages_proof.encode() + ); + match relayer_call_builder_receive_messages_proof { + RuntimeCall::BridgeMessages(call) => match call { + call @ CodegenBridgeMessagesCall::receive_messages_proof { .. 
} => + assert_eq!(pallet_receive_messages_proof.encode(), call.encode()), + _ => panic!("Unexpected CodegenBridgeMessagesCall type"), + }, + _ => panic!("Unexpected RuntimeCall type"), + }; + } + + #[test] + fn ensure_macro_compatibility_for_generate_receive_message_delivery_proof_call_builder() { + // data + let receive_messages_delivery_proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: Default::default(), + storage_proof: Default::default(), + lane: LaneId([0, 0, 0, 0]), + }; + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 0, + messages_in_oldest_entry: 0, + total_messages: 0, + last_delivered_nonce: 0, + }; + + // construct pallet Call directly + let pallet_receive_messages_delivery_proof = + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: receive_messages_delivery_proof.clone(), + relayers_state: relayers_state.clone(), + }; + + // construct mock enum Call + let mock_enum_receive_messages_delivery_proof = + CodegenBridgeMessagesCall::receive_messages_delivery_proof { + proof: receive_messages_delivery_proof.clone(), + relayers_state: relayers_state.clone(), + }; + + // now we should be able to use macro `generate_receive_message_proof_call_builder` + let relayer_call_builder_receive_messages_delivery_proof = relayer::ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder::build_receive_messages_delivery_proof_call( + (relayers_state, receive_messages_delivery_proof), + false, + ); + + // ensure they are all equal + assert_eq!( + pallet_receive_messages_delivery_proof.encode(), + mock_enum_receive_messages_delivery_proof.encode() + ); + match relayer_call_builder_receive_messages_delivery_proof { + RuntimeCall::BridgeMessages(call) => match call { + call @ CodegenBridgeMessagesCall::receive_messages_delivery_proof { .. } => + assert_eq!(pallet_receive_messages_delivery_proof.encode(), call.encode()), + _ => panic!("Unexpected CodegenBridgeMessagesCall type"), + }, + _ => panic!("Unexpected RuntimeCall type"), + }; + } + + // mock runtime with `pallet_bridge_messages` + mod mock { + use super::super::*; + use bp_messages::target_chain::ForbidInboundMessages; + use bp_runtime::ChainId; + use frame_support::derive_impl; + use sp_core::H256; + use sp_runtime::{ + generic, testing::Header as SubstrateHeader, traits::BlakeTwo256, StateVersion, + }; + + type Block = frame_system::mocking::MockBlock; + pub type SignedBlock = generic::SignedBlock; + + frame_support::construct_runtime! 
{ + pub enum TestRuntime + { + System: frame_system, + Messages: pallet_bridge_messages, + } + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for TestRuntime { + type Block = Block; + } + + impl pallet_bridge_messages::Config for TestRuntime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type ThisChain = ThisUnderlyingChain; + type BridgedChain = BridgedUnderlyingChain; + type BridgedHeaderChain = BridgedHeaderChain; + type ActiveOutboundLanes = (); + type OutboundPayload = Vec; + type InboundPayload = Vec; + type DeliveryPayments = (); + type DeliveryConfirmationPayments = (); + type OnMessagesDelivered = (); + type MessageDispatch = ForbidInboundMessages>; + } + + pub struct ThisUnderlyingChain; + + impl bp_runtime::Chain for ThisUnderlyingChain { + const ID: ChainId = *b"tuch"; + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = SubstrateHeader; + type AccountId = u64; + type Balance = u64; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { + u32::MAX + } + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } + } + + impl bp_messages::ChainWithMessages for ThisUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; + } + + pub struct BridgedUnderlyingChain; + + pub type BridgedHeaderHash = H256; + pub type BridgedChainHeader = SubstrateHeader; + + impl bp_runtime::Chain for BridgedUnderlyingChain { + const ID: ChainId = *b"bgdc"; + type BlockNumber = u64; + type Hash = BridgedHeaderHash; + type Hasher = BlakeTwo256; + type Header = BridgedChainHeader; + type AccountId = u64; + type Balance = u64; + type Nonce = u64; + type Signature = sp_runtime::MultiSignature; + const STATE_VERSION: StateVersion = StateVersion::V1; + fn max_extrinsic_size() -> u32 { + 4096 + } + fn max_extrinsic_weight() -> Weight { + Weight::MAX + } + } + + impl bp_messages::ChainWithMessages for BridgedUnderlyingChain { + const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = ""; + const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 16; + const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1000; + } + + pub struct BridgedHeaderChain; + + impl bp_header_chain::HeaderChain for BridgedHeaderChain { + fn finalized_header_state_root( + _hash: HashOf, + ) -> Option> { + unreachable!() + } + } + } + + // relayer configuration + mod relayer { + use super::*; + use crate::{ + messages::{ + tests::{mock, RuntimeCall}, + SubstrateMessageLane, + }, + UtilityPalletBatchCallBuilder, + }; + use bp_runtime::UnderlyingChainProvider; + use relay_substrate_client::{MockedRuntimeUtilityPallet, SignParam, UnsignedTransaction}; + use std::time::Duration; + + #[derive(Clone)] + pub struct ThisChain; + impl UnderlyingChainProvider for ThisChain { + type Chain = mock::ThisUnderlyingChain; + } + impl relay_substrate_client::Chain for ThisChain { + const NAME: &'static str = ""; + const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = ""; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = ""; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); + type SignedBlock = mock::SignedBlock; + type Call = RuntimeCall; + } + impl relay_substrate_client::ChainWithTransactions for ThisChain { + type AccountKeyPair = 
sp_core::sr25519::Pair; + type SignedTransaction = (); + + fn sign_transaction( + _: SignParam, + _: UnsignedTransaction, + ) -> Result + where + Self: Sized, + { + todo!() + } + } + impl relay_substrate_client::ChainWithMessages for ThisChain { + const WITH_CHAIN_RELAYERS_PALLET_NAME: Option<&'static str> = None; + const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + } + impl relay_substrate_client::ChainWithUtilityPallet for ThisChain { + type UtilityPallet = MockedRuntimeUtilityPallet; + } + + #[derive(Clone)] + pub struct BridgedChain; + impl UnderlyingChainProvider for BridgedChain { + type Chain = mock::BridgedUnderlyingChain; + } + impl relay_substrate_client::Chain for BridgedChain { + const NAME: &'static str = ""; + const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = ""; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = ""; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); + type SignedBlock = mock::SignedBlock; + type Call = RuntimeCall; + } + impl relay_substrate_client::ChainWithTransactions for BridgedChain { + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = (); + + fn sign_transaction( + _: SignParam, + _: UnsignedTransaction, + ) -> Result + where + Self: Sized, + { + todo!() + } + } + impl relay_substrate_client::ChainWithMessages for BridgedChain { + const WITH_CHAIN_RELAYERS_PALLET_NAME: Option<&'static str> = None; + const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = ""; + } + impl relay_substrate_client::ChainWithUtilityPallet for BridgedChain { + type UtilityPallet = MockedRuntimeUtilityPallet; + } + + #[derive(Clone, Debug)] + pub struct ThisChainToBridgedChainMessageLane; + impl SubstrateMessageLane for ThisChainToBridgedChainMessageLane { + type SourceChain = ThisChain; + type TargetChain = BridgedChain; + type ReceiveMessagesProofCallBuilder = + ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder; + type ReceiveMessagesDeliveryProofCallBuilder = + ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder; + type SourceBatchCallBuilder = UtilityPalletBatchCallBuilder; + type TargetBatchCallBuilder = UtilityPalletBatchCallBuilder; + } + + generate_receive_message_proof_call_builder!( + ThisChainToBridgedChainMessageLane, + ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder, + RuntimeCall::BridgeMessages, + CodegenBridgeMessagesCall::receive_messages_proof + ); + generate_receive_message_delivery_proof_call_builder!( + ThisChainToBridgedChainMessageLane, + ThisChainToBridgedChainMessageLaneReceiveMessagesDeliveryProofCallBuilder, + RuntimeCall::BridgeMessages, + CodegenBridgeMessagesCall::receive_messages_delivery_proof + ); + } +} diff --git a/bridges/relays/lib-substrate-relay/src/messages_source.rs b/bridges/relays/lib-substrate-relay/src/messages/source.rs similarity index 86% rename from bridges/relays/lib-substrate-relay/src/messages_source.rs rename to bridges/relays/lib-substrate-relay/src/messages/source.rs index 49deff046f9ca..b75fc86d5eee2 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_source.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/source.rs @@ -20,11 +20,12 @@ use crate::{ finality_base::best_synced_header_id, - messages_lane::{ + messages::{ BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesDeliveryProofCallBuilder, SubstrateMessageLane, }, on_demand::OnDemandRelay, + 
proofs::to_raw_storage_proof, TransactionParams, }; @@ -32,11 +33,11 @@ use async_std::sync::Arc; use async_trait::async_trait; use bp_messages::{ storage_keys::{operating_mode_key, outbound_lane_data_key}, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, InboundMessageDetails, LaneId, MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, }; -use bp_runtime::{BasicOperatingMode, HeaderIdProvider}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use bp_runtime::{BasicOperatingMode, HeaderIdProvider, RangeInclusiveExt}; use codec::Encode; use frame_support::weights::Weight; use messages_relay::{ @@ -63,19 +64,21 @@ pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, +pub struct SubstrateMessagesSource { + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option>>, } -impl SubstrateMessagesSource
{ +impl, TargetClnt> + SubstrateMessagesSource +{ /// Create new Substrate headers source. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option< @@ -98,22 +101,25 @@ impl SubstrateMessagesSource
{ ) -> Result, SubstrateError> { self.source_client .storage_value( + id.hash(), outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at source chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await + ensure_messages_pallet_active::(&self.source_client) + .await } } -impl Clone for SubstrateMessagesSource
{ +impl Clone + for SubstrateMessagesSource +{ fn clone(&self) -> Self { Self { source_client: self.source_client.clone(), @@ -126,7 +132,12 @@ impl Clone for SubstrateMessagesSource
{ } #[async_trait] -impl RelayClient for SubstrateMessagesSource
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -150,13 +161,17 @@ impl RelayClient for SubstrateMessagesSource
{ } #[async_trait] -impl SourceClient> for SubstrateMessagesSource
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > SourceClient> for SubstrateMessagesSource where AccountIdOf: From< as Pair>::Public>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -169,7 +184,7 @@ where // we can't relay confirmations if messages pallet at source chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.source_client, Some(&self.target_client)).await + read_client_state_from_both_chains(&self.source_client, &self.target_client).await } async fn latest_generated_nonce( @@ -203,12 +218,12 @@ where id: SourceHeaderIdOf>, nonces: RangeInclusive, ) -> Result>, SubstrateError> { - let mut out_msgs_details = self + let mut out_msgs_details: Vec<_> = self .source_client - .typed_state_call::<_, Vec<_>>( + .state_call::<_, Vec<_>>( + id.hash(), P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, *nonces.start(), *nonces.end()), - Some(id.1), ) .await?; validate_out_msgs_details::(&out_msgs_details, nonces)?; @@ -226,7 +241,7 @@ where out_msg_details.nonce, ); let msg_payload: MessagePayload = - self.source_client.storage_value(msg_key, Some(id.1)).await?.ok_or_else(|| { + self.source_client.storage_value(id.hash(), msg_key).await?.ok_or_else(|| { SubstrateError::Custom(format!( "Message to {} {:?}/{} is missing from runtime the storage of {} at {:?}", P::TargetChain::NAME, @@ -240,15 +255,16 @@ where msgs_to_refine.push((msg_payload, out_msg_details)); } + let best_target_header_hash = self.target_client.best_header_hash().await?; for mut msgs_to_refine_batch in split_msgs_to_refine::(self.lane_id, msgs_to_refine)? { let in_msgs_details = self .target_client - .typed_state_call::<_, Vec>( + .state_call::<_, Vec>( + best_target_header_hash, P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, &msgs_to_refine_batch), - None, ) .await?; if in_msgs_details.len() != msgs_to_refine_batch.len() { @@ -305,34 +321,27 @@ where ), SubstrateError, > { - let mut storage_keys = - Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); - let mut message_nonce = *nonces.start(); - while message_nonce <= *nonces.end() { + let mut storage_keys = Vec::with_capacity(nonces.saturating_len() as usize); + for message_nonce in nonces.clone() { let message_key = bp_messages::storage_keys::message_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, message_nonce, ); storage_keys.push(message_key); - message_nonce += 1; } if proof_parameters.outbound_state_proof_required { - storage_keys.push(bp_messages::storage_keys::outbound_lane_data_key( + storage_keys.push(outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, )); } - let proof = self - .source_client - .prove_storage(storage_keys, id.1) - .await? - .into_iter_nodes() - .collect(); + let storage_proof = + self.source_client.prove_storage(id.hash(), storage_keys.clone()).await?; let proof = FromBridgedChainMessagesProof { bridged_header_hash: id.1, - storage_proof: proof, + storage_proof: to_raw_storage_proof::(storage_proof), lane: self.lane_id, nonces_start: *nonces.start(), nonces_end: *nonces.end(), @@ -387,15 +396,19 @@ where } /// Ensure that the messages pallet at source chain is active. 
-pub(crate) async fn ensure_messages_pallet_active( - client: &Client, +pub(crate) async fn ensure_messages_pallet_active( + client: &AtChainClient, ) -> Result<(), SubstrateError> where AtChain: ChainWithMessages, WithChain: ChainWithMessages, + AtChainClient: Client, { let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) + .storage_value( + client.best_header_hash().await?, + operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), + ) .await?; let is_halted = operating_mode == Some(MessagesOperatingMode::Basic(BasicOperatingMode::Halted)); @@ -412,11 +425,10 @@ where /// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` /// runtime API to read the best finalized Bridged chain header. /// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. +/// The value of `actual_best_finalized_peer_at_best_self` will always match +/// the `best_finalized_peer_at_best_self`. pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, + self_client: &impl Client, ) -> Result, HeaderIdOf>, SubstrateError> where SelfChain: Chain, @@ -431,30 +443,42 @@ where let peer_on_self_best_finalized_id = best_synced_header_id::(self_client, self_best_id.hash()).await?; - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = - match (peer_client, peer_on_self_best_finalized_id.as_ref()) { - (Some(peer_client), Some(peer_on_self_best_finalized_id)) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; - Some(actual_peer_on_self_best_finalized.id()) - }, - _ => peer_on_self_best_finalized_id, - }; - Ok(ClientState { best_self: self_best_id, best_finalized_self: self_best_finalized_id, best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, + actual_best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, }) } +/// Does the same stuff as `read_client_state`, but properly fills the +/// `actual_best_finalized_peer_at_best_self` field of the result. +pub async fn read_client_state_from_both_chains( + self_client: &impl Client, + peer_client: &impl Client, +) -> Result, HeaderIdOf>, SubstrateError> +where + SelfChain: Chain, + PeerChain: Chain, +{ + let mut client_state = read_client_state::(self_client).await?; + client_state.actual_best_finalized_peer_at_best_self = + match client_state.best_finalized_peer_at_best_self.as_ref() { + Some(peer_on_self_best_finalized_id) => { + let actual_peer_on_self_best_finalized = + peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; + Some(actual_peer_on_self_best_finalized.id()) + }, + _ => client_state.best_finalized_peer_at_best_self, + }; + Ok(client_state) +} + /// Reads best `PeerChain` header known to the `SelfChain` using provided runtime API method. /// /// Method is supposed to be the `FinalityApi::best_finalized()` method. 
pub async fn best_finalized_peer_header_at_self( - self_client: &Client, + self_client: &impl Client, at_self_hash: HashOf, ) -> Result>, SubstrateError> where @@ -463,10 +487,10 @@ where { // now let's read id of best finalized peer header at our best finalized block self_client - .typed_state_call::<_, Option<_>>( + .state_call::<_, Option<_>>( + at_self_hash, PeerChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_self_hash), ) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages/target.rs similarity index 84% rename from bridges/relays/lib-substrate-relay/src/messages_target.rs rename to bridges/relays/lib-substrate-relay/src/messages/target.rs index 5ffb2b6c771e0..a6bf169cffb67 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/target.rs @@ -19,22 +19,25 @@ //! `` chain. use crate::{ - messages_lane::{ + messages::{ + source::{ + ensure_messages_pallet_active, read_client_state_from_both_chains, + SubstrateMessagesProof, + }, BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesProofCallBuilder, SubstrateMessageLane, }, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, on_demand::OnDemandRelay, + proofs::to_raw_storage_proof, TransactionParams, }; use async_std::sync::Arc; use async_trait::async_trait; use bp_messages::{ - storage_keys::inbound_lane_data_key, ChainWithMessages as _, InboundLaneData, LaneId, - MessageNonce, UnrewardedRelayersState, + source_chain::FromBridgedChainMessagesDeliveryProof, storage_keys::inbound_lane_data_key, + ChainWithMessages as _, InboundLaneData, LaneId, MessageNonce, UnrewardedRelayersState, }; -use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; use messages_relay::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, @@ -45,27 +48,31 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::ops::RangeInclusive; +use std::{convert::TryFrom, ops::RangeInclusive}; /// Message receiving proof returned by the target Substrate node. pub type SubstrateMessagesDeliveryProof = (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); /// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, +pub struct SubstrateMessagesTarget { + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, source_to_target_headers_relay: Option>>, } -impl SubstrateMessagesTarget
{ +impl SubstrateMessagesTarget +where + P: SubstrateMessageLane, + TargetClnt: Client, +{ /// Create new Substrate headers target. pub fn new( - target_client: Client, - source_client: Client, + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, @@ -90,22 +97,25 @@ impl SubstrateMessagesTarget
{ ) -> Result>>, SubstrateError> { self.target_client .storage_value( + id.hash(), inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at target chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await + ensure_messages_pallet_active::(&self.target_client) + .await } } -impl Clone for SubstrateMessagesTarget
{ +impl Clone + for SubstrateMessagesTarget +{ fn clone(&self) -> Self { Self { target_client: self.target_client.clone(), @@ -119,7 +129,12 @@ impl Clone for SubstrateMessagesTarget
{ } #[async_trait] -impl RelayClient for SubstrateMessagesTarget
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -143,14 +158,18 @@ impl RelayClient for SubstrateMessagesTarget
{ } #[async_trait] -impl TargetClient> for SubstrateMessagesTarget
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > TargetClient> for SubstrateMessagesTarget where AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -163,7 +182,7 @@ where // we can't relay messages if messages pallet at target chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.target_client, Some(&self.source_client)).await + read_client_state_from_both_chains(&self.target_client, &self.source_client).await } async fn latest_received_nonce( @@ -213,19 +232,16 @@ where SubstrateError, > { let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = bp_messages::storage_keys::inbound_lane_data_key( + let storage_keys = vec![inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, - ); - let proof = self - .target_client - .prove_storage(vec![inbound_data_key], id.1) - .await? - .into_iter_nodes() - .collect(); + )]; + + let storage_proof = + self.target_client.prove_storage(id.hash(), storage_keys.clone()).await?; let proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: id.1, - storage_proof: proof, + storage_proof: to_raw_storage_proof::(storage_proof), lane: self.lane_id, }; Ok((id, (relayers_state, proof))) diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 202f53ea4e4f5..d18c582dfac43 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -53,25 +53,30 @@ use crate::{ /// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops /// syncing headers. #[derive(Clone)] -pub struct OnDemandHeadersRelay { +pub struct OnDemandHeadersRelay { /// Relay task name. relay_task_name: String, /// Shared reference to maximal required finalized header number. required_header_number: RequiredHeaderNumberRef, /// Client of the source chain. - source_client: Client, + source_client: SourceClnt, /// Client of the target chain. - target_client: Client, + target_client: TargetClnt, } -impl OnDemandHeadersRelay
{ +impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandHeadersRelay +{ /// Create new on-demand headers relay. /// /// If `metrics_params` is `Some(_)`, the metrics of the finality relay are registered. /// Otherwise, all required metrics must be exposed outside of this method. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, metrics_params: Option, @@ -104,8 +109,12 @@ impl OnDemandHeadersRelay
{ } #[async_trait] -impl OnDemandRelay - for OnDemandHeadersRelay
+impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandRelay + for OnDemandHeadersRelay { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -139,7 +148,7 @@ impl OnDemandRelay::new(self.source_client.clone(), None); + SubstrateFinalitySource::::new(self.source_client.clone(), None); let (header, mut proof) = finality_source.prove_block_finality(current_required_header).await?; let header_id = header.id(); @@ -198,8 +207,8 @@ impl OnDemandRelay( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, @@ -209,7 +218,7 @@ async fn background_task( { let relay_task_name = on_demand_headers_relay_name::(); let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::
::new( + let mut finality_source = SubstrateFinalitySource::::new( source_client.clone(), Some(required_header_number.clone()), ); @@ -246,7 +255,8 @@ async fn background_task( // read best finalized source header number from target let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::
(&finality_target, &relay_task_name).await; + best_finalized_source_header_at_target::(&finality_target, &relay_task_name) + .await; if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { relay_utils::relay_loop::reconnect_failed_client( FailedClient::Target, @@ -410,13 +420,17 @@ async fn mandatory_headers_scan_range( /// it. /// /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource
, +async fn relay_mandatory_header_from_range( + finality_source: &SubstrateFinalitySource, required_header_number: &RequiredHeaderNumberRef, best_finalized_source_header_at_target: String, range: (BlockNumberOf, BlockNumberOf), relay_task_name: &str, -) -> Result { +) -> Result +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ // search for mandatory header first let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; @@ -451,10 +465,14 @@ async fn relay_mandatory_header_from_range( /// Read best finalized source block number from source client. /// /// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource
, +async fn best_finalized_source_header_at_source( + finality_source: &SubstrateFinalitySource, relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { +) -> Result, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { log::error!( target: "bridge", @@ -470,11 +488,16 @@ async fn best_finalized_source_header_at_source( - finality_target: &SubstrateFinalityTarget
, +async fn best_finalized_source_header_at_target( + finality_target: &SubstrateFinalityTarget, relay_task_name: &str, -) -> Result, as RelayClient>::Error> +) -> Result< + BlockNumberOf, + as RelayClient>::Error, +> where + P: SubstrateFinalitySyncPipeline, + TargetClnt: Client, AccountIdOf: From< as sp_core::Pair>::Public>, { finality_target @@ -496,10 +519,14 @@ where /// Read first mandatory header in given inclusive range. /// /// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource
, +async fn find_mandatory_header_in_range( + finality_source: &SubstrateFinalitySource, range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { +) -> Result>, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ let mut current = range.0; while current <= range.1 { let header = finality_source.client().header_by_number(current).await?; diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 966bdc3107203..4579222a2c681 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -17,7 +17,7 @@ //! On-demand Substrate -> Substrate parachain finality relay. use crate::{ - messages_source::best_finalized_peer_header_at_self, + messages::source::best_finalized_peer_header_at_self, on_demand::OnDemandRelay, parachains::{ source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter, @@ -53,29 +53,34 @@ use std::fmt::Debug; /// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers /// are relayed, on-demand stops syncing headers. #[derive(Clone)] -pub struct OnDemandParachainsRelay { +pub struct OnDemandParachainsRelay { /// Relay task name. relay_task_name: String, /// Channel used to communicate with background task and ask for relay of parachain heads. required_header_number_sender: Sender>, /// Source relay chain client. - source_relay_client: Client, + source_relay_client: SourceRelayClnt, /// Target chain client. - target_client: Client, + target_client: TargetClnt, /// On-demand relay chain relay. on_demand_source_relay_to_target_headers: Arc>, } -impl OnDemandParachainsRelay
{ +impl< + P: SubstrateParachainsPipeline, + SourceRelayClnt: Client, + TargetClnt: Client, + > OnDemandParachainsRelay +{ /// Create new on-demand parachains relay. /// /// Note that the argument is the source relay chain client, not the parachain client. /// That's because parachain finality is determined by the relay chain and we don't /// need to connect to the parachain itself here. pub fn new( - source_relay_client: Client, - target_client: Client, + source_relay_client: SourceRelayClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -114,10 +119,13 @@ impl OnDemandParachainsRelay
{ } #[async_trait] -impl OnDemandRelay - for OnDemandParachainsRelay
+impl + OnDemandRelay + for OnDemandParachainsRelay where P::SourceParachain: Chain, + SourceRelayClnt: Client, + TargetClnt: Client, { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -147,7 +155,7 @@ where required_parachain_header: BlockNumberOf, ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { // select headers to prove - let parachains_source = ParachainsSource::
::new( + let parachains_source = ParachainsSource::::new( self.source_relay_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -231,8 +239,8 @@ where /// Background task that is responsible for starting parachain headers relay. async fn background_task( - source_relay_client: Client, - target_client: Client, + source_relay_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -255,9 +263,11 @@ async fn background_task( let parachains_relay_task = futures::future::Fuse::terminated(); futures::pin_mut!(parachains_relay_task); - let mut parachains_source = - ParachainsSource::
::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = ParachainsTarget::
::new( + let mut parachains_source = ParachainsSource::::new( + source_relay_client.clone(), + required_para_header_ref.clone(), + ); + let mut parachains_target = ParachainsTarget::::new( source_relay_client.clone(), target_client.clone(), target_transaction_params.clone(), @@ -446,9 +456,9 @@ struct RelayData { } /// Read required data from source and target clients. -async fn read_relay_data( - source: &ParachainsSource
, - target: &ParachainsTarget
, +async fn read_relay_data( + source: &ParachainsSource, + target: &ParachainsTarget, required_header_number: BlockNumberOf, ) -> Result< RelayData< @@ -459,7 +469,9 @@ async fn read_relay_data( FailedClient, > where - ParachainsTarget
: + SourceRelayClnt: Client, + TargetClnt: Client, + ParachainsTarget: TargetClient> + RelayClient, { let map_target_err = |e| { @@ -642,13 +654,19 @@ trait SelectHeadersToProveEnvironment { } #[async_trait] -impl<'a, P: SubstrateParachainsPipeline> +impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> SelectHeadersToProveEnvironment< BlockNumberOf, HashOf, BlockNumberOf, HashOf, - > for (&'a OnDemandParachainsRelay
, &'a ParachainsSource
) + > + for ( + &'a OnDemandParachainsRelay, + &'a ParachainsSource, + ) where + SourceRelayClnt: Client, + TargetClnt: Client, { fn parachain_id(&self) -> ParaId { ParaId(P::SourceParachain::PARACHAIN_ID) @@ -663,9 +681,8 @@ impl<'a, P: SubstrateParachainsPipeline> async fn best_finalized_relay_block_at_target( &self, ) -> Result, SubstrateError> { - Ok(crate::messages_source::read_client_state::( + Ok(crate::messages::source::read_client_state::( &self.0.target_client, - None, ) .await? .best_finalized_peer_at_best_self diff --git a/bridges/relays/lib-substrate-relay/src/parachains/source.rs b/bridges/relays/lib-substrate-relay/src/parachains/source.rs index 4cc512b9d9b45..1aa12d1c913d1 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/source.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/source.rs @@ -16,8 +16,10 @@ //! Parachain heads source. -use crate::parachains::{ParachainsPipelineAdapter, SubstrateParachainsPipeline}; - +use crate::{ + parachains::{ParachainsPipelineAdapter, SubstrateParachainsPipeline}, + proofs::to_raw_storage_proof, +}; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; use bp_parachains::parachain_head_storage_key_at_source; @@ -37,22 +39,24 @@ pub type RequiredHeaderIdRef = Arc>>>; /// Substrate client as parachain heads source. #[derive(Clone)] -pub struct ParachainsSource { - client: Client, +pub struct ParachainsSource { + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, } -impl ParachainsSource
{ +impl> + ParachainsSource +{ /// Creates new parachains source client. pub fn new( - client: Client, + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, ) -> Self { ParachainsSource { client, max_head_id } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceRelayClnt { &self.client } @@ -64,8 +68,8 @@ impl ParachainsSource
{ let para_id = ParaId(P::SourceParachain::PARACHAIN_ID); let storage_key = parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, para_id); - let para_head = self.client.raw_storage_value(storage_key, Some(at_block.1)).await?; - let para_head = para_head.map(|h| ParaHead::decode(&mut &h.0[..])).transpose()?; + let para_head: Option = + self.client.storage_value(at_block.hash(), storage_key).await?; let para_head = match para_head { Some(para_head) => para_head, None => return Ok(None), @@ -76,7 +80,9 @@ impl ParachainsSource
{ } #[async_trait] -impl RelayClient for ParachainsSource
{ +impl> RelayClient + for ParachainsSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -85,8 +91,8 @@ impl RelayClient for ParachainsSource
{ } #[async_trait] -impl SourceClient> - for ParachainsSource
+impl> + SourceClient> for ParachainsSource where P::SourceParachain: Chain, { @@ -149,12 +155,9 @@ where let parachain = ParaId(P::SourceParachain::PARACHAIN_ID); let storage_key = parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, parachain); - let parachain_heads_proof = self - .client - .prove_storage(vec![storage_key.clone()], at_block.1) - .await? - .into_iter_nodes() - .collect(); + + let storage_proof = + self.client.prove_storage(at_block.hash(), vec![storage_key.clone()]).await?; // why we're reading parachain head here once again (it has already been read at the // `parachain_head`)? that's because `parachain_head` sometimes returns obsolete parachain @@ -165,10 +168,8 @@ where // rereading actual value here let parachain_head = self .client - .raw_storage_value(storage_key, Some(at_block.1)) + .storage_value::(at_block.hash(), storage_key) .await? - .map(|h| ParaHead::decode(&mut &h.0[..])) - .transpose()? .ok_or_else(|| { SubstrateError::Custom(format!( "Failed to read expected parachain {parachain:?} head at {at_block:?}" @@ -176,6 +177,11 @@ where })?; let parachain_head_hash = parachain_head.hash(); - Ok((ParaHeadsProof { storage_proof: parachain_heads_proof }, parachain_head_hash)) + Ok(( + ParaHeadsProof { + storage_proof: to_raw_storage_proof::(storage_proof), + }, + parachain_head_hash, + )) } } diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index 531d55b532236..f66b193340c1a 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -42,31 +42,42 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; +use sp_runtime::traits::Header; /// Substrate client as parachain heads source. -pub struct ParachainsTarget { - source_client: Client, - target_client: Client, +pub struct ParachainsTarget { + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, } -impl ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > ParachainsTarget +{ /// Creates new parachains target client. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { ParachainsTarget { source_client, target_client, transaction_params } } /// Returns reference to the underlying RPC client. - pub fn target_client(&self) -> &Client { + pub fn target_client(&self) -> &TargetClnt { &self.target_client } } -impl Clone for ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Clone, + > Clone for ParachainsTarget +{ fn clone(&self) -> Self { ParachainsTarget { source_client: self.source_client.clone(), @@ -77,7 +88,12 @@ impl Clone for ParachainsTarget
{ } #[async_trait] -impl RelayClient for ParachainsTarget
{ +impl< + P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for ParachainsTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -88,14 +104,17 @@ impl RelayClient for ParachainsTarget
{ } #[async_trait] -impl
TargetClient> for ParachainsTarget
+impl TargetClient> + for ParachainsTarget where P: SubstrateParachainsPipeline, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, P::SourceParachain: ChainBase, P::SourceRelayChain: ChainBase, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_block(&self) -> Result, Self::Error> { let best_header = self.target_client.best_header().await?; @@ -109,10 +128,10 @@ where at_block: &HeaderIdOf, ) -> Result, Self::Error> { self.target_client - .typed_state_call::<_, Option>>( + .state_call::<_, Option>>( + at_block.hash(), P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_block.1), ) .await? .map(Ok) @@ -124,7 +143,11 @@ where ) -> Result>, Self::Error> { Ok(self .target_client - .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) + .state_call( + self.target_client.best_header().await?.hash(), + P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), + (), + ) .await .unwrap_or_else(|e| { log::info!( @@ -151,7 +174,7 @@ where &P::SourceParachain::PARACHAIN_ID.into(), ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_info = match storage_value { Some(para_info) => para_info, None => return Ok(None), @@ -172,7 +195,7 @@ where ¶_info.best_head_hash.head_hash, ); let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + self.target_client.storage_value(at_block.hash(), storage_key).await?; let para_head_number = match storage_value { Some(para_head_data) => para_head_data.decode_parachain_head_data::()?.number, diff --git a/bridges/relays/messages/Cargo.toml b/bridges/relays/messages/Cargo.toml index 570e11c0da6fe..c7a132bb3bae7 100644 --- a/bridges/relays/messages/Cargo.toml +++ b/bridges/relays/messages/Cargo.toml @@ -11,19 +11,18 @@ publish = false workspace = true [dependencies] -async-std = { version = "1.9.0", features = ["attributes"] } -async-trait = "0.1.79" -env_logger = "0.11" -futures = "0.3.30" -hex = "0.4" +async-std = { features = ["attributes"], workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +hex = { workspace = true, default-features = true } log = { workspace = true } -num-traits = "0.2" -parking_lot = "0.12.1" +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } # Bridge Dependencies -bp-messages = { path = "../../primitives/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } +bp-messages = { workspace = true, default-features = true } +finality-relay = { workspace = true } +relay-utils = { workspace = true } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } +sp-arithmetic = { workspace = true, default-features = true } diff --git a/bridges/relays/parachains/Cargo.toml b/bridges/relays/parachains/Cargo.toml index 8d38e4e6bd07c..ed03bdbb0f65e 100644 --- a/bridges/relays/parachains/Cargo.toml +++ b/bridges/relays/parachains/Cargo.toml @@ -11,18 +11,18 @@ publish = false workspace = true [dependencies] -async-std = "1.9.0" -async-trait = "0.1.79" -futures = "0.3.30" +async-std = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } log = { workspace = true } -relay-utils = { path = "../utils" } +relay-utils = { workspace = true } # Bridge 
dependencies -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -relay-substrate-client = { path = "../client-substrate" } +bp-polkadot-core = { workspace = true, default-features = true } +relay-substrate-client = { workspace = true } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } -sp-core = { path = "../../../substrate/primitives/core" } +codec = { workspace = true, default-features = true } +relay-substrate-client = { features = ["test-helpers"], workspace = true } +sp-core = { workspace = true, default-features = true } diff --git a/bridges/relays/parachains/src/parachains_loop.rs b/bridges/relays/parachains/src/parachains_loop.rs index fd73ca2d46c00..0fd1d72c7075b 100644 --- a/bridges/relays/parachains/src/parachains_loop.rs +++ b/bridges/relays/parachains/src/parachains_loop.rs @@ -680,7 +680,6 @@ impl SubmittedHeadsTracker
{ mod tests { use super::*; use async_std::sync::{Arc, Mutex}; - use codec::Encode; use futures::{SinkExt, StreamExt}; use relay_substrate_client::test_chain::{TestChain, TestParachain}; use relay_utils::{HeaderId, MaybeConnectionError}; @@ -821,8 +820,7 @@ mod tests { let head_result = SourceClient::::parachain_head(self, at_block).await?; let head = head_result.as_available().unwrap(); - let storage_proof = vec![head.hash().encode()]; - let proof = (ParaHeadsProof { storage_proof }, head.hash()); + let proof = (ParaHeadsProof { storage_proof: Default::default() }, head.hash()); self.data.lock().await.source_proof.clone().map(|_| proof) } } diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index 4765730a0b4f9..93e42763967b9 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -11,29 +11,29 @@ publish = false workspace = true [dependencies] -ansi_term = "0.12" -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.79" -backoff = "0.4" -isahc = "1.2" -env_logger = "0.11.3" -futures = "0.3.30" -jsonpath_lib = "0.3" +ansi_term = { workspace = true } +anyhow = { workspace = true } +async-std = { workspace = true } +async-trait = { workspace = true } +backoff = { workspace = true } +isahc = { workspace = true } +env_logger = { workspace = true } +futures = { workspace = true } +jsonpath_lib = { workspace = true } log = { workspace = true } -num-traits = "0.2" -parking_lot = "0.12.1" +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sysinfo = "0.30" -time = { version = "0.3", features = ["formatting", "local-offset", "std"] } -tokio = { version = "1.37", features = ["rt"] } +sysinfo = { workspace = true } +time = { features = ["formatting", "local-offset", "std"], workspace = true } +tokio = { features = ["rt"], workspace = true, default-features = true } thiserror = { workspace = true } # Bridge dependencies -bp-runtime = { path = "../../primitives/runtime" } +bp-runtime = { workspace = true, default-features = true } # Substrate dependencies -sp-runtime = { path = "../../../substrate/primitives/runtime" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index cab2b06b0931e..666ac3fbc8a2a 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -17,34 +17,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } serde_json = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, 
optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { optional = true, workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } -snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -static_assertions = { version = "1.1.0", default-features = false } -pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false, optional = true } +snowbridge-core = { workspace = true } +snowbridge-ethereum = { workspace = true } +snowbridge-pallet-ethereum-client-fixtures = { optional = true, workspace = true } +snowbridge-beacon-primitives = { workspace = true } +static_assertions = { workspace = true } +pallet-timestamp = { optional = true, workspace = true } [dev-dependencies] -rand = "0.8.5" -sp-keyring = { path = "../../../../substrate/primitives/keyring" } +rand = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -hex-literal = "0.4.1" -pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } -snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures" } -sp-io = { path = "../../../../substrate/primitives/io" } +hex-literal = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client-fixtures = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } [features] diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml index 858e2513a9612..bd4176875733f 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml @@ -15,11 +15,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = { version = "0.4.1" } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } -snowbridge-beacon-primitives = { path = "../../../primitives/beacon", default-features = false } +hex-literal = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +snowbridge-core = { workspace = true } 
+snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index d63398770f207..1b08bb39b4346 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -16,35 +16,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -alloy-primitives = { version = "0.4.2", default-features = false, features = ["rlp"] } -alloy-sol-types = { version = "0.4.2", default-features = false } +alloy-primitives = { features = ["rlp"], workspace = true } +alloy-sol-types = { workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } -snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "fixtures", default-features = false, optional = true } +snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { optional = true, workspace = true } [dev-dependencies] -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -snowbridge-pallet-ethereum-client = { path = "../ethereum-client" } -hex-literal = { version = "0.4.1" } +frame-benchmarking = { workspace = true, default-features = true } +sp-keyring = { workspace = true, 
default-features = true } +snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index e84246fb5a551..b66b57c3620ad 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -15,11 +15,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = { version = "0.4.1" } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } -snowbridge-beacon-primitives = { path = "../../../primitives/beacon", default-features = false } +hex-literal = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index a842f9aa60cb9..a031676c6076a 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -53,20 +53,11 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 15c6c3a5b32b0..78546e258daa3 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -16,27 +16,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } -bridge-hub-common = { path = "../../../../cumulus/parachains/runtimes/bridge-hubs/common", default-features = false } +bridge-hub-common = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -snowbridge-outbound-queue-merkle-tree = { path = "merkle-tree", default-features = false } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +snowbridge-core = { features = ["serde"], workspace = true } +snowbridge-outbound-queue-merkle-tree = { workspace = true } +ethabi = { workspace = true } [dev-dependencies] -pallet-message-queue = { path = "../../../../substrate/frame/message-queue", default-features = false } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } +pallet-message-queue = { workspace = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 1b1a9905928f8..00cc700fbe832 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -15,18 +15,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } -scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = 
"../../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -hex-literal = { version = "0.4.1" } -env_logger = "0.11" -hex = "0.4" -array-bytes = "6.2.2" -sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } +hex-literal = { workspace = true, default-features = true } +env_logger = { workspace = true } +hex = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index b8d704f1cb92d..d35bdde5a81e7 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", features = ["derive"], default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -snowbridge-outbound-queue-merkle-tree = { path = "../merkle-tree", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +frame-support = { workspace = true } +snowbridge-outbound-queue-merkle-tree = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index 5bbbb1d9310da..f1e749afb9977 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -15,33 +15,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -xcm = { package = "staging-xcm", path = 
"../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-core = { workspace = true } [dev-dependencies] -hex = "0.4.1" -hex-literal = { version = "0.4.1" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -polkadot-primitives = { path = "../../../../polkadot/primitives" } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue" } -snowbridge-pallet-outbound-queue = { path = "../outbound-queue" } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +snowbridge-pallet-outbound-queue = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 42df5edfb7b2d..7c524dd2edadb 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -snowbridge-core = { path = "../../../primitives/core", default-features = false } +], workspace = true } +sp-std = { workspace = true } +sp-api = { workspace = true } +xcm = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index d7fc4152b3710..98bd3da9ab27c 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -112,20 +112,11 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_xcm_origin::Config for Test { diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 18123910c35b2..9ced99fbf3fdd 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -13,26 +13,26 @@ workspace = true [dependencies] serde = { optional = true, 
features = ["derive"], workspace = true, default-features = true } -hex = { version = "0.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -rlp = { version = "0.5", default-features = false } +hex = { workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +rlp = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } -ssz_rs = { version = "0.9.0", default-features = false } -ssz_rs_derive = { version = "0.9.0", default-features = false } -byte-slice-cast = { version = "1.2.1", default-features = false } +ssz_rs = { workspace = true } +ssz_rs_derive = { workspace = true } +byte-slice-cast = { workspace = true } -snowbridge-ethereum = { path = "../ethereum", default-features = false } -milagro-bls = { package = "snowbridge-milagro-bls", version = "1.5.4", default-features = false } +snowbridge-ethereum = { workspace = true } +milagro-bls = { workspace = true } [dev-dependencies] -hex-literal = { version = "0.4.1" } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 573ab6608e5f9..f9bee1ff4959a 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -13,28 +13,28 @@ workspace = true [dependencies] serde = { optional = true, features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-arithmetic = { path = 
"../../../../substrate/primitives/arithmetic", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } -snowbridge-beacon-primitives = { path = "../beacon", default-features = false } +snowbridge-beacon-primitives = { workspace = true } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +ethabi = { workspace = true } [dev-dependencies] -hex = { version = "0.4.3" } +hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index fb0b6cbaf3c2f..764ce90b8139d 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -14,23 +14,23 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } serde-big-array = { optional = true, features = ["const-generics"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -ethbloom = { version = "0.13.0", default-features = false } -ethereum-types = { version = "0.14.1", default-features = false, features = ["codec", "rlp", "serialize"] } -hex-literal = { version = "0.4.1", default-features = false } -parity-bytes = { version = "0.1.2", default-features = false } -rlp = { version = "0.5.2", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +ethbloom = { workspace = true } +ethereum-types = { features = ["codec", "rlp", "serialize"], workspace = true } +hex-literal = { workspace = true } +parity-bytes = { workspace = true } +rlp = { workspace = true } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-io = { workspace = true } +sp-std = { workspace = true } +sp-runtime = { workspace = true } -ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = false } +ethabi = { workspace = true } [dev-dependencies] -wasm-bindgen-test = "0.3.19" -rand = "0.8.5" +wasm-bindgen-test = { workspace = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } [features] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index ec0888dd41b0c..ee8d481cec12a 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -12,25 +12,24 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-core = { path = 
"../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../core", default-features = false } +snowbridge-core = { workspace = true } -hex-literal = { version = "0.4.1" } +hex-literal = { workspace = true, default-features = true } [dev-dependencies] -rustc-hex = { version = "2.1.0" } [features] default = ["std"] diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index 2372908b86ab5..d47cb3cb7101f 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +codec = { workspace = true } +frame-support = { workspace = true } +sp-std = { workspace = true } +sp-arithmetic = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } -snowbridge-core = { path = "../../primitives/core", default-features = false } +snowbridge-core = { workspace = true } [dev-dependencies] diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index e19c682de4542..6f8e586bf5ff1 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -11,38 +11,38 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue", default-features = false } 
-pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false } -pallet-utility = { path = "../../../../substrate/frame/utility", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false } -pallet-collator-selection = { path = "../../../../cumulus/pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../../cumulus/parachains/pallets/parachain-info", default-features = false } -parachains-runtimes-test-utils = { path = "../../../../cumulus/parachains/runtimes/test-utils", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Ethereum Bridge (Snowbridge) -snowbridge-core = { path = "../../primitives/core", default-features = false } -snowbridge-pallet-ethereum-client = { path = "../../pallets/ethereum-client", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "../../pallets/ethereum-client/fixtures", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../pallets/outbound-queue", default-features = false } -snowbridge-pallet-system = { path = "../../pallets/system", default-features = false } +snowbridge-core = { workspace = true } +snowbridge-pallet-ethereum-client = { workspace = true } +snowbridge-pallet-ethereum-client-fixtures = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-system = { workspace = true } [features] default = ["std"] diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json index b2dddaa19ed15..ca3abcc528cfa 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json @@ -736,9 +736,9 @@ } }, "node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + 
"version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "engines": { "node": ">=10.0.0" }, diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 410ac8b983d96..9b6f6b73960b4 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -10,15 +10,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -url = "2.4.0" +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +url = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-service = { path = "../../../substrate/client/service" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 39cedf87a0cb1..6ebde0c2c653b 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -10,41 +10,41 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -tracing = "0.1.25" +parking_lot = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, 
default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../consensus/common" } -cumulus-client-network = { path = "../network" } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-network = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1.79" +async-trait = { workspace = true } # Substrate -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } # Polkadot -polkadot-node-subsystem-test-helpers = { path = "../../../polkadot/node/subsystem-test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } # Cumulus -cumulus-test-client = { path = "../../test/client" } -cumulus-test-runtime = { path = "../../test/runtime" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } +cumulus-test-client = { workspace = true } +cumulus-test-runtime = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index fad30e59e869d..01e07cb395a95 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -10,44 +10,47 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -tracing = "0.1.37" -schnellru = "0.2.1" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true } +tracing = { workspace = true, default-features = true } +schnellru = { workspace = true } +tokio = { workspace = true, features = ["macros"] } # Substrate -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../../substrate/client/consensus/aura" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sc-consensus-slots = { path = "../../../../substrate/client/consensus/slots" } -sc-telemetry = { path = "../../../../substrate/client/telemetry" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-block-builder = { path = "../../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } 
-sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../common" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } -cumulus-client-consensus-proposer = { path = "../proposer" } -cumulus-client-parachain-inherent = { path = "../../parachain-inherent" } -cumulus-primitives-aura = { path = "../../../primitives/aura" } -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-client-collator = { path = "../../collator" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-aura = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../polkadot/primitives" } -polkadot-node-primitives = { path = "../../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../../polkadot/node/overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 776052215d939..dc830e463a4f5 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -156,15 +156,8 @@ where Ok((paras_inherent_data, other_inherent_data)) } - /// Propose, seal, and import a block, packaging it into a collation. 
- /// - /// Provide the slot to build at as well as any other necessary pre-digest logs, - /// the inherent data, and the proposal duration and PoV size limits. - /// - /// The Aura pre-digest should not be explicitly provided and is set internally. - /// - /// This does not announce the collation to the parachain network or the relay chain. - pub async fn collate( + /// Build and import a parachain block on the given parent header, using the given slot claim. + pub async fn build_block_and_import( &mut self, parent_header: &Block::Header, slot_claim: &SlotClaim, @@ -172,10 +165,7 @@ where inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, - ) -> Result< - Option<(Collation, ParachainBlockData, Block::Hash)>, - Box, - > { + ) -> Result>, Box> { let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); @@ -205,7 +195,6 @@ where ) .map_err(|e| e as Box)?; - let post_hash = sealed_importable.post_hash(); let block = Block::new( sealed_importable.post_header(), sealed_importable @@ -220,11 +209,46 @@ where .map_err(|e| Box::new(e) as Box) .await?; - if let Some((collation, block_data)) = self.collator_service.build_collation( - parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { + Ok(Some(ParachainCandidate { block, proof: proposal.proof })) + } + + /// Propose, seal, import a block and packaging it into a collation. + /// + /// Provide the slot to build at as well as any other necessary pre-digest logs, + /// the inherent data, and the proposal duration and PoV size limits. + /// + /// The Aura pre-digest should not be explicitly provided and is set internally. + /// + /// This does not announce the collation to the parachain network or the relay chain. 
+ pub async fn collate( + &mut self, + parent_header: &Block::Header, + slot_claim: &SlotClaim, + additional_pre_digest: impl Into>>, + inherent_data: (ParachainInherentData, InherentData), + proposal_duration: Duration, + max_pov_size: usize, + ) -> Result< + Option<(Collation, ParachainBlockData, Block::Hash)>, + Box, + > { + let maybe_candidate = self + .build_block_and_import( + parent_header, + slot_claim, + additional_pre_digest, + inherent_data, + proposal_duration, + max_pov_size, + ) + .await?; + + let Some(candidate) = maybe_candidate else { return Ok(None) }; + + let hash = candidate.block.header().hash(); + if let Some((collation, block_data)) = + self.collator_service.build_collation(parent_header, hash, candidate) + { tracing::info!( target: crate::LOG_TARGET, "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", @@ -241,7 +265,7 @@ where ); } - Ok(Some((collation, block_data, post_hash))) + Ok(Some((collation, block_data, hash))) } else { Err(Box::::from("Unable to produce collation") as Box) diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 1047c6219ad13..4efd50a04ec6e 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -41,7 +41,6 @@ use sc_consensus::BlockImport; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; @@ -53,7 +52,7 @@ use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -64,8 +63,6 @@ pub struct Params { pub para_client: Arc, /// A handle to the relay-chain client. pub relay_client: RClient, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -89,8 +86,8 @@ pub struct Params { } /// Run bare Aura consensus as a relay-chain-driven collator. 
-pub fn run( - params: Params, +pub fn run( + params: Params, ) -> impl Future + Send + 'static where Block: BlockT + Send, @@ -108,7 +105,6 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, P: Pair, diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 09416233ea9b3..749b131123949 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -33,46 +33,34 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ - self as consensus_common, load_abridged_host_configuration, ParachainBlockImportMarker, - ParentSearchParams, -}; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, -}; +use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::SubmitCollationParams; -use polkadot_node_subsystem::messages::{ - CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, -}; +use polkadot_node_subsystem::messages::CollationGenerationMessage; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{ - AsyncBackingParams, CollatorPair, CoreIndex, CoreState, Id as ParaId, OccupiedCoreAssumption, -}; +use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; -use futures::{channel::oneshot, prelude::*}; +use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; -use sc_consensus_aura::standalone as aura_internal; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus::SyncOracle; use sp_consensus_aura::{AuraApi, Slot}; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; -use sp_timestamp::Timestamp; use std::{sync::Arc, time::Duration}; -use crate::collator::{self as collator_util, SlotClaim}; +use crate::collator::{self as collator_util}; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -87,8 +75,6 @@ pub struct Params { pub relay_client: RClient, /// A validation code hash provider, used to get the current validation code hash. pub code_hash_provider: CHP, - /// A chain synchronization oracle. - pub sync_oracle: SO, /// The underlying keystore, which should contain Aura consensus keys. pub keystore: KeystorePtr, /// The collator key used to sign collations before submitting to validators. @@ -110,8 +96,8 @@ pub struct Params { } /// Run async-backing-friendly Aura. 
-pub fn run( - mut params: Params, +pub fn run( + mut params: Params, ) -> impl Future + Send + 'static where Block: BlockT, @@ -130,7 +116,6 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, @@ -138,14 +123,6 @@ where P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, { - // This is an arbitrary value which is likely guaranteed to exceed any reasonable - // limit, as it would correspond to 10 non-included blocks. - // - // Since we only search for parent blocks which have already been imported, - // we can guarantee that all imported blocks respect the unincluded segment - // rules specified by the parachain's runtime and thus will never be too deep. - const PARENT_SEARCH_DEPTH: usize = 10; - async move { cumulus_client_collator::initialize_collator_subsystems( &mut params.overseer_handle, @@ -186,12 +163,9 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - // TODO: Currently we use just the first core here, but for elastic scaling - // we iterate and build on all of the cores returned. - let core_index = if let Some(core_index) = cores_scheduled_for_para( + let core_index = if let Some(core_index) = super::cores_scheduled_for_para( relay_parent, params.para_id, - &mut params.overseer_handle, &mut params.relay_client, ) .await @@ -226,42 +200,16 @@ where }, }; - let parent_search_params = ParentSearchParams { + let (included_block, initial_parent) = match crate::collators::find_parent( relay_parent, - para_id: params.para_id, - ancestry_lookback: async_backing_params(relay_parent, ¶ms.relay_client) - .await - .map(|c| c.allowed_ancestry_len as usize) - .unwrap_or(0), - max_depth: PARENT_SEARCH_DEPTH, - ignore_alternative_branches: true, - }; - - let potential_parents = - cumulus_client_consensus_common::find_potential_parents::( - parent_search_params, - &*params.para_backend, - ¶ms.relay_client, - ) - .await; - - let mut potential_parents = match potential_parents { - Err(e) => { - tracing::error!( - target: crate::LOG_TARGET, - ?relay_parent, - err = ?e, - "Could not fetch potential parents to build upon" - ); - - continue - }, - Ok(x) => x, - }; - - let included_block = match potential_parents.iter().find(|x| x.depth == 0) { - None => continue, // also serves as an `is_empty` check. - Some(b) => b.hash, + params.para_id, + &*params.para_backend, + ¶ms.relay_client, + ) + .await + { + Some(value) => value, + None => continue, }; let para_client = &*params.para_client; @@ -292,7 +240,7 @@ where relay_chain_slot_duration = ?params.relay_chain_slot_duration, "Adjusted relay-chain slot to parachain slot" ); - Some(can_build_upon::<_, _, P>( + Some(super::can_build_upon::<_, _, P>( slot_now, timestamp, block_hash, @@ -302,13 +250,6 @@ where )) }; - // Sort by depth, ascending, to choose the longest chain. - // - // If the longest chain has space, build upon that. Otherwise, don't - // build at all. - potential_parents.sort_by_key(|a| a.depth); - let Some(initial_parent) = potential_parents.pop() else { continue }; - // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. 
let mut parent_hash = initial_parent.hash; @@ -363,13 +304,11 @@ where Ok(x) => x, }; - let validation_code_hash = match params.code_hash_provider.code_hash_at(parent_hash) - { - None => { - tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); - break - }, - Some(v) => v, + let Some(validation_code_hash) = + params.code_hash_provider.code_hash_at(parent_hash) + else { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break }; super::check_validation_code_or_log( @@ -437,124 +376,3 @@ where } } } - -// Checks if we own the slot at the given block and whether there -// is space in the unincluded segment. -async fn can_build_upon( - slot: Slot, - timestamp: Timestamp, - parent_hash: Block::Hash, - included_block: Block::Hash, - client: &Client, - keystore: &KeystorePtr, -) -> Option> -where - Client: ProvideRuntimeApi, - Client::Api: AuraApi + AuraUnincludedSegmentApi, - P: Pair, - P::Public: Codec, - P::Signature: Codec, -{ - let runtime_api = client.runtime_api(); - let authorities = runtime_api.authorities(parent_hash).ok()?; - let author_pub = aura_internal::claim_slot::

<P>(slot, &authorities, keystore).await?; - - // Here we lean on the property that building on an empty unincluded segment must always - // be legal. Skipping the runtime API query here allows us to seamlessly run this - // collator against chains which have not yet upgraded their runtime. - if parent_hash != included_block { - if !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? { - return None - } - } - - Some(SlotClaim::unchecked::<P>

(author_pub, slot, timestamp)) -} - -/// Reads async backing parameters from the relay chain storage at the given relay parent. -async fn async_backing_params( - relay_parent: PHash, - relay_client: &impl RelayChainInterface, -) -> Option { - match load_abridged_host_configuration(relay_parent, relay_client).await { - Ok(Some(config)) => Some(config.async_backing_params), - Ok(None) => { - tracing::error!( - target: crate::LOG_TARGET, - "Active config is missing in relay chain storage", - ); - None - }, - Err(err) => { - tracing::error!( - target: crate::LOG_TARGET, - ?err, - ?relay_parent, - "Failed to read active config from relay chain client", - ); - None - }, - } -} - -// Return all the cores assigned to the para at the provided relay parent. -async fn cores_scheduled_for_para( - relay_parent: PHash, - para_id: ParaId, - overseer_handle: &mut OverseerHandle, - relay_client: &impl RelayChainInterface, -) -> Vec { - // Get `AvailabilityCores` from runtime - let (tx, rx) = oneshot::channel(); - let request = RuntimeApiRequest::AvailabilityCores(tx); - overseer_handle - .send_msg(RuntimeApiMessage::Request(relay_parent, request), "LookaheadCollator") - .await; - - let cores = match rx.await { - Ok(Ok(cores)) => cores, - Ok(Err(error)) => { - tracing::error!( - target: crate::LOG_TARGET, - ?error, - ?relay_parent, - "Failed to query availability cores runtime API", - ); - return Vec::new() - }, - Err(oneshot::Canceled) => { - tracing::error!( - target: crate::LOG_TARGET, - ?relay_parent, - "Sender for availability cores runtime request dropped", - ); - return Vec::new() - }, - }; - - let max_candidate_depth = async_backing_params(relay_parent, relay_client) - .await - .map(|c| c.max_candidate_depth) - .unwrap_or(0); - - cores - .iter() - .enumerate() - .filter_map(|(index, core)| { - let core_para_id = match core { - CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), - CoreState::Occupied(occupied_core) if max_candidate_depth >= 1 => occupied_core - .next_up_on_available - .as_ref() - .map(|scheduled_core| scheduled_core.para_id), - CoreState::Free | CoreState::Occupied(_) => None, - }; - - if core_para_id == Some(para_id) { - Some(CoreIndex(index as u32)) - } else { - None - } - }) - .collect() -} diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 6e0067d0cedb6..7d430ecdc727a 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -20,13 +20,35 @@ //! included parachain block, as well as the [`lookahead`] collator, which prospectively //! builds on parachain blocks which have not yet been included in the relay chain. 
+use crate::collator::SlotClaim; +use codec::Codec; +use cumulus_client_consensus_common::{ + self as consensus_common, load_abridged_host_configuration, ParentSearchParams, +}; +use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; +use cumulus_primitives_core::{relay_chain::Hash as ParaHash, BlockT}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_primitives::{ - Hash as RHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, + AsyncBackingParams, CoreIndex, CoreState, Hash as RelayHash, Id as ParaId, + OccupiedCoreAssumption, ValidationCodeHash, }; +use sc_consensus_aura::{standalone as aura_internal, AuraApi}; +use sp_api::ProvideRuntimeApi; +use sp_core::Pair; +use sp_keystore::KeystorePtr; +use sp_timestamp::Timestamp; pub mod basic; pub mod lookahead; +pub mod slot_based; + +// This is an arbitrary value which is likely guaranteed to exceed any reasonable +// limit, as it would correspond to 10 non-included blocks. +// +// Since we only search for parent blocks which have already been imported, +// we can guarantee that all imported blocks respect the unincluded segment +// rules specified by the parachain's runtime and thus will never be too deep. +const PARENT_SEARCH_DEPTH: usize = 10; /// Check the `local_validation_code_hash` against the validation code hash in the relay chain /// state. @@ -36,7 +58,7 @@ async fn check_validation_code_or_log( local_validation_code_hash: &ValidationCodeHash, para_id: ParaId, relay_client: &impl RelayChainInterface, - relay_parent: RHash, + relay_parent: RelayHash, ) { let state_validation_code_hash = match relay_client .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included) @@ -64,7 +86,7 @@ async fn check_validation_code_or_log( ?relay_parent, ?local_validation_code_hash, relay_validation_code_hash = ?state, - "Parachain code doesn't match validation code stored in the relay chain state", + "Parachain code doesn't match validation code stored in the relay chain state.", ); }, None => { @@ -77,3 +99,159 @@ async fn check_validation_code_or_log( }, } } + +/// Reads async backing parameters from the relay chain storage at the given relay parent. +async fn async_backing_params( + relay_parent: RelayHash, + relay_client: &impl RelayChainInterface, +) -> Option { + match load_abridged_host_configuration(relay_parent, relay_client).await { + Ok(Some(config)) => Some(config.async_backing_params), + Ok(None) => { + tracing::error!( + target: crate::LOG_TARGET, + "Active config is missing in relay chain storage", + ); + None + }, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + ?relay_parent, + "Failed to read active config from relay chain client", + ); + None + }, + } +} + +// Return all the cores assigned to the para at the provided relay parent. 
+async fn cores_scheduled_for_para( + relay_parent: RelayHash, + para_id: ParaId, + relay_client: &impl RelayChainInterface, +) -> Vec { + // Get `AvailabilityCores` from runtime + let cores = match relay_client.availability_cores(relay_parent).await { + Ok(cores) => cores, + Err(error) => { + tracing::error!( + target: crate::LOG_TARGET, + ?error, + ?relay_parent, + "Failed to query availability cores runtime API", + ); + return Vec::new() + }, + }; + + let max_candidate_depth = async_backing_params(relay_parent, relay_client) + .await + .map(|c| c.max_candidate_depth) + .unwrap_or(0); + + cores + .iter() + .enumerate() + .filter_map(|(index, core)| { + let core_para_id = match core { + CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), + CoreState::Occupied(occupied_core) if max_candidate_depth > 0 => occupied_core + .next_up_on_available + .as_ref() + .map(|scheduled_core| scheduled_core.para_id), + CoreState::Free | CoreState::Occupied(_) => None, + }; + + if core_para_id == Some(para_id) { + Some(CoreIndex(index as u32)) + } else { + None + } + }) + .collect() +} + +// Checks if we own the slot at the given block and whether there +// is space in the unincluded segment. +async fn can_build_upon( + slot: Slot, + timestamp: Timestamp, + parent_hash: Block::Hash, + included_block: Block::Hash, + client: &Client, + keystore: &KeystorePtr, +) -> Option> +where + Client: ProvideRuntimeApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi, + P: Pair, + P::Public: Codec, + P::Signature: Codec, +{ + let runtime_api = client.runtime_api(); + let authorities = runtime_api.authorities(parent_hash).ok()?; + let author_pub = aura_internal::claim_slot::

<P>(slot, &authorities, keystore).await?; + + // Here we lean on the property that building on an empty unincluded segment must always + // be legal. Skipping the runtime API query here allows us to seamlessly run this + // collator against chains which have not yet upgraded their runtime. + if parent_hash != included_block && + !runtime_api.can_build_upon(parent_hash, included_block, slot).ok()? + { + return None + } + + Some(SlotClaim::unchecked::<P>

(author_pub, slot, timestamp)) +} + +/// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that +/// we can build on. Once a list of potential parents is retrieved, return the last one of the +/// longest chain. +async fn find_parent( + relay_parent: ParaHash, + para_id: ParaId, + para_backend: &impl sc_client_api::Backend, + relay_client: &impl RelayChainInterface, +) -> Option<(::Hash, consensus_common::PotentialParent)> +where + Block: BlockT, +{ + let parent_search_params = ParentSearchParams { + relay_parent, + para_id, + ancestry_lookback: crate::collators::async_backing_params(relay_parent, relay_client) + .await + .map_or(0, |params| params.allowed_ancestry_len as usize), + max_depth: PARENT_SEARCH_DEPTH, + ignore_alternative_branches: true, + }; + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + para_backend, + relay_client, + ) + .await; + + let potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + return None + }, + Ok(x) => x, + }; + + let included_block = potential_parents.iter().find(|x| x.depth == 0)?.hash; + potential_parents + .into_iter() + .max_by_key(|a| a.depth) + .map(|parent| (included_block, parent)) +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs new file mode 100644 index 0000000000000..1fbc0689da862 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -0,0 +1,491 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +use codec::{Codec, Encode}; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_primitives::{ + BlockId, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, + OccupiedCoreAssumption, +}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_timestamp::Timestamp; +use std::{sync::Arc, time::Duration}; + +use super::CollatorMessage; +use crate::{ + collator::{self as collator_util}, + collators::{check_validation_code_or_log, cores_scheduled_for_para}, + LOG_TARGET, +}; + +/// Parameters for [`run_block_builder`]. +pub struct BuilderTaskParams< + Block: BlockT, + BI, + CIDP, + Client, + Backend, + RelayClient, + CHP, + Proposer, + CS, +> { + /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RelayClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The para's ID. + pub para_id: ParaId, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Channel to send built blocks to the collation task. + pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>, + /// Slot duration of the relay chain + pub relay_chain_slot_duration: Duration, + /// Drift every slot by this duration. + /// This is a time quantity that is subtracted from the actual timestamp when computing + /// the time left to enter a new slot. In practice, this *left-shifts* the clock time with the + /// intent to keep our "clock" slightly behind the relay chain one and thus reducing the + /// likelihood of encountering unfavorable notification arrival timings (i.e. we don't want to + /// wait for relay chain notifications because we woke up too early). 
+ pub slot_drift: Duration, +} + +#[derive(Debug)] +struct SlotInfo { + pub timestamp: Timestamp, + pub slot: Slot, + pub slot_duration: SlotDuration, +} + +#[derive(Debug)] +struct SlotTimer { + client: Arc, + drift: Duration, + _marker: std::marker::PhantomData<(Block, Box)>, +} + +/// Returns current duration since Unix epoch. +fn duration_now() -> Duration { + use std::time::SystemTime; + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| { + panic!("Current time {:?} is before Unix epoch. Something is wrong: {:?}", now, e) + }) +} + +/// Returns the duration until the next slot from now. +fn time_until_next_slot(slot_duration: Duration, drift: Duration) -> Duration { + let now = duration_now().as_millis() - drift.as_millis(); + + let next_slot = (now + slot_duration.as_millis()) / slot_duration.as_millis(); + let remaining_millis = next_slot * slot_duration.as_millis() - now; + Duration::from_millis(remaining_millis as u64) +} + +impl SlotTimer +where + Block: BlockT, + Client: ProvideRuntimeApi + Send + Sync + 'static + UsageProvider, + Client::Api: AuraApi, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + pub fn new_with_drift(client: Arc, drift: Duration) -> Self { + Self { client, drift, _marker: Default::default() } + } + + /// Returns a future that resolves when the next slot arrives. + pub async fn wait_until_next_slot(&self) -> Result { + let Ok(slot_duration) = crate::slot_duration(&*self.client) else { + tracing::error!(target: crate::LOG_TARGET, "Failed to fetch slot duration from runtime."); + return Err(()) + }; + + let time_until_next_slot = time_until_next_slot(slot_duration.as_duration(), self.drift); + tokio::time::sleep(time_until_next_slot).await; + let timestamp = sp_timestamp::Timestamp::current(); + Ok(SlotInfo { + slot: Slot::from_timestamp(timestamp, slot_duration), + timestamp, + slot_duration, + }) + } +} + +/// Run block-builder. 
+pub fn run_block_builder( + params: BuilderTaskParams, +) -> impl Future + Send + 'static +where + Block: BlockT, + Client: ProvideRuntimeApi + + UsageProvider + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RelayClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + async move { + tracing::info!(target: LOG_TARGET, "Starting slot-based block-builder task."); + let BuilderTaskParams { + relay_client, + create_inherent_data_providers, + para_client, + keystore, + block_import, + para_id, + proposer, + collator_service, + collator_sender, + code_hash_provider, + authoring_duration, + para_backend, + relay_chain_slot_duration, + slot_drift, + } = params; + + let slot_timer = SlotTimer::<_, _, P>::new_with_drift(para_client.clone(), slot_drift); + + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers, + block_import, + relay_client: relay_client.clone(), + keystore: keystore.clone(), + para_id, + proposer, + collator_service, + }; + + collator_util::Collator::::new(params) + }; + + let mut relay_chain_fetcher = RelayChainCachingFetcher::new(relay_client.clone(), para_id); + + loop { + // We wait here until the next slot arrives. + let Ok(para_slot) = slot_timer.wait_until_next_slot().await else { + return; + }; + + let Some(expected_cores) = + expected_core_count(relay_chain_slot_duration, para_slot.slot_duration) + else { + return + }; + + let Ok(RelayChainData { + relay_parent_header, + max_pov_size, + relay_parent_hash: relay_parent, + scheduled_cores, + }) = relay_chain_fetcher.get_relay_chain_data().await + else { + continue; + }; + + if scheduled_cores.is_empty() { + tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); + continue; + } + + let core_index_in_scheduled: u64 = *para_slot.slot % expected_cores; + let Some(core_index) = scheduled_cores.get(core_index_in_scheduled as usize) else { + tracing::debug!(target: LOG_TARGET, core_index_in_scheduled, core_len = scheduled_cores.len(), "Para is scheduled, but not enough cores available."); + continue; + }; + + let Some((included_block, parent)) = + crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) + .await + else { + continue + }; + + let parent_header = parent.header; + let parent_hash = parent.hash; + + // We mainly call this to inform users at genesis if there is a mismatch with the + // on-chain data. + collator.collator_service().check_block_status(parent_hash, &parent_header); + + let slot_claim = match crate::collators::can_build_upon::<_, _, P>( + para_slot.slot, + para_slot.timestamp, + parent_hash, + included_block, + &*para_client, + &keystore, + ) + .await + { + Some(slot) => slot, + None => { + tracing::debug!( + target: crate::LOG_TARGET, + ?core_index, + slot_info = ?para_slot, + unincluded_segment_len = parent.depth, + relay_parent = %relay_parent, + included = %included_block, + parent = %parent_hash, + "Not building block." 
+ ); + continue + }, + }; + + tracing::debug!( + target: crate::LOG_TARGET, + ?core_index, + slot_info = ?para_slot, + unincluded_segment_len = parent.depth, + relay_parent = %relay_parent, + included = %included_block, + parent = %parent_hash, + "Building block." + ); + + let validation_data = PersistedValidationData { + parent_head: parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break + }, + Ok(x) => x, + }; + + let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { + None => { + tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); + break + }, + Some(v) => v, + }; + + check_validation_code_or_log( + &validation_code_hash, + para_id, + &relay_client, + relay_parent, + ) + .await; + + let Ok(Some(candidate)) = collator + .build_block_and_import( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + continue; + }; + + let new_block_hash = candidate.block.header().hash(); + + // Announce the newly built block to our peers. + collator.collator_service().announce_block(new_block_hash, None); + + if let Err(err) = collator_sender.unbounded_send(CollatorMessage { + relay_parent, + parent_header, + parachain_candidate: candidate, + validation_code_hash, + core_index: *core_index, + }) { + tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); + return + } + } + } +} + +/// Calculate the expected core count based on the slot duration of the relay and parachain. +/// +/// If `slot_duration` is smaller than `relay_chain_slot_duration` that means that we produce more +/// than one parachain block per relay chain block. In order to get these backed, we need multiple +/// cores. This method calculates how many cores we should expect to have scheduled under the +/// assumption that we have a fixed number of cores assigned to our parachain. +fn expected_core_count( + relay_chain_slot_duration: Duration, + slot_duration: SlotDuration, +) -> Option { + let slot_duration_millis = slot_duration.as_millis(); + u64::try_from(relay_chain_slot_duration.as_millis()) + .map_err(|e| tracing::error!("Unable to calculate expected parachain core count: {e}")) + .map(|relay_slot_duration| (relay_slot_duration / slot_duration_millis).max(1)) + .ok() +} + +/// Contains relay chain data necessary for parachain block building. +#[derive(Clone)] +struct RelayChainData { + /// Current relay chain parent header. + pub relay_parent_header: RelayHeader, + /// The cores this para is scheduled on in the context of the relay parent. + pub scheduled_cores: Vec, + /// Maximum configured PoV size on the relay chain. + pub max_pov_size: u32, + /// Current relay chain parent header. 
+ pub relay_parent_hash: RelayHash, +} + +/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block +/// hash. +struct RelayChainCachingFetcher { + relay_client: RI, + para_id: ParaId, + last_data: Option<(RelayHash, RelayChainData)>, +} + +impl RelayChainCachingFetcher +where + RI: RelayChainInterface + Clone + 'static, +{ + pub fn new(relay_client: RI, para_id: ParaId) -> Self { + Self { relay_client, para_id, last_data: None } + } + + /// Fetch required [`RelayChainData`] from the relay chain. + /// If this data has been fetched in the past for the incoming hash, it will reuse + /// cached data. + pub async fn get_relay_chain_data(&mut self) -> Result { + let Ok(relay_parent) = self.relay_client.best_block_hash().await else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block hash."); + return Err(()) + }; + + match &self.last_data { + Some((last_seen_hash, data)) if *last_seen_hash == relay_parent => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); + Ok(data.clone()) + }, + _ => { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); + let data = self.update_for_relay_parent(relay_parent).await?; + self.last_data = Some((relay_parent, data.clone())); + Ok(data) + }, + } + } + + /// Fetch fresh data from the relay chain for the given relay parent hash. + async fn update_for_relay_parent(&self, relay_parent: RelayHash) -> Result { + let scheduled_cores = + cores_scheduled_for_para(relay_parent, self.para_id, &self.relay_client).await; + let Ok(Some(relay_parent_header)) = + self.relay_client.header(BlockId::Hash(relay_parent)).await + else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); + return Err(()) + }; + + let max_pov_size = match self + .relay_client + .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(None) => return Err(()), + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + return Err(()) + }, + }; + + Ok(RelayChainData { + relay_parent_hash: relay_parent, + relay_parent_header, + scheduled_cores, + max_pov_size, + }) + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs new file mode 100644 index 0000000000000..5b8151f6302c4 --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -0,0 +1,140 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
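+
+//! The collation task drains [`CollatorMessage`]s produced by the block-builder task and turns
+//! each of them into a collation that is handed to the collation-generation subsystem. A
+//! minimal sketch of that loop, with the surrounding types elided (variable names here are
+//! assumptions for illustration only):
+//!
+//! ```ignore
+//! while let Some(message) = collator_receiver.next().await {
+//!     // `handle_collation_message` builds the collation and submits it
+//!     // to the relay chain via the overseer handle.
+//!     handle_collation_message(message, &collator_service, &mut overseer_handle).await;
+//! }
+//! ```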
+ +use codec::Encode; + +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{MaybeCompressedPoV, SubmitCollationParams}; +use polkadot_node_subsystem::messages::CollationGenerationMessage; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{CollatorPair, Id as ParaId}; + +use futures::prelude::*; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_runtime::traits::{Block as BlockT, Header}; + +use super::CollatorMessage; + +const LOG_TARGET: &str = "aura::cumulus::collation_task"; + +/// Parameters for the collation task. +pub struct Params { + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, + /// Collator service interface + pub collator_service: CS, + /// Receiver channel for communication with the block builder task. + pub collator_receiver: TracingUnboundedReceiver>, +} + +/// Asynchronously executes the collation task for a parachain. +/// +/// This function initializes the collator subsystems necessary for producing and submitting +/// collations to the relay chain. It listens for new best relay chain block notifications and +/// handles collator messages. If our parachain is scheduled on a core and we have a candidate, +/// the task will build a collation and send it to the relay chain. +pub async fn run_collation_task(mut params: Params) +where + Block: BlockT, + CS: CollatorServiceInterface + Send + Sync + 'static, + RClient: RelayChainInterface + Clone + 'static, +{ + let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else { + tracing::error!(target: LOG_TARGET, "Failed to get overseer handle."); + return + }; + + cumulus_client_collator::initialize_collator_subsystems( + &mut overseer_handle, + params.collator_key, + params.para_id, + params.reinitialize, + ) + .await; + + let collator_service = params.collator_service; + while let Some(collator_message) = params.collator_receiver.next().await { + handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await; + } +} + +/// Handle an incoming collation message from the block builder task. +/// This builds the collation from the [`CollatorMessage`] and submits it to +/// the collation-generation subsystem of the relay chain. 
+async fn handle_collation_message( + message: CollatorMessage, + collator_service: &impl CollatorServiceInterface, + overseer_handle: &mut OverseerHandle, +) { + let CollatorMessage { + parent_header, + parachain_candidate, + validation_code_hash, + relay_parent, + core_index, + } = message; + + let hash = parachain_candidate.block.header().hash(); + let number = *parachain_candidate.block.header().number(); + let (collation, block_data) = + match collator_service.build_collation(&parent_header, hash, parachain_candidate) { + Some(collation) => collation, + None => { + tracing::warn!(target: LOG_TARGET, %hash, ?number, ?core_index, "Unable to build collation."); + return; + }, + }; + + tracing::info!( + target: LOG_TARGET, + "PoV size {{ header: {:.2}kB, extrinsics: {:.2}kB, storage_proof: {:.2}kB }}", + block_data.header().encoded_size() as f64 / 1024f64, + block_data.extrinsics().encoded_size() as f64 / 1024f64, + block_data.storage_proof().encoded_size() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + tracing::debug!(target: LOG_TARGET, ?core_index, %hash, %number, "Submitting collation for core."); + overseer_handle + .send_msg( + CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation, + parent_head: parent_header.encode().into(), + validation_code_hash, + core_index, + result_sender: None, + }), + "SubmitCollation", + ) + .await; +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs new file mode 100644 index 0000000000000..0fe49d58d25be --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! The block building mechanism consists of two parts: +//! 1. A block-builder task that builds parachain blocks at each of our slots. +//! 2. A collator task that transforms the blocks into a collation and submits them to the relay +//! chain. +//! +//! Blocks are built on every parachain slot if there is a core scheduled on the relay chain. At the +//! beginning of each block building loop, we determine how many blocks we expect to build per relay +//! chain block. The collator implementation then expects that we have that many cores scheduled +//! during the relay chain block. After the block is built, the block builder task sends it to +//! the collation task which compresses it and submits it to the collation-generation subsystem. 
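+//!
+//! A minimal usage sketch: [`run`] returns the two futures described above, and it is up to the
+//! embedding node to drive them. Assuming a Substrate task manager is available (the names
+//! `params` and `task_manager` are placeholders for this example):
+//!
+//! ```ignore
+//! let (collation_future, block_builder_future) = slot_based::run(params);
+//! task_manager
+//!     .spawn_essential_handle()
+//!     .spawn("collation-task", None, collation_future);
+//! task_manager
+//!     .spawn_essential_handle()
+//!     .spawn("block-builder-task", None, block_builder_future);
+//! ```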
+ +use codec::Codec; +use consensus_common::ParachainCandidate; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::CollectCollationInfo; +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_primitives::{ + CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, +}; + +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_consensus::BlockImport; +use sc_utils::mpsc::tracing_unbounded; + +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus_aura::AuraApi; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Member}; + +use std::{sync::Arc, time::Duration}; + +use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; + +mod block_builder_task; +mod collation_task; + +/// Parameters for [`run`]. +pub struct Params { + /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. + /// the timestamp, slot, and paras inherents should be omitted, as they are set by this + /// collator. + pub create_inherent_data_providers: CIDP, + /// Used to actually import blocks. + pub block_import: BI, + /// The underlying para client. + pub para_client: Arc, + /// The para client's backend, used to access the database. + pub para_backend: Arc, + /// A handle to the relay-chain client. + pub relay_client: RClient, + /// A validation code hash provider, used to get the current validation code hash. + pub code_hash_provider: CHP, + /// The underlying keystore, which should contain Aura consensus keys. + pub keystore: KeystorePtr, + /// The collator key used to sign collations before submitting to validators. + pub collator_key: CollatorPair, + /// The para's ID. + pub para_id: ParaId, + /// The length of slots in the relay chain. + pub relay_chain_slot_duration: Duration, + /// The underlying block proposer this should call into. + pub proposer: Proposer, + /// The generic collator service used to plug into this consensus engine. + pub collator_service: CS, + /// The amount of time to spend authoring each block. + pub authoring_duration: Duration, + /// Whether we should reinitialize the collator config (i.e. we are transitioning to aura). + pub reinitialize: bool, + /// Drift slots by a fixed duration. This can be used to create more preferrable authoring + /// timings. + pub slot_drift: Duration, +} + +/// Run aura-based block building and collation task. 
+pub fn run( + params: Params, +) -> (impl futures::Future, impl futures::Future) +where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + UsageProvider + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sc_client_api::Backend + 'static, + RClient: RelayChainInterface + Clone + 'static, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + P: Pair + 'static, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, +{ + let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100); + let collator_task_params = collation_task::Params { + relay_client: params.relay_client.clone(), + collator_key: params.collator_key, + para_id: params.para_id, + reinitialize: params.reinitialize, + collator_service: params.collator_service.clone(), + collator_receiver: rx, + }; + + let collation_task_fut = run_collation_task::(collator_task_params); + + let block_builder_params = block_builder_task::BuilderTaskParams { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + para_client: params.para_client, + para_backend: params.para_backend, + relay_client: params.relay_client, + code_hash_provider: params.code_hash_provider, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + authoring_duration: params.authoring_duration, + collator_sender: tx, + relay_chain_slot_duration: params.relay_chain_slot_duration, + slot_drift: params.slot_drift, + }; + + let block_builder_fut = + run_block_builder::(block_builder_params); + + (collation_task_fut, block_builder_fut) +} + +/// Message to be sent from the block builder to the collation task. +/// +/// Contains all data necessary to submit a collation to the relay chain. +struct CollatorMessage { + /// The hash of the relay chain block that provides the context for the parachain block. + pub relay_parent: RelayHash, + /// The header of the parent block. + pub parent_header: Block::Header, + /// The parachain block candidate. + pub parachain_candidate: ParachainCandidate, + /// The validation code hash at the parent block. + pub validation_code_hash: ValidationCodeHash, + /// Core index that this block should be submitted on + pub core_index: CoreIndex, +} diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index be554bdcfc79b..68f2d37c87488 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -21,6 +21,7 @@ /// should be thrown out and which ones should be kept. 
use codec::Codec; use cumulus_client_consensus_common::ParachainBlockImportMarker; +use parking_lot::Mutex; use schnellru::{ByLength, LruMap}; use sc_consensus::{ @@ -70,7 +71,7 @@ impl NaiveEquivocationDefender { struct Verifier { client: Arc, create_inherent_data_providers: CIDP, - defender: NaiveEquivocationDefender, + defender: Mutex, telemetry: Option, _phantom: std::marker::PhantomData (Block, P)>, } @@ -88,7 +89,7 @@ where CIDP: CreateInherentDataProviders, { async fn verify( - &mut self, + &self, mut block_params: BlockImportParams, ) -> Result, String> { // Skip checks that include execution, if being told so, or when importing only state. @@ -137,7 +138,7 @@ where block_params.post_hash = Some(post_hash); // Check for and reject egregious amounts of equivocations. - if self.defender.insert_and_check(slot) { + if self.defender.lock().insert_and_check(slot) { return Err(format!( "Rejecting block {:?} due to excessive equivocations at slot", post_hash, @@ -243,7 +244,7 @@ where let verifier = Verifier:: { client, create_inherent_data_providers, - defender: NaiveEquivocationDefender::default(), + defender: Mutex::new(NaiveEquivocationDefender::default()), telemetry, _phantom: std::marker::PhantomData, }; diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 09c2f58d45e4e..4bc2f1d1e600e 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -10,42 +10,42 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -dyn-clone = "1.0.16" -futures = "0.3.28" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +dyn-clone = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -tracing = "0.1.37" +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-trie = { path = "../../../../substrate/primitives/trie" } -sp-version = { path = "../../../../substrate/primitives/version" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, 
default-features = true } +sp-version = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } -cumulus-client-pov-recovery = { path = "../../pov-recovery" } -schnellru = "0.2.1" +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +schnellru = { workspace = true } [dev-dependencies] -futures-timer = "3.0.2" +futures-timer = { workspace = true } # Substrate -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } # Cumulus -cumulus-test-client = { path = "../../../test/client" } -cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder" } +cumulus-test-client = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/common/src/import_queue.rs b/cumulus/client/consensus/common/src/import_queue.rs index 8024b7695a285..488693604fefc 100644 --- a/cumulus/client/consensus/common/src/import_queue.rs +++ b/cumulus/client/consensus/common/src/import_queue.rs @@ -50,7 +50,7 @@ pub struct VerifyNothing; #[async_trait::async_trait] impl Verifier for VerifyNothing { async fn verify( - &mut self, + &self, params: BlockImportParams, ) -> Result, String> { Ok(params) diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index cebe34e7ea588..e12750dcc553f 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -19,16 +19,13 @@ use polkadot_primitives::{ Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData, ValidationCodeHash, }; -use cumulus_primitives_core::{ - relay_chain::{self, BlockId as RBlockId, OccupiedCoreAssumption}, - AbridgedHostConfiguration, ParaId, -}; +use cumulus_primitives_core::{relay_chain, AbridgedHostConfiguration}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; -use sc_client_api::{Backend, HeaderBackend}; +use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; -use sp_blockchain::Backend as BlockchainBackend; use sp_consensus_slots::Slot; + use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_timestamp::Timestamp; @@ -36,9 +33,12 @@ use std::{sync::Arc, time::Duration}; mod level_monitor; mod parachain_consensus; +mod parent_search; #[cfg(test)] mod tests; +pub use parent_search::*; + pub use parachain_consensus::run_parachain_consensus; use level_monitor::LevelMonitor; @@ -172,13 +172,13 @@ impl Clone for ParachainBlockImport { impl BlockImport for ParachainBlockImport where Block: BlockT, - BI: BlockImport + Send, + BI: BlockImport + Send + Sync, BE: Backend, { type Error = BI::Error; async fn check_block( - &mut self, + &self, block: sc_consensus::BlockCheckParams, ) -> Result { self.inner.check_block(block).await @@ -229,196 +229,6 @@ pub trait ParachainBlockImportMarker {} impl ParachainBlockImportMarker for ParachainBlockImport {} -/// Parameters when searching for 
suitable parents to build on top of. -#[derive(Debug)] -pub struct ParentSearchParams { - /// The relay-parent that is intended to be used. - pub relay_parent: PHash, - /// The ID of the parachain. - pub para_id: ParaId, - /// A limitation on the age of relay parents for parachain blocks that are being - /// considered. This is relative to the `relay_parent` number. - pub ancestry_lookback: usize, - /// How "deep" parents can be relative to the included parachain block at the relay-parent. - /// The included block has depth 0. - pub max_depth: usize, - /// Whether to only ignore "alternative" branches, i.e. branches of the chain - /// which do not contain the block pending availability. - pub ignore_alternative_branches: bool, -} - -/// A potential parent block returned from [`find_potential_parents`] -#[derive(Debug, PartialEq)] -pub struct PotentialParent { - /// The hash of the block. - pub hash: B::Hash, - /// The header of the block. - pub header: B::Header, - /// The depth of the block. - pub depth: usize, - /// Whether the block is the included block, is itself pending on-chain, or descends - /// from the block pending availability. - pub aligned_with_pending: bool, -} - -/// Perform a recursive search through blocks to find potential -/// parent blocks for a new block. -/// -/// This accepts a relay-chain block to be used as an anchor and a maximum search depth, -/// along with some arguments for filtering parachain blocks and performs a recursive search -/// for parachain blocks. The search begins at the last included parachain block and returns -/// a set of [`PotentialParent`]s which could be potential parents of a new block with this -/// relay-parent according to the search parameters. -/// -/// A parachain block is a potential parent if it is either the last included parachain block, the -/// pending parachain block (when `max_depth` >= 1), or all of the following hold: -/// * its parent is a potential parent -/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent. -/// * its relay-parent is within the same session as the targeted relay-parent. -/// * the block number is within `max_depth` blocks of the included block -pub async fn find_potential_parents( - params: ParentSearchParams, - client: &impl Backend, - relay_client: &impl RelayChainInterface, -) -> Result>, RelayChainError> { - // 1. Build up the ancestry record of the relay chain to compare against. - let rp_ancestry = { - let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1); - let mut current_rp = params.relay_parent; - let mut required_session = None; - - while ancestry.len() <= params.ancestry_lookback { - let header = match relay_client.header(RBlockId::hash(current_rp)).await? { - None => break, - Some(h) => h, - }; - - let session = relay_client.session_index_for_child(current_rp).await?; - if let Some(required_session) = required_session { - // Respect the relay-chain rule not to cross session boundaries. - if session != required_session { - break - } - } else { - required_session = Some(session); - } - - ancestry.push((current_rp, *header.state_root())); - current_rp = *header.parent_hash(); - - // don't iterate back into the genesis block. - if header.number == 1 { - break - } - } - - ancestry - }; - - let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); - let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); - - // 2. Get the included and pending availability blocks. 
- let included_header = relay_client - .persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::TimedOut, - ) - .await?; - - let included_header = match included_header { - Some(pvd) => pvd.parent_head, - None => return Ok(Vec::new()), // this implies the para doesn't exist. - }; - - let pending_header = relay_client - .persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::Included, - ) - .await? - .and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); - - let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { - None => return Ok(Vec::new()), - Some(x) => x, - }; - // Silently swallow if pending block can't decode. - let pending_header = pending_header.and_then(|p| B::Header::decode(&mut &p.0[..]).ok()); - let included_hash = included_header.hash(); - let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); - - let mut frontier = vec![PotentialParent:: { - hash: included_hash, - header: included_header, - depth: 0, - aligned_with_pending: true, - }]; - - // Recursive search through descendants of the included block which have acceptable - // relay parents. - let mut potential_parents = Vec::new(); - while let Some(entry) = frontier.pop() { - let is_pending = - entry.depth == 1 && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); - let is_included = entry.depth == 0; - - // note: even if the pending block or included block have a relay parent - // outside of the expected part of the relay chain, they are always allowed - // because they have already been posted on chain. - let is_potential = is_pending || is_included || { - let digest = entry.header.digest(); - cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) || - cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) - .map(|(r, _n)| r) - .map_or(false, is_root_in_ancestry) - }; - - let parent_aligned_with_pending = entry.aligned_with_pending; - let child_depth = entry.depth + 1; - let hash = entry.hash; - - if is_potential { - potential_parents.push(entry); - } - - if !is_potential || child_depth > params.max_depth { - continue - } - - // push children onto search frontier. - for child in client.blockchain().children(hash).ok().into_iter().flatten() { - let aligned_with_pending = parent_aligned_with_pending && - if child_depth == 1 { - pending_hash.as_ref().map_or(true, |h| &child == h) - } else { - true - }; - - if params.ignore_alternative_branches && !aligned_with_pending { - continue - } - - let header = match client.blockchain().header(child) { - Ok(Some(h)) => h, - Ok(None) => continue, - Err(_) => continue, - }; - - frontier.push(PotentialParent { - hash: child, - header, - depth: child_depth, - aligned_with_pending, - }); - } - } - - Ok(potential_parents) -} - /// Get the relay-parent slot and timestamp from a header. 
pub fn relay_slot_and_timestamp( relay_parent_header: &PHeader, diff --git a/cumulus/client/consensus/common/src/parachain_consensus.rs b/cumulus/client/consensus/common/src/parachain_consensus.rs index b4b315bb32be6..944917673b119 100644 --- a/cumulus/client/consensus/common/src/parachain_consensus.rs +++ b/cumulus/client/consensus/common/src/parachain_consensus.rs @@ -375,60 +375,61 @@ async fn handle_new_best_parachain_head( target: LOG_TARGET, block_hash = ?hash, "Skipping set new best block, because block is already the best.", - ) - } else { - // Make sure the block is already known or otherwise we skip setting new best. - match parachain.block_status(hash) { - Ok(BlockStatus::InChainWithState) => { - unset_best_header.take(); - tracing::debug!( - target: LOG_TARGET, - ?hash, - "Importing block as new best for parachain.", - ); - import_block_as_new_best(hash, parachain_head, parachain).await; - }, - Ok(BlockStatus::InChainPruned) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - "Trying to set pruned block as new best!", - ); - }, - Ok(BlockStatus::Unknown) => { - *unset_best_header = Some(parachain_head); + ); + return; + } - tracing::debug!( - target: LOG_TARGET, - block_hash = ?hash, - "Parachain block not yet imported, waiting for import to enact as best block.", - ); - - if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { - // Best effort channel to actively encourage block recovery. - // An error here is not fatal; the relay chain continuously re-announces - // the best block, thus we will have other opportunities to retry. - let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; - if let Err(err) = recovery_chan_tx.try_send(req) { - tracing::warn!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?err, - "Unable to notify block recovery subsystem" - ) - } + // Make sure the block is already known or otherwise we skip setting new best. + match parachain.block_status(hash) { + Ok(BlockStatus::InChainWithState) => { + unset_best_header.take(); + tracing::debug!( + target: LOG_TARGET, + included = ?hash, + "Importing block as new best for parachain.", + ); + import_block_as_new_best(hash, parachain_head, parachain).await; + }, + Ok(BlockStatus::InChainPruned) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + "Trying to set pruned block as new best!", + ); + }, + Ok(BlockStatus::Unknown) => { + *unset_best_header = Some(parachain_head); + + tracing::debug!( + target: LOG_TARGET, + block_hash = ?hash, + "Parachain block not yet imported, waiting for import to enact as best block.", + ); + + if let Some(ref mut recovery_chan_tx) = recovery_chan_tx { + // Best effort channel to actively encourage block recovery. + // An error here is not fatal; the relay chain continuously re-announces + // the best block, thus we will have other opportunities to retry. 
+ let req = RecoveryRequest { hash, kind: RecoveryKind::Full }; + if let Err(err) = recovery_chan_tx.try_send(req) { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?err, + "Unable to notify block recovery subsystem" + ) } - }, - Err(e) => { - tracing::error!( - target: LOG_TARGET, - block_hash = ?hash, - error = ?e, - "Failed to get block status of block.", - ); - }, - _ => {}, - } + } + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + block_hash = ?hash, + error = ?e, + "Failed to get block status of block.", + ); + }, + _ => {}, } } diff --git a/cumulus/client/consensus/common/src/parent_search.rs b/cumulus/client/consensus/common/src/parent_search.rs new file mode 100644 index 0000000000000..c371ec62f8455 --- /dev/null +++ b/cumulus/client/consensus/common/src/parent_search.rs @@ -0,0 +1,418 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use codec::Decode; +use polkadot_primitives::Hash as RelayHash; + +use cumulus_primitives_core::{ + relay_chain::{BlockId as RBlockId, OccupiedCoreAssumption}, + ParaId, +}; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; + +use sc_client_api::{Backend, HeaderBackend}; + +use sp_blockchain::{Backend as BlockchainBackend, TreeRoute}; + +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +const PARENT_SEARCH_LOG_TARGET: &str = "consensus::common::find_potential_parents"; + +/// Parameters when searching for suitable parents to build on top of. +#[derive(Debug)] +pub struct ParentSearchParams { + /// The relay-parent that is intended to be used. + pub relay_parent: RelayHash, + /// The ID of the parachain. + pub para_id: ParaId, + /// A limitation on the age of relay parents for parachain blocks that are being + /// considered. This is relative to the `relay_parent` number. + pub ancestry_lookback: usize, + /// How "deep" parents can be relative to the included parachain block at the relay-parent. + /// The included block has depth 0. + pub max_depth: usize, + /// Whether to only ignore "alternative" branches, i.e. branches of the chain + /// which do not contain the block pending availability. + pub ignore_alternative_branches: bool, +} + +/// A potential parent block returned from [`find_potential_parents`] +#[derive(PartialEq)] +pub struct PotentialParent { + /// The hash of the block. + pub hash: B::Hash, + /// The header of the block. + pub header: B::Header, + /// The depth of the block with respect to the included block. + pub depth: usize, + /// Whether the block is the included block, is itself pending on-chain, or descends + /// from the block pending availability. 
+ pub aligned_with_pending: bool, +} + +impl std::fmt::Debug for PotentialParent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PotentialParent") + .field("hash", &self.hash) + .field("depth", &self.depth) + .field("aligned_with_pending", &self.aligned_with_pending) + .field("number", &self.header.number()) + .finish() + } +} + +/// Perform a recursive search through blocks to find potential +/// parent blocks for a new block. +/// +/// This accepts a relay-chain block to be used as an anchor and a maximum search depth, +/// along with some arguments for filtering parachain blocks and performs a recursive search +/// for parachain blocks. The search begins at the last included parachain block and returns +/// a set of [`PotentialParent`]s which could be potential parents of a new block with this +/// relay-parent according to the search parameters. +/// +/// A parachain block is a potential parent if it is either the last included parachain block, the +/// pending parachain block (when `max_depth` >= 1), or all of the following hold: +/// * its parent is a potential parent +/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent. +/// * its relay-parent is within the same session as the targeted relay-parent. +/// * the block number is within `max_depth` blocks of the included block +pub async fn find_potential_parents( + params: ParentSearchParams, + backend: &impl Backend, + relay_client: &impl RelayChainInterface, +) -> Result>, RelayChainError> { + tracing::trace!("Parent search parameters: {params:?}"); + // Get the included block. + let Some((included_header, included_hash)) = + fetch_included_from_relay_chain(relay_client, backend, params.para_id, params.relay_parent) + .await? + else { + return Ok(Default::default()) + }; + + let only_included = vec![PotentialParent { + hash: included_hash, + header: included_header.clone(), + depth: 0, + aligned_with_pending: true, + }]; + + if params.max_depth == 0 { + return Ok(only_included) + }; + + // Pending header and hash. + let maybe_pending = { + // Fetch the most recent pending header from the relay chain. We use + // `OccupiedCoreAssumption::Included` so the candidate pending availability gets enacted + // before being returned to us. + let pending_header = relay_client + .persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ) + .await? + .and_then(|p| B::Header::decode(&mut &p.parent_head.0[..]).ok()) + .filter(|x| x.hash() != included_hash); + + // If the pending block is not locally known, we can't do anything. + if let Some(header) = pending_header { + let pending_hash = header.hash(); + match backend.blockchain().header(pending_hash) { + // We are supposed to ignore branches that don't contain the pending block, but we + // do not know the pending block locally. + Ok(None) | Err(_) if params.ignore_alternative_branches => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %pending_hash, + "Failed to get header for pending block.", + ); + return Ok(Default::default()) + }, + Ok(Some(_)) => Some((header, pending_hash)), + _ => None, + } + } else { + None + } + }; + + let maybe_route_to_last_pending = maybe_pending + .as_ref() + .map(|(_, pending)| { + sp_blockchain::tree_route(backend.blockchain(), included_hash, *pending) + }) + .transpose()?; + + // If we want to ignore alternative branches there is no reason to start + // the parent search at the included block. 
We can add the included block and + // the path to the pending block to the potential parents directly (limited by max_depth). + let (frontier, potential_parents) = match ( + &maybe_pending, + params.ignore_alternative_branches, + &maybe_route_to_last_pending, + ) { + (Some((pending_header, pending_hash)), true, Some(ref route_to_pending)) => { + let mut potential_parents = only_included; + + // This is a defensive check, should never happen. + if !route_to_pending.retracted().is_empty() { + tracing::warn!(target: PARENT_SEARCH_LOG_TARGET, "Included block not an ancestor of pending block. This should not happen."); + return Ok(Default::default()) + } + + // Add all items on the path included -> pending - 1 to the potential parents, but + // not more than `max_depth`. + let num_parents_on_path = + route_to_pending.enacted().len().saturating_sub(1).min(params.max_depth); + for (num, block) in + route_to_pending.enacted().iter().take(num_parents_on_path).enumerate() + { + let Ok(Some(header)) = backend.blockchain().header(block.hash) else { continue }; + + potential_parents.push(PotentialParent { + hash: block.hash, + header, + depth: 1 + num, + aligned_with_pending: true, + }); + } + + // The search for additional potential parents should now start at the children of + // the pending block. + ( + vec![PotentialParent { + hash: *pending_hash, + header: pending_header.clone(), + depth: route_to_pending.enacted().len(), + aligned_with_pending: true, + }], + potential_parents, + ) + }, + _ => (only_included, Default::default()), + }; + + if potential_parents.len() > params.max_depth { + return Ok(potential_parents); + } + + // Build up the ancestry record of the relay chain to compare against. + let rp_ancestry = + build_relay_parent_ancestry(params.ancestry_lookback, params.relay_parent, relay_client) + .await?; + + Ok(search_child_branches_for_parents( + frontier, + maybe_route_to_last_pending, + included_header, + maybe_pending.map(|(_, hash)| hash), + backend, + params.max_depth, + params.ignore_alternative_branches, + rp_ancestry, + potential_parents, + )) +} + +/// Fetch the included block from the relay chain. +async fn fetch_included_from_relay_chain( + relay_client: &impl RelayChainInterface, + backend: &impl Backend, + para_id: ParaId, + relay_parent: RelayHash, +) -> Result, RelayChainError> { + // Fetch the pending header from the relay chain. We use `OccupiedCoreAssumption::TimedOut` + // so that even if there is a pending candidate, we assume it is timed out and we get the + // included head. + let included_header = relay_client + .persisted_validation_data(relay_parent, para_id, OccupiedCoreAssumption::TimedOut) + .await?; + let included_header = match included_header { + Some(pvd) => pvd.parent_head, + None => return Ok(None), // this implies the para doesn't exist. + }; + + let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { + None => return Ok(None), + Some(x) => x, + }; + + let included_hash = included_header.hash(); + // If the included block is not locally known, we can't do anything. 
+ match backend.blockchain().header(included_hash) { + Ok(None) => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %included_hash, + "Failed to get header for included block.", + ); + return Ok(None) + }, + Err(e) => { + tracing::warn!( + target: PARENT_SEARCH_LOG_TARGET, + %included_hash, + %e, + "Failed to get header for included block.", + ); + return Ok(None) + }, + _ => {}, + }; + + Ok(Some((included_header, included_hash))) +} + +/// Build an ancestry of relay parents that are acceptable. +/// +/// An acceptable relay parent is one that is no more than `ancestry_lookback` + 1 blocks below the +/// relay parent we want to build on. Parachain blocks anchored on relay parents older than that can +/// not be considered potential parents for block building. They have no chance of still getting +/// included, so our newly build parachain block would also not get included. +/// +/// On success, returns a vector of `(header_hash, state_root)` of the relevant relay chain +/// ancestry blocks. +async fn build_relay_parent_ancestry( + ancestry_lookback: usize, + relay_parent: RelayHash, + relay_client: &impl RelayChainInterface, +) -> Result, RelayChainError> { + let mut ancestry = Vec::with_capacity(ancestry_lookback + 1); + let mut current_rp = relay_parent; + let mut required_session = None; + while ancestry.len() <= ancestry_lookback { + let Some(header) = relay_client.header(RBlockId::hash(current_rp)).await? else { break }; + + let session = relay_client.session_index_for_child(current_rp).await?; + if required_session.get_or_insert(session) != &session { + // Respect the relay-chain rule not to cross session boundaries. + break; + } + + ancestry.push((current_rp, *header.state_root())); + current_rp = *header.parent_hash(); + + // don't iterate back into the genesis block. + if header.number == 1 { + break + } + } + Ok(ancestry) +} + +/// Start search for child blocks that can be used as parents. +pub fn search_child_branches_for_parents( + mut frontier: Vec>, + maybe_route_to_last_pending: Option>, + included_header: Block::Header, + pending_hash: Option, + backend: &impl Backend, + max_depth: usize, + ignore_alternative_branches: bool, + rp_ancestry: Vec<(RelayHash, RelayHash)>, + mut potential_parents: Vec>, +) -> Vec> { + let included_hash = included_header.hash(); + let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); + let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); + + // The distance between pending and included block. Is later used to check if a child + // is aligned with pending when it is between pending and included block. + let pending_distance = maybe_route_to_last_pending.as_ref().map(|route| route.enacted().len()); + + // If a block is on the path included -> pending, we consider it `aligned_with_pending`. + let is_child_pending = |hash| { + maybe_route_to_last_pending + .as_ref() + .map_or(true, |route| route.enacted().iter().any(|x| x.hash == hash)) + }; + + tracing::trace!( + target: PARENT_SEARCH_LOG_TARGET, + ?included_hash, + included_num = ?included_header.number(), + ?pending_hash , + ?rp_ancestry, + "Searching relay chain ancestry." 
+ ); + while let Some(entry) = frontier.pop() { + let is_pending = pending_hash.as_ref().map_or(false, |h| &entry.hash == h); + let is_included = included_hash == entry.hash; + + // note: even if the pending block or included block have a relay parent + // outside of the expected part of the relay chain, they are always allowed + // because they have already been posted on chain. + let is_potential = is_pending || is_included || { + let digest = entry.header.digest(); + let is_hash_in_ancestry_check = cumulus_primitives_core::extract_relay_parent(digest) + .map_or(false, is_hash_in_ancestry); + let is_root_in_ancestry_check = + cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) + .map(|(r, _n)| r) + .map_or(false, is_root_in_ancestry); + + is_hash_in_ancestry_check || is_root_in_ancestry_check + }; + + let parent_aligned_with_pending = entry.aligned_with_pending; + let child_depth = entry.depth + 1; + let hash = entry.hash; + + tracing::trace!( + target: PARENT_SEARCH_LOG_TARGET, + ?hash, + is_potential, + is_pending, + is_included, + "Checking potential parent." + ); + + if is_potential { + potential_parents.push(entry); + } + + if !is_potential || child_depth > max_depth { + continue + } + + // push children onto search frontier. + for child in backend.blockchain().children(hash).ok().into_iter().flatten() { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, child_depth, ?pending_distance, "Looking at child."); + + let aligned_with_pending = parent_aligned_with_pending && + (pending_distance.map_or(true, |dist| child_depth > dist) || + is_child_pending(child)); + + if ignore_alternative_branches && !aligned_with_pending { + tracing::trace!(target: PARENT_SEARCH_LOG_TARGET, ?child, "Child is not aligned with pending block."); + continue + } + + let Ok(Some(header)) = backend.blockchain().header(child) else { continue }; + + frontier.push(PotentialParent { + hash: child, + header, + depth: child_depth, + aligned_with_pending, + }); + } + } + + potential_parents +} diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 2a944bc7f9fa2..284fa39ed1e70 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -20,7 +20,7 @@ use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; use cumulus_primitives_core::{ - relay_chain::{self, BlockId}, + relay_chain::{BlockId, BlockNumber, CoreState}, CumulusDigestItem, InboundDownwardMessage, InboundHrmpMessage, }; use cumulus_relay_chain_interface::{ @@ -37,6 +37,7 @@ use futures_timer::Delay; use polkadot_primitives::HeadData; use sc_client_api::{Backend as _, UsageProvider}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; +use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::{BlockOrigin, BlockStatus}; use sp_version::RuntimeVersion; use std::{ @@ -46,11 +47,11 @@ use std::{ time::Duration, }; -fn relay_block_num_from_hash(hash: &PHash) -> relay_chain::BlockNumber { +fn relay_block_num_from_hash(hash: &PHash) -> BlockNumber { hash.to_low_u64_be() as u32 } -fn relay_hash_from_block_num(block_number: relay_chain::BlockNumber) -> PHash { +fn relay_hash_from_block_num(block_number: BlockNumber) -> PHash { PHash::from_low_u64_be(block_number as u64) } @@ -257,6 +258,13 @@ impl RelayChainInterface for Relaychain { })) } + async fn availability_cores( + &self, + _relay_parent: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not 
needed for test"); + } + async fn version(&self, _: PHash) -> RelayChainResult { unimplemented!("Not needed for test") } @@ -1138,6 +1146,357 @@ fn find_potential_parents_with_max_depth() { } } +#[test] +fn find_potential_parents_unknown_included() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + + let sproof = sproof_with_best_parent(&client); + let included_but_unknown = build_block(&*client, sproof, None, None, Some(relay_parent)); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), 0); +} + +#[test] +fn find_potential_parents_unknown_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let sproof = sproof_with_parent_by_hash(&client, included_block.header().hash()); + let pending_but_unknown = build_block( + &*client, + sproof, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert!(potential_parents.is_empty()); +} + +#[test] +fn find_potential_parents_unknown_pending_include_alternative_branches() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + + // Choose different relay parent for alternative chain to get new hashes. + let search_relay_parent = relay_hash_from_block_num(11); + + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let alt_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(search_relay_parent), + ); + + tracing::info!(hash = %alt_block.header().hash(), "Alt block."); + let sproof = sproof_with_parent_by_hash(&client, included_block.header().hash()); + let pending_but_unknown = build_block( + &*client, + sproof, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_but_unknown.header().clone()); + } + + // Ignore alternative branch: + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth: NON_INCLUDED_CHAIN_LEN, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_parents: Vec<_> = vec![&included_block, &alt_block]; + assert_eq!(potential_parents.len(), 2); + assert_eq!(expected_parents[0].hash(), potential_parents[0].hash); + assert_eq!(expected_parents[1].hash(), potential_parents[1].hash); +} + +/// Test where there are multiple pending blocks. +#[test] +fn find_potential_parents_aligned_with_late_pending() { + sp_tracing::try_init_simple(); + + const NON_INCLUDED_CHAIN_LEN: usize = 5; + + let backend = Arc::new(Backend::new_test(1000, 1)); + let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); + + let relay_parent = relay_hash_from_block_num(10); + // Choose different relay parent for alternative chain to get new hashes. 
+ let search_relay_parent = relay_hash_from_block_num(11); + let included_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + None, + None, + Some(relay_parent), + ); + + let in_between_block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(included_block.header().hash()), + None, + Some(relay_parent), + ); + + let pending_block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(in_between_block.header().hash()), + None, + Some(relay_parent), + ); + + let relay_chain = Relaychain::new(); + { + let relay_inner = &mut relay_chain.inner.lock().unwrap(); + relay_inner + .relay_chain_hash_to_header + .insert(search_relay_parent, included_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, in_between_block.header().clone()); + relay_inner + .relay_chain_hash_to_header_pending + .insert(search_relay_parent, pending_block.header().clone()); + } + + // Build some blocks on the pending block and on the included block. + // We end up with two sibling chains, one is aligned with the pending block, + // the other is not. + let mut aligned_blocks = Vec::new(); + let mut parent = pending_block.header().hash(); + for _ in 2..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::Own, + true, + &mut para_import, + Some(parent), + None, + Some(relay_parent), + ); + parent = block.header().hash(); + aligned_blocks.push(block); + } + + let mut alt_blocks = Vec::new(); + let mut parent = included_block.header().hash(); + for _ in 0..NON_INCLUDED_CHAIN_LEN { + let block = build_and_import_block_ext( + &client, + BlockOrigin::NetworkInitialSync, + true, + &mut para_import, + Some(parent), + None, + Some(search_relay_parent), + ); + parent = block.header().hash(); + alt_blocks.push(block); + } + + // Ignore alternative branch: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. + max_depth, + ignore_alternative_branches: true, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + assert_eq!(potential_parents.len(), max_depth + 1); + let expected_parents: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + + for i in 0..(max_depth + 1) { + let parent = &potential_parents[i]; + let expected = &expected_parents[i]; + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + assert_eq!(parent.depth, i); + assert!(parent.aligned_with_pending); + } + } + + // Do not ignore: + for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + let potential_parents = block_on(find_potential_parents( + ParentSearchParams { + relay_parent: search_relay_parent, + para_id: ParaId::from(100), + ancestry_lookback: 1, // aligned chain is in ancestry. 
+ max_depth, + ignore_alternative_branches: false, + }, + &*backend, + &relay_chain, + )) + .unwrap(); + + let expected_len = 2 * max_depth + 1; + assert_eq!(potential_parents.len(), expected_len); + let expected_aligned: Vec<_> = [&included_block, &in_between_block, &pending_block] + .into_iter() + .chain(aligned_blocks.iter()) + .take(max_depth + 1) + .collect(); + let expected_alt = alt_blocks.iter().take(max_depth); + + let expected_parents: Vec<_> = + expected_aligned.clone().into_iter().chain(expected_alt).collect(); + // Check correctness. + assert_eq!(expected_parents.len(), expected_len); + + for i in 0..expected_len { + let parent = &potential_parents[i]; + let expected = expected_parents + .iter() + .find(|block| block.header().hash() == parent.hash) + .expect("missing parent"); + + let is_aligned = expected_aligned.contains(&expected); + + assert_eq!(parent.hash, expected.hash()); + assert_eq!(&parent.header, expected.header()); + + assert_eq!(parent.aligned_with_pending, is_aligned); + } + } +} + #[test] fn find_potential_parents_aligned_with_pending() { sp_tracing::try_init_simple(); @@ -1249,6 +1608,7 @@ fn find_potential_parents_aligned_with_pending() { // Do not ignore: for max_depth in 0..=NON_INCLUDED_CHAIN_LEN { + log::info!("Ran with max_depth = {max_depth}"); let potential_parents = block_on(find_potential_parents( ParentSearchParams { relay_parent: search_relay_parent, @@ -1276,6 +1636,7 @@ fn find_potential_parents_aligned_with_pending() { // Check correctness. assert_eq!(expected_parents.len(), expected_len); + potential_parents.iter().for_each(|p| log::info!("result: {:?}", p)); for i in 0..expected_len { let parent = &potential_parents[i]; let expected = expected_parents @@ -1288,6 +1649,12 @@ fn find_potential_parents_aligned_with_pending() { assert_eq!(parent.hash, expected.hash()); assert_eq!(&parent.header, expected.header()); + log::info!( + "Check hash: {:?} expected: {} is: {}", + parent.hash, + is_aligned, + parent.aligned_with_pending, + ); assert_eq!(parent.aligned_with_pending, is_aligned); } } diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 42ca4e06f8f45..ce91d48bf589a 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -10,15 +10,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -anyhow = "1.0" -async-trait = "0.1.79" +anyhow = { workspace = true } +async-trait = { workspace = true } thiserror = { workspace = true } # Substrate -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } +sp-consensus = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index 7c3a901db6c32..f3ee6fc2f7d25 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ 
b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -10,23 +10,23 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.28" -parking_lot = "0.12.1" -tracing = "0.1.37" +async-trait = { workspace = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../../substrate/utils/prometheus" } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Cumulus -cumulus-client-consensus-common = { path = "../common" } -cumulus-primitives-core = { path = "../../../primitives/core" } -cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/relay-chain/src/import_queue.rs b/cumulus/client/consensus/relay-chain/src/import_queue.rs index 1b521e79d4820..1d6f039da4c12 100644 --- a/cumulus/client/consensus/relay-chain/src/import_queue.rs +++ b/cumulus/client/consensus/relay-chain/src/import_queue.rs @@ -52,7 +52,7 @@ where CIDP: CreateInherentDataProviders, { async fn verify( - &mut self, + &self, mut block_params: BlockImportParams, ) -> Result, String> { block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 0dd7c4fdb0f60..bc67678eedeb1 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -10,51 +10,51 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -futures-timer = "3.0.2" -parking_lot = "0.12.1" -tracing = "0.1.37" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } 
-sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-version = { path = "../../../substrate/primitives/version" } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } # Cumulus -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] -portpicker = "0.1.1" -tokio = { version = "1.32.0", features = ["macros"] } -url = "2.4.0" -rstest = "0.18.2" +portpicker = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +url = { workspace = true } +rstest = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } # Polkadot -polkadot-test-client = { path = "../../../polkadot/node/test/client" } +polkadot-test-client = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } -cumulus-test-service = { path = "../../test/service" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-test-service = { workspace = true } diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index eb0d7f0e01b39..18d121c41d168 
100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -26,9 +26,10 @@ use futures::{executor::block_on, poll, task::Poll, FutureExt, Stream, StreamExt use parking_lot::Mutex; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CollatorPair, CommittedCandidateReceipt, - Hash as PHash, HeadData, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, - PersistedValidationData, SessionIndex, SigningContext, ValidationCodeHash, ValidatorId, + BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorPair, + CommittedCandidateReceipt, CoreState, Hash as PHash, HeadData, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + SigningContext, ValidationCodeHash, ValidatorId, }; use polkadot_test_client::{ Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend, @@ -297,6 +298,13 @@ impl RelayChainInterface for DummyRelayChainInterface { Ok(header) } + async fn availability_cores( + &self, + _relay_parent: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + async fn version(&self, _: PHash) -> RelayChainResult { let version = self.data.lock().runtime_version; diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 85619e8403458..0d82cf6487432 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -7,24 +7,22 @@ description = "Inherent that needs to be present in every parachain block. Conta license = "Apache-2.0" [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } -tracing = { version = "0.1.37" } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-trie = { path = "../../../substrate/primitives/trie" } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } +cumulus-primitives-core = { workspace = true, 
default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 539802d693866..a95b24bc2933a 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -10,46 +10,46 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.28" -futures-timer = "3.0.2" -rand = "0.8.5" -tracing = "0.1.37" +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +rand = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-version = { path = "../../../substrate/primitives/version" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Polkadot -polkadot-node-primitives = { path = "../../../polkadot/node/primitives" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.79" +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +async-trait = { workspace = true } [dev-dependencies] -rstest = "0.18.2" -tokio = { version = "1.32.0", features = ["macros"] } -portpicker = "0.1.1" -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -cumulus-test-client = { path = "../../test/client" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -assert_matches = "1.5" +rstest = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +portpicker = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +cumulus-test-client = { workspace = true } +sc-utils = { 
workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +assert_matches = { workspace = true } # Cumulus -cumulus-test-service = { path = "../../test/service" } +cumulus-test-service = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 75bf308ef27aa..6f274ed18b6bc 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -17,7 +17,9 @@ use super::*; use assert_matches::assert_matches; use codec::{Decode, Encode}; -use cumulus_primitives_core::relay_chain::{BlockId, CandidateCommitments, CandidateDescriptor}; +use cumulus_primitives_core::relay_chain::{ + BlockId, CandidateCommitments, CandidateDescriptor, CoreState, +}; use cumulus_relay_chain_interface::{ InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PHash, PHeader, PersistedValidationData, StorageValue, ValidationCodeHash, ValidatorId, @@ -478,6 +480,13 @@ impl RelayChainInterface for Relaychain { async fn header(&self, _: BlockId) -> RelayChainResult> { unimplemented!("Not needed for test"); } + + async fn availability_cores( + &self, + _: PHash, + ) -> RelayChainResult>>> { + unimplemented!("Not needed for test"); + } } fn make_candidate_chain(candidate_number_range: Range) -> Vec { diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 7629b6c631a3a..6f1b74191be79 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.28" -futures-timer = "3.0.2" +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } # Substrate -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } # Polkadot 
-polkadot-cli = { path = "../../../polkadot/cli", default-features = false, features = ["cli"] } -polkadot-service = { path = "../../../polkadot/node/service" } +polkadot-cli = { features = ["cli"], workspace = true } +polkadot-service = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] # Substrate -sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-keyring = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-test-client = { path = "../../../polkadot/node/test/client" } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-test-client = { workspace = true } +metered = { features = ["futures_channel"], workspace = true } # Cumulus -cumulus-test-service = { path = "../../test/service" } +cumulus-test-service = { workspace = true } diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 7871623e8447a..38ba84748c1e3 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use std::{pin::Pin, sync::Arc, time::Duration}; +use std::{collections::btree_map::BTreeMap, pin::Pin, sync::Arc, time::Duration}; use async_trait::async_trait; use cumulus_primitives_core::{ relay_chain::{ - runtime_api::ParachainHost, Block as PBlock, BlockId, CommittedCandidateReceipt, - Hash as PHash, Header as PHeader, InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, - ValidationCodeHash, ValidatorId, + runtime_api::ParachainHost, Block as PBlock, BlockId, BlockNumber, + CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, InboundHrmpMessage, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -38,7 +38,7 @@ use sc_client_api::{ use sc_telemetry::TelemetryWorkerHandle; use sp_api::ProvideRuntimeApi; use sp_consensus::SyncOracle; -use sp_core::{sp_std::collections::btree_map::BTreeMap, Pair}; +use sp_core::Pair; use sp_state_machine::{Backend as StateBackend, StorageValue}; /// The timeout in seconds after that the waiting for a block should be aborted. @@ -256,6 +256,13 @@ impl RelayChainInterface for RelayChainInProcessInterface { Ok(Box::pin(notifications_stream)) } + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + Ok(self.full_client.runtime_api().availability_cores(relay_parent)?) 
+ } + async fn candidates_pending_availability( &self, hash: PHash, diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index e8603693ac8da..a496fab050dd7 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -10,18 +10,18 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-overseer = { workspace = true, default-features = true } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-core = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sc-client-api = { path = "../../../substrate/client/api" } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-version = { workspace = true } -futures = "0.3.28" -async-trait = "0.1.79" +futures = { workspace = true } +async-trait = { workspace = true } thiserror = { workspace = true } -jsonrpsee-core = "0.22" -codec = { package = "parity-scale-codec", version = "3.6.12" } +jsonrpsee-core = { workspace = true } +codec = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index 46e19b40f010c..d02035e84e92f 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -29,8 +29,8 @@ use sp_api::ApiError; use cumulus_primitives_core::relay_chain::BlockId; pub use cumulus_primitives_core::{ relay_chain::{ - CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + BlockNumber, CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, + InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -217,6 +217,14 @@ pub trait RelayChainInterface: Send + Sync { /// Get the runtime version of the relay chain. async fn version(&self, relay_parent: PHash) -> RelayChainResult; + + /// Yields information on all availability cores as relevant to the child block. + /// + /// Cores are either free, scheduled or occupied. Free cores can have paras assigned to them. 
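+	/// The result contains one entry per availability core of the relay chain.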
+ async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>>; } #[async_trait] @@ -337,6 +345,13 @@ where .await } + async fn availability_cores( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + (**self).availability_cores(relay_parent).await + } + async fn candidates_pending_availability( &self, block_id: PHash, diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 0b541092a3de8..95ecadc8bd06e 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -11,44 +11,37 @@ workspace = true [dependencies] # polkadot deps -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-core-primitives = { path = "../../../polkadot/core-primitives" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } -polkadot-node-subsystem-util = { path = "../../../polkadot/node/subsystem-util" } -polkadot-node-network-protocol = { path = "../../../polkadot/node/network/protocol" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-availability-recovery = { path = "../../../polkadot/node/network/availability-recovery" } -polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-protocol" } -polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } -polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } -polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } -polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } -polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } -polkadot-service = { path = "../../../polkadot/node/service" } +polkadot-network-bridge = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } # substrate deps -sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-common = { path = "../../../substrate/client/network/common" } -sc-service = { path = "../../../substrate/client/service" } -sc-client-api = { path = "../../../substrate/client/api" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -tokio = { version = "1.32.0", features = ["macros"] } +sc-authority-discovery = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } 
+sc-client-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } # cumulus deps -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-relay-chain-rpc-interface = { path = "../relay-chain-rpc-interface" } -cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-relay-chain-rpc-interface = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } -array-bytes = "6.2.2" -tracing = "0.1.37" -async-trait = "0.1.79" -futures = "0.3.28" -parking_lot = "0.12.1" +array-bytes = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { workspace = true } diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index ea6bc2ede4c04..6c0730a56a264 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-overseer = { workspace = true, default-features = true } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-version = { path = "../../../substrate/primitives/version" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-service = { path = "../../../substrate/client/service" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = 
true } +sc-service = { workspace = true, default-features = true } -tokio = { version = "1.32.0", features = ["sync"] } -tokio-util = { version = "0.7.8", features = ["compat"] } +tokio = { features = ["sync"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } -futures = "0.3.28" -futures-timer = "3.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22", features = ["ws-client"] } -tracing = "0.1.37" -async-trait = "0.1.79" -url = "2.4.0" +futures = { workspace = true } +futures-timer = { workspace = true } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["ws-client"], workspace = true } +tracing = { workspace = true, default-features = true } +async-trait = { workspace = true } +url = { workspace = true } serde_json = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } -schnellru = "0.2.1" -smoldot = { version = "0.11.0", default_features = false, features = ["std"] } -smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } -either = "1.8.1" +schnellru = { workspace = true } +smoldot = { default_features = false, features = ["std"], workspace = true } +smoldot-light = { default_features = false, features = ["std"], workspace = true } +either = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = "0.8.5" -pin-project = "1.1.3" +rand = { workspace = true, default-features = true } +pin-project = { workspace = true } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index bb7bfa5dc3226..e32ec6a41a4bf 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -24,17 +24,16 @@ use cumulus_primitives_core::{ InboundDownwardMessage, ParaId, PersistedValidationData, }; use cumulus_relay_chain_interface::{ - PHeader, RelayChainError, RelayChainInterface, RelayChainResult, + BlockNumber, CoreState, PHeader, RelayChainError, RelayChainInterface, RelayChainResult, }; use futures::{FutureExt, Stream, StreamExt}; use polkadot_overseer::Handle; use sc_client_api::StorageProof; -use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_state_machine::StorageValue; use sp_storage::StorageKey; use sp_version::RuntimeVersion; -use std::pin::Pin; +use std::{collections::btree_map::BTreeMap, pin::Pin}; use cumulus_primitives_core::relay_chain::BlockId; pub use url::Url; @@ -252,4 +251,11 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn version(&self, relay_parent: RelayHash) -> RelayChainResult { self.rpc_client.runtime_version(relay_parent).await } + + async fn availability_cores( + &self, + relay_parent: RelayHash, + ) -> RelayChainResult>> { + self.rpc_client.parachain_host_availability_cores(relay_parent).await + } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs index 9a49b60281b3c..2347dbb85f78e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/light_client_worker.rs @@ -20,7 +20,7 @@ use futures::{channel::mpsc::Sender, prelude::*, stream::FuturesUnordered}; use jsonrpsee::core::client::{ - Client as JsonRpseeClient, ClientBuilder, ClientT, Error, ReceivedMessage, TransportReceiverT, + Client as JsonRpseeClient, ClientBuilder, 
ClientT, ReceivedMessage, TransportReceiverT, TransportSenderT, }; use smoldot_light::{ChainId, Client as SmoldotClient, JsonRpcResponses}; @@ -124,7 +124,7 @@ pub struct LightClientRpcWorker { } fn handle_notification( - maybe_header: Option>, + maybe_header: Option>, senders: &mut Vec>, ) -> Result<(), ()> { match maybe_header { diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index a5d7c22a2ec89..c7eaa45958b0b 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -24,7 +24,7 @@ use jsonrpsee::{ }; use serde::de::DeserializeOwned; use serde_json::Value as JsonValue; -use std::collections::VecDeque; +use std::collections::{btree_map::BTreeMap, VecDeque}; use tokio::sync::mpsc::Sender as TokioSender; use codec::{Decode, Encode}; @@ -47,7 +47,6 @@ use sc_client_api::StorageData; use sc_rpc_api::{state::ReadProof, system::Health}; use sc_service::TaskManager; use sp_consensus_babe::Epoch; -use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_storage::StorageKey; use sp_version::RuntimeVersion; diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index e03e20fe5b416..8e9e41ca89dc0 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -10,39 +10,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -futures = "0.3.28" +futures = { workspace = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-rpc = { path = "../../../substrate/client/rpc" } -sc-service = { path = "../../../substrate/client/service" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-utils = { path = "../../../substrate/client/utils" } -sc-network-transactions = { path = "../../../substrate/client/network/transactions" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } -sp-io = { path = "../../../substrate/primitives/io" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, 
default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../cli" } -cumulus-client-collator = { path = "../collator" } -cumulus-client-consensus-common = { path = "../consensus/common" } -cumulus-client-pov-recovery = { path = "../pov-recovery" } -cumulus-client-network = { path = "../network" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } -cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } -cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-client-network = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index daff5ef8f482e..c08148928b7ce 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -10,26 +10,25 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-aura = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { 
workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } [dev-dependencies] # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system" } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-application-crypto/std", "sp-consensus-aura/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", diff --git a/cumulus/pallets/aura-ext/src/consensus_hook.rs b/cumulus/pallets/aura-ext/src/consensus_hook.rs index 5920298033911..c1a8568bdd834 100644 --- a/cumulus/pallets/aura-ext/src/consensus_hook.rs +++ b/cumulus/pallets/aura-ext/src/consensus_hook.rs @@ -20,6 +20,7 @@ //! The velocity `V` refers to the rate of block processing by the relay chain. use super::{pallet, Aura}; +use core::{marker::PhantomData, num::NonZeroU32}; use cumulus_pallet_parachain_system::{ self as parachain_system, consensus_hook::{ConsensusHook, UnincludedSegmentCapacity}, @@ -27,7 +28,6 @@ use cumulus_pallet_parachain_system::{ }; use frame_support::pallet_prelude::*; use sp_consensus_aura::{Slot, SlotDuration}; -use sp_std::{marker::PhantomData, num::NonZeroU32}; /// A consensus hook for a fixed block processing velocity and unincluded segment capacity. /// @@ -65,16 +65,26 @@ where let para_slot_from_relay = Slot::from_timestamp(relay_chain_timestamp.into(), para_slot_duration); - // Perform checks. - assert_eq!(slot, para_slot_from_relay, "slot number mismatch"); - if authored > velocity + 1 { + // Check that we are not too far in the future. Since we expect `V` parachain blocks + // during the relay chain slot, we can allow for `V` parachain slots into the future. + if *slot > *para_slot_from_relay + u64::from(velocity) { + panic!( + "Parachain slot is too far in the future: parachain_slot: {:?}, derived_from_relay_slot: {:?} velocity: {:?}", + slot, + para_slot_from_relay, + velocity + ); + } + + // We need to allow authoring multiple blocks in the same slot. + if slot != para_slot_from_relay && authored > velocity { panic!("authored blocks limit is reached for the slot") } let weight = T::DbWeight::get().reads(1); ( weight, - NonZeroU32::new(sp_std::cmp::max(C, 1)) + NonZeroU32::new(core::cmp::max(C, 1)) .expect("1 is the minimum value and non-zero; qed") .into(), ) @@ -113,6 +123,11 @@ impl< return false } + // TODO: This logic needs to be adjusted. + // It checks that we have not authored more than `V + 1` blocks in the slot. + // As a slot however, we take the parachain slot here. Velocity should + // be measured in relation to the relay chain slot. 
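For context, the new slot check in `consensus_hook.rs` above boils down to the following dependency-free sketch; the function and parameter names are illustrative stand-ins for the pallet's `Slot` and velocity values, not its actual API:

// Simplified model of the checks added above (illustrative only).
fn check_slot(para_slot: u64, relay_derived_slot: u64, authored: u32, velocity: u32) {
    // The parachain may run ahead of the slot derived from the relay-chain
    // timestamp, but by at most `velocity` slots.
    if para_slot > relay_derived_slot + u64::from(velocity) {
        panic!("parachain slot is too far in the future");
    }
    // Authoring several blocks in the same slot is allowed; the `velocity` cap is
    // only enforced when the parachain slot differs from the relay-derived slot.
    if para_slot != relay_derived_slot && authored > velocity {
        panic!("authored blocks limit is reached for the slot");
    }
}

fn main() {
    // Within the allowed window: relay-derived slot 10, parachain slot 12, velocity 2.
    check_slot(12, 10, 1, 2);
}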
+ // https://github.com/paritytech/polkadot-sdk/issues/3967 if last_slot == new_slot { authored_so_far < velocity + 1 } else { diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index 7ca84dff7c513..4c9e61458a87c 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -83,7 +83,7 @@ pub mod pallet { SlotInfo::::put((new_slot, authored)); - T::DbWeight::get().reads_writes(2, 1) + T::DbWeight::get().reads_writes(4, 2) } } @@ -109,7 +109,7 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -125,7 +125,7 @@ pub mod pallet { /// /// When executing the block it will verify the block seal to ensure that the correct author created /// the block. -pub struct BlockExecutor(sp_std::marker::PhantomData<(T, I)>); +pub struct BlockExecutor(core::marker::PhantomData<(T, I)>); impl ExecuteBlock for BlockExecutor where diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index f30802fa5d82e..b3512dc2ae6c9 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -17,29 +17,28 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.6.12" } -rand = { version = "0.8.5", features = ["std_rng"], default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +rand = { features = ["std_rng"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -pallet-timestamp = { path = "../../../substrate/frame/timestamp" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -pallet-aura = { path = "../../../substrate/frame/aura" } +sp-core = { workspace = true, default-features = true } +sp-io = { 
workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +pallet-aura = { workspace = true, default-features = true } [features] default = ["std"] @@ -65,7 +64,6 @@ std = [ "scale-info/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] try-runtime = [ diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index c6b6004452825..24823661383b5 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -21,13 +21,14 @@ use super::*; #[allow(unused)] use crate::Pallet as CollatorSelection; +use alloc::vec::Vec; use codec::Decode; +use core::cmp; use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; -use sp_std::{cmp, prelude::*}; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 2fa384367528a..17dc1a552c2de 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -81,6 +81,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use core::marker::PhantomData; use frame_support::traits::TypedGet; pub use pallet::*; @@ -101,6 +103,7 @@ const LOG_TARGET: &str = "runtime::collator-selection"; #[frame_support::pallet] pub mod pallet { pub use crate::weights::WeightInfo; + use alloc::vec::Vec; use core::ops::Div; use frame_support::{ dispatch::{DispatchClass, DispatchResultWithPostInfo}, @@ -118,7 +121,6 @@ pub mod pallet { RuntimeDebug, }; use sp_staking::SessionIndex; - use sp_std::vec::Vec; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); @@ -244,7 +246,7 @@ pub mod pallet { let duplicate_invulnerables = self .invulnerables .iter() - .collect::>(); + .collect::>(); assert!( duplicate_invulnerables.len() == self.invulnerables.len(), "duplicate invulnerables in genesis." diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 425acdd8bfb59..c52016948069a 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -17,6 +17,8 @@ //! A module that is responsible for migration of storage for Collator Selection. use super::*; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; use log; @@ -29,8 +31,6 @@ pub mod v2 { traits::{Currency, ReservableCurrency}, }; use sp_runtime::traits::{Saturating, Zero}; - #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; /// [`UncheckedMigrationToV2`] wrapped in a /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the @@ -51,7 +51,7 @@ pub mod v2 { >; /// Migrate to V2. 
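As a rough mental model of the `VersionedMigration` wrapper referenced above: it only runs the inner migration when the on-chain storage version matches the expected source version, and bumps it afterwards. The sketch below re-declares that idea locally with a bare integer version; the real type lives in `frame_support::migrations` and operates on pallet `StorageVersion`s.

// Hypothetical, self-contained model: run the inner migration only when the stored
// version equals FROM, then bump it to TO.
trait InnerMigration {
    fn migrate();
}

struct VersionedMigration<const FROM: u16, const TO: u16, M>(core::marker::PhantomData<M>);

impl<const FROM: u16, const TO: u16, M: InnerMigration> VersionedMigration<FROM, TO, M> {
    fn on_runtime_upgrade(on_chain_version: &mut u16) {
        if *on_chain_version == FROM {
            M::migrate();
            *on_chain_version = TO;
        }
        // Otherwise this is a no-op, which is what makes it safe to keep the
        // migration in the runtime's migration list across releases.
    }
}

struct NoopMigration;
impl InnerMigration for NoopMigration {
    fn migrate() {}
}

fn main() {
    let mut version = 1u16;
    VersionedMigration::<1, 2, NoopMigration>::on_runtime_upgrade(&mut version);
    assert_eq!(version, 2);
}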
- pub struct UncheckedMigrationToV2(sp_std::marker::PhantomData); + pub struct UncheckedMigrationToV2(PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2 { fn on_runtime_upgrade() -> Weight { let mut weight = Weight::zero(); @@ -123,10 +123,8 @@ pub mod v2 { pub mod v1 { use super::*; use frame_support::pallet_prelude::*; - #[cfg(feature = "try-runtime")] - use sp_std::prelude::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { let on_chain_version = Pallet::::on_chain_storage_version(); diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 6521c954eac23..459b1cb5fdf28 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -53,23 +53,12 @@ impl system::Config for Test { parameter_types! { pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } pub struct Author4; diff --git a/cumulus/pallets/collator-selection/src/weights.rs b/cumulus/pallets/collator-selection/src/weights.rs index 1c01ad6cd6fe8..12e6b755e9769 100644 --- a/cumulus/pallets/collator-selection/src/weights.rs +++ b/cumulus/pallets/collator-selection/src/weights.rs @@ -18,11 +18,11 @@ #![allow(unused_parens)] #![allow(unused_imports)] +use core::marker::PhantomData; use frame_support::{ traits::Get, weights::{constants::RocksDbWeight, Weight}, }; -use sp_std::marker::PhantomData; // The weight info trait for `pallet_collator_selection`. 
pub trait WeightInfo { diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 687cda164fb0b..936526290d93e 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -14,26 +14,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm/std", ] diff --git a/cumulus/pallets/dmp-queue/src/benchmarking.rs b/cumulus/pallets/dmp-queue/src/benchmarking.rs index 91d1e0eab7e40..432d6f3bc7ae5 100644 --- a/cumulus/pallets/dmp-queue/src/benchmarking.rs +++ b/cumulus/pallets/dmp-queue/src/benchmarking.rs @@ -19,9 +19,9 @@ use crate::*; +use alloc::vec; use frame_benchmarking::v2::*; use frame_support::{pallet_prelude::*, traits::Hooks}; -use sp_std::vec; #[benchmarks] mod benchmarks { diff --git a/cumulus/pallets/dmp-queue/src/lib.rs b/cumulus/pallets/dmp-queue/src/lib.rs index 9b3ec684febab..cedca6f3fb97f 100644 --- a/cumulus/pallets/dmp-queue/src/lib.rs +++ b/cumulus/pallets/dmp-queue/src/lib.rs @@ -23,6 +23,8 @@ #![cfg_attr(not(feature = "std"), no_std)] #![allow(deprecated)] // The pallet itself is deprecated. +extern crate alloc; + use migration::*; pub use pallet::*; diff --git a/cumulus/pallets/dmp-queue/src/migration.rs b/cumulus/pallets/dmp-queue/src/migration.rs index 349635cce547d..b1945e8eb37b8 100644 --- a/cumulus/pallets/dmp-queue/src/migration.rs +++ b/cumulus/pallets/dmp-queue/src/migration.rs @@ -17,9 +17,9 @@ //! Migrates the storage from the previously deleted DMP pallet. 
use crate::*; +use alloc::vec::Vec; use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber; use frame_support::{pallet_prelude::*, storage_alias, traits::HandleMessage}; -use sp_std::vec::Vec; pub(crate) const LOG: &str = "runtime::dmp-queue-export-xcms"; diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 1a6a19f2ab4a2..30a232f01b3e5 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -10,62 +10,62 @@ license = "Apache-2.0" workspace = true [dependencies] -bytes = { version = "1.4.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -environmental = { version = "1.1.4", default-features = false } -impl-trait-for-tuples = "0.2.1" +bytes = { workspace = true } +codec = { features = ["derive"], workspace = true } +environmental = { workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -trie-db = { version = "0.29.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +trie-db = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-message-queue = { workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-std = { workspace = true } +sp-trie = { workspace = true } +sp-version = { workspace = true } # Polkadot -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = 
"../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +polkadot-runtime-parachains = { workspace = true } +polkadot-runtime-common = { optional = true, workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } # Cumulus -cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } +cumulus-pallet-parachain-system-proc-macro = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true } [dev-dependencies] -assert_matches = "1.5" -hex-literal = "0.4.1" -lazy_static = "1.4" -trie-standardmap = "0.16.0" -rand = "0.8.5" -futures = "0.3.28" +assert_matches = { workspace = true } +hex-literal = { workspace = true, default-features = true } +lazy_static = { workspace = true } +trie-standardmap = { workspace = true } +rand = { workspace = true, default-features = true } +futures = { workspace = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-version = { path = "../../../substrate/primitives/version" } -sp-consensus-slots = { path = "../../../substrate/primitives/consensus/slots" } +sc-client-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } # Cumulus -cumulus-test-client = { path = "../../test/client" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } -cumulus-test-runtime = { path = "../../test/runtime" } +cumulus-test-client = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 0a90c30e03312..da6f0fd03efb7 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -14,9 +14,9 @@ proc-macro = true [dependencies] syn = { workspace = true } -proc-macro2 = "1.0.64" +proc-macro2 = { workspace = true } quote = { workspace = true } -proc-macro-crate = "3.0.0" +proc-macro-crate = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/proc-macro/src/lib.rs b/cumulus/pallets/parachain-system/proc-macro/src/lib.rs index 8ab5d81efdcf4..f284fbdc64c60 100644 --- a/cumulus/pallets/parachain-system/proc-macro/src/lib.rs +++ b/cumulus/pallets/parachain-system/proc-macro/src/lib.rs @@ -122,8 +122,8 @@ pub fn register_validate_block(input: proc_macro::TokenStream) 
-> proc_macro::To #[no_mangle] unsafe fn validate_block(arguments: *mut u8, arguments_len: usize) -> u64 { // We convert the `arguments` into a boxed slice and then into `Bytes`. - let args = #crate_::validate_block::sp_std::boxed::Box::from_raw( - #crate_::validate_block::sp_std::slice::from_raw_parts_mut( + let args = #crate_::validate_block::Box::from_raw( + #crate_::validate_block::slice::from_raw_parts_mut( arguments, arguments_len, ) diff --git a/cumulus/pallets/parachain-system/src/consensus_hook.rs b/cumulus/pallets/parachain-system/src/consensus_hook.rs index 91353fc7bbda7..3062396a4e786 100644 --- a/cumulus/pallets/parachain-system/src/consensus_hook.rs +++ b/cumulus/pallets/parachain-system/src/consensus_hook.rs @@ -18,8 +18,8 @@ //! of parachain blocks ready to submit to the relay chain, as well as some basic implementations. use super::relay_state_snapshot::RelayChainStateProof; +use core::num::NonZeroU32; use frame_support::weights::Weight; -use sp_std::num::NonZeroU32; /// The possible capacity of the unincluded segment. #[derive(Clone)] @@ -95,7 +95,7 @@ impl ConsensusHook for FixedCapacityUnincludedSegment { fn on_state_proof(_state_proof: &RelayChainStateProof) -> (Weight, UnincludedSegmentCapacity) { ( Weight::zero(), - NonZeroU32::new(sp_std::cmp::max(N, 1)) + NonZeroU32::new(core::cmp::max(N, 1)) .expect("1 is the minimum value and non-zero; qed") .into(), ) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index bbb74a1b05388..9e0a68d09a14a 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -27,7 +27,11 @@ //! //! Users must ensure that they register this pallet as an inherent provider. +extern crate alloc; + +use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, Encode}; +use core::cmp; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, @@ -54,7 +58,6 @@ use sp_runtime::{ }, BoundedSlice, FixedU128, RuntimeDebug, Saturating, }; -use sp_std::{cmp, collections::btree_map::BTreeMap, prelude::*}; use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm}; use xcm_builder::InspectMessageQueues; @@ -938,7 +941,7 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -1530,7 +1533,7 @@ impl Pallet { } /// Type that implements `SetCode`. -pub struct ParachainSetCode(sp_std::marker::PhantomData); +pub struct ParachainSetCode(core::marker::PhantomData); impl frame_system::SetCode for ParachainSetCode { fn set_code(code: Vec) -> DispatchResult { Pallet::::schedule_code_upgrade(code) @@ -1645,7 +1648,7 @@ pub trait CheckInherents { /// Struct that always returns `Ok` on inherents check, needed for backwards-compatibility. #[doc(hidden)] -pub struct DummyCheckInherents(sp_std::marker::PhantomData); +pub struct DummyCheckInherents(core::marker::PhantomData); #[allow(deprecated)] impl CheckInherents for DummyCheckInherents { @@ -1718,7 +1721,7 @@ pub type RelaychainBlockNumberProvider = RelaychainDataProvider; /// of [`RelayChainState`]. /// - [`current_block_number`](Self::current_block_number): Will return /// [`Pallet::last_relay_block_number()`]. 
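The `RelaychainDataProvider` documented above implements `BlockNumberProvider` so that pallets can consume the relay-chain block number instead of the parachain's own. As a loose, self-contained illustration of that pattern (the trait is re-declared locally here rather than imported from `sp_runtime`, and the names are illustrative):

// Local re-declaration for illustration; the real trait is
// `sp_runtime::traits::BlockNumberProvider`.
trait BlockNumberProvider {
    type BlockNumber;
    fn current_block_number() -> Self::BlockNumber;
}

// A parachain-side provider would return the relay-chain block number captured from
// the persisted validation data; this stub just returns a fixed value.
struct RelayNumberStub;

impl BlockNumberProvider for RelayNumberStub {
    type BlockNumber = u32;
    fn current_block_number() -> u32 {
        42
    }
}

// A pallet generic over the provider can use relay-chain block numbers for things
// like vesting or scheduling without caring where they come from.
fn blocks_until<P: BlockNumberProvider<BlockNumber = u32>>(target: u32) -> u32 {
    target.saturating_sub(P::current_block_number())
}

fn main() {
    assert_eq!(blocks_until::<RelayNumberStub>(50), 8);
}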
-pub struct RelaychainDataProvider(sp_std::marker::PhantomData); +pub struct RelaychainDataProvider(core::marker::PhantomData); impl BlockNumberProvider for RelaychainDataProvider { type BlockNumber = relay_chain::BlockNumber; diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index da904c0079a00..7bea72224b8ba 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -20,7 +20,9 @@ use super::*; +use alloc::collections::vec_deque::VecDeque; use codec::Encode; +use core::num::NonZeroU32; use cumulus_primitives_core::{ relay_chain::BlockNumber as RelayBlockNumber, AggregateMessageOrigin, InboundDownwardMessage, InboundHrmpMessage, PersistedValidationData, @@ -37,7 +39,6 @@ use frame_support::{ }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::{traits::BlakeTwo256, BuildStorage}; -use sp_std::{collections::vec_deque::VecDeque, num::NonZeroU32}; use sp_version::RuntimeVersion; use std::cell::RefCell; diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 60eccfb072f41..323aaf6503808 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -16,6 +16,7 @@ //! Relay chain state proof provides means for accessing part of relay chain storage for reads. +use alloc::vec::Vec; use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, @@ -23,7 +24,6 @@ use cumulus_primitives_core::{ use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; -use sp_std::vec::Vec; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; /// The capacity of the upward message queue of a parachain on the relay chain. diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 5ff15036fb6e4..51c6e83c11319 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -19,6 +19,7 @@ use super::*; use crate::mock::*; +use core::num::NonZeroU32; use cumulus_primitives_core::{AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage}; use frame_support::{assert_ok, parameter_types, weights::Weight}; use frame_system::RawOrigin; @@ -26,7 +27,6 @@ use hex_literal::hex; use rand::Rng; use relay_chain::HrmpChannelId; use sp_core::H256; -use sp_std::num::NonZeroU32; #[test] #[should_panic] diff --git a/cumulus/pallets/parachain-system/src/unincluded_segment.rs b/cumulus/pallets/parachain-system/src/unincluded_segment.rs index 1e83a945c4ee3..814bb83aa1acb 100644 --- a/cumulus/pallets/parachain-system/src/unincluded_segment.rs +++ b/cumulus/pallets/parachain-system/src/unincluded_segment.rs @@ -21,11 +21,12 @@ //! sent to relay chain. use super::relay_state_snapshot::{MessagingStateSnapshot, RelayDispatchQueueRemainingCapacity}; +use alloc::collections::btree_map::BTreeMap; use codec::{Decode, Encode}; +use core::marker::PhantomData; use cumulus_primitives_core::{relay_chain, ParaId}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData}; /// Constraints on outbound HRMP channel. 
#[derive(Clone, RuntimeDebug)] @@ -398,6 +399,7 @@ pub(crate) fn size_after_included(included_hash: H, segment: &[Anc #[cfg(test)] mod tests { use super::*; + use alloc::{vec, vec::Vec}; use assert_matches::assert_matches; #[test] diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index 956962fce157d..42311ca9d8340 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -26,6 +26,7 @@ use polkadot_parachain_primitives::primitives::{ HeadData, RelayChainBlockNumber, ValidationResult, }; +use alloc::vec::Vec; use codec::Encode; use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; @@ -33,7 +34,6 @@ use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; -use sp_std::prelude::*; use sp_trie::{MemoryDB, ProofSizeProvider}; use trie_recorder::SizeOnlyRecorderProvider; @@ -124,7 +124,7 @@ where Err(_) => panic!("Compact proof decoding failure."), }; - sp_std::mem::drop(storage_proof); + core::mem::drop(storage_proof); let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); @@ -294,7 +294,7 @@ fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Opt Some(value) => { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; - let written = sp_std::cmp::min(data.len(), value_out.len()); + let written = core::cmp::min(data.len(), value_out.len()); value_out[..written].copy_from_slice(&data[..written]); Some(value.len() as u32) }, @@ -368,7 +368,7 @@ fn host_default_child_storage_read( Some(value) => { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; - let written = sp_std::cmp::min(data.len(), value_out.len()); + let written = core::cmp::min(data.len(), value_out.len()); value_out[..written].copy_from_slice(&data[..written]); Some(value.len() as u32) }, diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index 763a4cffd77f9..3a00d4d352a69 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -30,6 +30,9 @@ mod trie_cache; #[doc(hidden)] mod trie_recorder; +#[cfg(not(feature = "std"))] +#[doc(hidden)] +pub use alloc::{boxed::Box, slice}; #[cfg(not(feature = "std"))] #[doc(hidden)] pub use bytes; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 5d785910fbe02..5999b3ce87f9d 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
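The two `core::cmp::min` call sites in `host_storage_read` / `host_default_child_storage_read` above follow a read-at-offset-into-a-caller-buffer pattern; roughly, as a standalone sketch with illustrative names:

// Copy as much of `value[offset..]` as fits into `out`, returning the full value
// length so the caller can tell whether its buffer was large enough.
fn read_at_offset(value: &[u8], out: &mut [u8], offset: usize) -> u32 {
    let data = &value[offset.min(value.len())..];
    let written = core::cmp::min(data.len(), out.len());
    out[..written].copy_from_slice(&data[..written]);
    value.len() as u32
}

fn main() {
    let mut buf = [0u8; 2];
    // The value is 4 bytes long; only 2 bytes fit into the buffer starting at offset 1.
    assert_eq!(read_at_offset(&[1, 2, 3, 4], &mut buf, 1), 4);
    assert_eq!(buf, [2, 3]);
}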
-use sp_state_machine::TrieCacheProvider; -use sp_std::{ +use alloc::{ boxed::Box, - cell::{RefCell, RefMut}, collections::btree_map::{BTreeMap, Entry}, }; +use core::cell::{RefCell, RefMut}; +use sp_state_machine::TrieCacheProvider; use sp_trie::NodeCodec; use trie_db::{node::NodeOwned, Hasher}; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 48310670c074d..1980134071952 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -22,11 +22,11 @@ use codec::Encode; -use sp_std::{ - cell::{RefCell, RefMut}, +use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, rc::Rc, }; +use core::cell::{RefCell, RefMut}; use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; diff --git a/cumulus/pallets/parachain-system/src/weights.rs b/cumulus/pallets/parachain-system/src/weights.rs index da7f64237e9b6..5c61879b4d36b 100644 --- a/cumulus/pallets/parachain-system/src/weights.rs +++ b/cumulus/pallets/parachain-system/src/weights.rs @@ -50,7 +50,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions needed for cumulus_pallet_parachain_system. pub trait WeightInfo { diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 62c923de59f25..e182ac45edebd 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -16,13 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } +codec = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-session = { workspace = true } [features] default = ["std"] @@ -39,5 +38,4 @@ std = [ "frame-system/std", "pallet-session/std", "sp-runtime/std", - "sp-std/std", ] diff --git a/cumulus/pallets/session-benchmarking/src/inner.rs b/cumulus/pallets/session-benchmarking/src/inner.rs index 36411d3d71afa..8d5954304878d 100644 --- a/cumulus/pallets/session-benchmarking/src/inner.rs +++ b/cumulus/pallets/session-benchmarking/src/inner.rs @@ -15,7 +15,7 @@ //! Benchmarking setup for pallet-session. 
-use sp_std::{prelude::*, vec}; +use alloc::{vec, vec::Vec}; use codec::Decode; use frame_benchmarking::{benchmarks, whitelisted_caller}; diff --git a/cumulus/pallets/session-benchmarking/src/lib.rs b/cumulus/pallets/session-benchmarking/src/lib.rs index a95d6fb7d5914..f5bfef0061690 100644 --- a/cumulus/pallets/session-benchmarking/src/lib.rs +++ b/cumulus/pallets/session-benchmarking/src/lib.rs @@ -20,6 +20,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 17b0fb2a01662..5fd1939e93a03 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -10,21 +10,20 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-sudo = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } [features] default = ["std"] @@ -37,7 +36,6 @@ std = [ "polkadot-primitives/std", "scale-info/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", diff --git a/cumulus/pallets/solo-to-para/src/lib.rs b/cumulus/pallets/solo-to-para/src/lib.rs index da948615d4e90..b42cc74f1cf32 100644 --- a/cumulus/pallets/solo-to-para/src/lib.rs +++ b/cumulus/pallets/solo-to-para/src/lib.rs @@ -16,12 +16,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use cumulus_pallet_parachain_system as parachain_system; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; pub use pallet::*; use polkadot_primitives::PersistedValidationData; -use sp_std::vec::Vec; #[frame_support::pallet] pub mod pallet { diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 178d981702f2e..35d7a083b061d 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -10,18 +10,17 @@ description = "Pallet for stuff specific to parachains' usage of XCM" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../substrate/primitives/std", default-features = 
false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -33,7 +32,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm/std", ] try-runtime = [ diff --git a/cumulus/pallets/xcm/src/lib.rs b/cumulus/pallets/xcm/src/lib.rs index 90a0ec76defe2..e31df8471c266 100644 --- a/cumulus/pallets/xcm/src/lib.rs +++ b/cumulus/pallets/xcm/src/lib.rs @@ -25,7 +25,6 @@ use cumulus_primitives_core::ParaId; pub use pallet::*; use scale_info::TypeInfo; use sp_runtime::{traits::BadOrigin, RuntimeDebug}; -use sp_std::prelude::*; use xcm::latest::{ExecuteXcm, Outcome}; #[frame_support::pallet] diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 87602978521fc..9c7470eda6da4 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -10,45 +10,44 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +pallet-message-queue = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { 
workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } # Optional import for benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -bounded-collections = { version = "0.2.0", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +bounded-collections = { workspace = true } # Bridges -bp-xcm-bridge-hub-router = { path = "../../../bridges/primitives/xcm-bridge-hub-router", default-features = false, optional = true } +bp-xcm-bridge-hub-router = { optional = true, workspace = true } [dev-dependencies] # Substrate -sp-core = { path = "../../../substrate/primitives/core" } -pallet-balances = { path = "../../../substrate/frame/balances" } -frame-support = { path = "../../../substrate/frame/support", features = ["experimental"] } +sp-core = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system" } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } [features] default = ["std"] @@ -68,7 +67,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/pallets/xcmp-queue/src/benchmarking.rs b/cumulus/pallets/xcmp-queue/src/benchmarking.rs index 49e2cc8367348..9cb1301addfe5 100644 --- a/cumulus/pallets/xcmp-queue/src/benchmarking.rs +++ b/cumulus/pallets/xcmp-queue/src/benchmarking.rs @@ -17,6 +17,7 @@ use crate::*; +use alloc::vec; use codec::DecodeAll; use frame_benchmarking::v2::*; use frame_support::traits::Hooks; diff --git a/cumulus/pallets/xcmp-queue/src/bridging.rs b/cumulus/pallets/xcmp-queue/src/bridging.rs index 9db4b6e74c398..eff4a37b0cef7 100644 --- a/cumulus/pallets/xcmp-queue/src/bridging.rs +++ b/cumulus/pallets/xcmp-queue/src/bridging.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::Get; /// both `OutboundXcmpStatus` and `InboundXcmpStatus` for defined `ParaId` if any of those is /// suspended. pub struct InAndOutXcmpChannelStatusProvider( - sp_std::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, + core::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, ); impl, Runtime: crate::Config> bp_xcm_bridge_hub_router::XcmChannelStatusProvider @@ -45,7 +45,7 @@ impl, Runtime: crate::Config> /// Adapter implementation for `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` which checks /// only `OutboundXcmpStatus` for defined `SiblingParaId` if is suspended. 
pub struct OutXcmpChannelStatusProvider( - sp_std::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, + core::marker::PhantomData<(SiblingBridgeHubParaId, Runtime)>, ); impl, Runtime: crate::Config> bp_xcm_bridge_hub_router::XcmChannelStatusProvider diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 5633f05f13bb8..8c4446a925d4d 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -50,6 +50,9 @@ pub mod bridging; pub mod weights; pub use weights::WeightInfo; +extern crate alloc; + +use alloc::vec::Vec; use bounded_collections::BoundedBTreeSet; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use cumulus_primitives_core::{ @@ -69,7 +72,6 @@ use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; use sp_core::MAX_POSSIBLE_ALLOCATION; use sp_runtime::{FixedU128, RuntimeDebug, Saturating, WeakBoundedVec}; -use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm, WrapVersion, MAX_XCM_DECODE_DEPTH}; use xcm_builder::InspectMessageQueues; use xcm_executor::traits::ConvertOrigin; @@ -491,7 +493,7 @@ impl Pallet { let channel_info = T::ChannelInfo::get_channel_info(recipient).ok_or(MessageSendError::NoChannel)?; // Max message size refers to aggregates, or pages. Not to individual fragments. - let max_message_size = channel_info.max_message_size as usize; + let max_message_size = channel_info.max_message_size.min(T::MaxPageSize::get()) as usize; let format_size = format.encoded_size(); // We check the encoded fragment length plus the format size against the max message size // because the format is concatenated if a new page is needed. @@ -522,7 +524,7 @@ impl Pallet { // We return the size of the last page inside of the option, to not calculate it again. let appended_to_last_page = have_active .then(|| { - >::mutate( + >::try_mutate( recipient, channel_details.last_index - 1, |page| { @@ -532,17 +534,18 @@ impl Pallet { ) != Ok(format) { defensive!("Bad format in outbound queue; dropping message"); - return None + return Err(()) } if page.len() + encoded_fragment.len() > max_message_size { - return None + return Err(()) } for frag in encoded_fragment.iter() { - page.try_push(*frag).ok()?; + page.try_push(*frag)?; } - Some(page.len()) + Ok(page.len()) }, ) + .ok() }) .flatten(); diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index b64982a893029..d0657aaea9fd0 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -19,6 +19,7 @@ pub mod v5; use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE}; +use alloc::vec::Vec; use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, diff --git a/cumulus/pallets/xcmp-queue/src/migration/v5.rs b/cumulus/pallets/xcmp-queue/src/migration/v5.rs index 247adab7108fa..818365f36f605 100644 --- a/cumulus/pallets/xcmp-queue/src/migration/v5.rs +++ b/cumulus/pallets/xcmp-queue/src/migration/v5.rs @@ -17,6 +17,7 @@ //! Migrates the storage to version 5. 
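The switch from `mutate` to `try_mutate` in the xcmp-queue send path above (together with capping the page size via `T::MaxPageSize`) is what the new `page_not_modified_when_fragment_does_not_fit` test later in this diff relies on: returning `Err` from the closure leaves the stored page untouched, so an oversized fragment can no longer alter the active page. A dependency-free sketch of that rollback behaviour, with names that are illustrative rather than the FRAME storage API:

// Model of `try_mutate`: the closure works on a copy that is only committed on `Ok`.
fn try_mutate_page<R>(
    page: &mut Vec<u8>,
    f: impl FnOnce(&mut Vec<u8>) -> Result<R, ()>,
) -> Result<R, ()> {
    let mut candidate = page.clone();
    let result = f(&mut candidate);
    if result.is_ok() {
        *page = candidate; // commit only on success
    }
    result
}

// Append a fragment if it fits; otherwise report `Err` so the caller opens a new page.
fn append_fragment(page: &mut Vec<u8>, fragment: &[u8], max_page_size: usize) -> Result<usize, ()> {
    try_mutate_page(page, |p| {
        if p.len() + fragment.len() > max_page_size {
            return Err(());
        }
        p.extend_from_slice(fragment);
        Ok(p.len())
    })
}

fn main() {
    let mut page = vec![0u8; 6];
    // An 8-byte page limit leaves no room for a 4-byte fragment: the call fails and
    // the existing page is left exactly as it was.
    assert!(append_fragment(&mut page, &[1, 2, 3, 4], 8).is_err());
    assert_eq!(page.len(), 6);
}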
use crate::*; +use alloc::vec::Vec; use cumulus_primitives_core::ListChannelInfos; use frame_support::{pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade}; diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index e166a78ee8220..7fb96de7a4eaa 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -85,25 +85,14 @@ impl frame_system::Config for Test { parameter_types! { pub const ExistentialDeposit: u64 = 5; - pub const MaxReserves: u32 = 50; } pub type Balance = u64; +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl cumulus_pallet_parachain_system::Config for Test { diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index cdf41e27f0b27..5b02baf2310a3 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -28,6 +28,7 @@ use frame_support::{ use mock::{new_test_ext, ParachainSystem, RuntimeOrigin as Origin, Test, XcmpQueue}; use sp_runtime::traits::{BadOrigin, Zero}; use std::iter::{once, repeat}; +use xcm_builder::InspectMessageQueues; #[test] fn empty_concatenated_works() { @@ -854,7 +855,6 @@ fn verify_fee_factor_increase_and_decrease() { #[test] fn get_messages_works() { new_test_ext().execute_with(|| { - use xcm_builder::InspectMessageQueues; let sibling_para_id = ParaId::from(2001); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling_para_id); let destination: Location = (Parent, Parachain(sibling_para_id.into())).into(); @@ -890,3 +890,32 @@ fn get_messages_works() { ); }); } + +/// We try to send a fragment that will not fit into the currently active page. This should +/// therefore not modify the current page but instead create a new one. 
+#[test] +fn page_not_modified_when_fragment_does_not_fit() { + new_test_ext().execute_with(|| { + let sibling = ParaId::from(2001); + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling); + + let destination: Location = (Parent, Parachain(sibling.into())).into(); + let message = Xcm(vec![ClearOrigin; 600]); + + loop { + let old_page_zero = OutboundXcmpMessages::::get(sibling, 0); + assert_ok!(send_xcm::(destination.clone(), message.clone())); + + // If a new page was created by this send_xcm call, then page_zero was not also + // modified: + let num_pages = OutboundXcmpMessages::::iter_prefix(sibling).count(); + if num_pages == 2 { + let new_page_zero = OutboundXcmpMessages::::get(sibling, 0); + assert_eq!(old_page_zero, new_page_zero); + break + } else if num_pages > 2 { + panic!("Too many pages created"); + } + } + }); +} diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index a0d5ddff6ebb1..a6ba01ffa394d 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -23,9 +23,8 @@ "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", - "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch16.rotko.net/tcp/33576/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", + "/dns/pch16.rotko.net/tcp/35576/wss/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 00a38b675def7..3352cb25a2898 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -8,7 +8,25 @@ "/dns/kusama-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", - "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" + "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", + "/dns/boot.gatotech.network/tcp/33240/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + "/dns/boot.gatotech.network/tcp/35240/wss/p2p/12D3KooWLi9TzaKX4zniJpiM521PnYG4EocpdqjPpJUhXq9QGkRX", + "/dns/people-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + 
"/dns/people-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWGP1C9iWTHnZyeaSjYZ7LdK8douXWc1n1dBv25XEASHaj", + "/dns/kppl16.rotko.net/tcp/33756/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + "/dns/kppl16.rotko.net/tcp/35756/wss/p2p/12D3KooWSKQwgoydfbN6mNN2aNwdqfkR2ExAnTRs8mmdrPQTtDLo", + "/dns/people-kusama-boot-ng.dwellir.com/tcp/30359/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWM6T8MMibxLZhhpq6F612CZ4FgnfDSJSkWDMiVUDe1aGb", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30645/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/people-kusama-bootnode.turboflakes.io/tcp/30745/wss/p2p/12D3KooWCR2Q8J2NFFfuofDak4zSgWkuBq7orP96HFaxLgAoDUBV", + "/dns/boot-node.helikon.io/tcp/7510/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/boot-node.helikon.io/tcp/7512/wss/p2p/12D3KooWM1X4setrMWjwnV8iDkAtYhqFHNkGozdWdq6sawWh5Yhv", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30377/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/people-kusama.bootnodes.polkadotters.com/tcp/30379/wss/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/boot.metaspan.io/tcp/25068/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/boot.metaspan.io/tcp/25069/wss/p2p/12D3KooWDoDLtLvQi8hhFVyubPZhaYuAwSAJrPFtyGWJ2NSfBiyP", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/30342/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", + "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/443/wss/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 2b943b6dca559..6d436bdf799a4 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,42 +13,41 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-asset-tx-payment = { path = "../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } 
+pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +polkadot-primitives = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } +pallet-collator-selection = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +parachain-info = { workspace = true } [dev-dependencies] -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } +pallet-authorship = { workspace = true } +sp-io = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -73,7 +72,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-executor/std", "xcm/std", ] diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index ed9c5c483fa74..42ea50c75a8d4 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -16,6 +16,8 @@ //! Auxiliary struct/enums for parachain runtimes. //! Taken from polkadot/runtime/common (at a21cd64) and adapted for parachains. +use alloc::boxed::Box; +use core::marker::PhantomData; use frame_support::traits::{ fungible, fungibles, tokens::imbalance::ResolveTo, Contains, ContainsPair, Currency, Defensive, Get, Imbalance, OnUnbalanced, OriginTrait, @@ -23,7 +25,6 @@ use frame_support::traits::{ use pallet_asset_tx_payment::HandleCredit; use pallet_collator_selection::StakingPotAccountId; use sp_runtime::traits::Zero; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::latest::{ Asset, AssetId, Fungibility, Fungibility::Fungible, Junction, Junctions::Here, Location, Parent, WeightLimit, @@ -202,7 +203,7 @@ mod tests { use frame_system::{limits, EnsureRoot}; use pallet_collator_selection::IdentityCollator; use polkadot_primitives::AccountId; - use sp_core::{ConstU64, H256}; + use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, Perbill, @@ -224,7 +225,6 @@ mod tests { parameter_types! 
{ pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MaxReserves: u32 = 50; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -253,20 +253,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } pub struct OneAuthor; diff --git a/cumulus/parachains/common/src/lib.rs b/cumulus/parachains/common/src/lib.rs index b01d623d2b93d..3cffb69daac3f 100644 --- a/cumulus/parachains/common/src/lib.rs +++ b/cumulus/parachains/common/src/lib.rs @@ -15,6 +15,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub mod impls; pub mod message_queue; pub mod xcm_config; diff --git a/cumulus/parachains/common/src/message_queue.rs b/cumulus/parachains/common/src/message_queue.rs index 0c9f4b840c916..511d6243cb8c4 100644 --- a/cumulus/parachains/common/src/message_queue.rs +++ b/cumulus/parachains/common/src/message_queue.rs @@ -16,10 +16,10 @@ //! Helpers to deal with configuring the message queue in the runtime. +use core::marker::PhantomData; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::traits::{QueueFootprint, QueuePausedQuery}; use pallet_message_queue::OnQueueChanged; -use sp_std::marker::PhantomData; /// Narrow the scope of the `Inner` query from `AggregateMessageOrigin` to `ParaId`. /// diff --git a/cumulus/parachains/common/src/xcm_config.rs b/cumulus/parachains/common/src/xcm_config.rs index a9756af7aed24..7c58a2b2405c7 100644 --- a/cumulus/parachains/common/src/xcm_config.rs +++ b/cumulus/parachains/common/src/xcm_config.rs @@ -14,13 +14,13 @@ // limitations under the License. 
use crate::impls::AccountIdOf; +use core::marker::PhantomData; use cumulus_primitives_core::{IsSystem, ParaId}; use frame_support::{ traits::{fungibles::Inspect, tokens::ConversionToAssetBalance, Contains, ContainsPair}, weights::Weight, }; use sp_runtime::traits::Get; -use sp_std::marker::PhantomData; use xcm::latest::prelude::*; /// A `ChargeFeeInFungibles` implementation that converts the output of diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index 8100e68134883..7bd91ae6774c6 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } -rococo-emulated-chain = { path = "../../../relays/rococo" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } +rococo-emulated-chain = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index e5378b35f5e48..3a87322664d91 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -21,7 +21,7 @@ use sp_core::{sr25519, storage::Storage}; use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, - SAFE_XCM_VERSION, + SAFE_XCM_VERSION, USDT_ID, }; use parachains_common::{AccountId, Balance}; @@ -68,7 +68,10 @@ pub fn genesis() -> Storage { ..Default::default() }, assets: asset_hub_rococo_runtime::AssetsConfig { - assets: vec![(RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), true, ED)], + assets: vec![ + (RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), true, ED), + (USDT_ID, AssetHubRococoAssetOwner::get(), true, ED), + ], ..Default::default() }, foreign_assets: asset_hub_rococo_runtime::ForeignAssetsConfig { diff 
--git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 202d02b250bb2..80d2376c6811d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub use asset_hub_rococo_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index e0abaa66c5cab..86d4ce3e7ac82 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -13,16 +13,16 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } -westend-emulated-chain = { path = "../../../relays/westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +asset-hub-westend-runtime = { workspace = true } +westend-emulated-chain = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 6043a6aeda48f..608690218d2f4 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+pub use asset_hub_westend_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 789f10a35f268..f3c0799ad0f6a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index 8c18d112bc12f..d8b8edaf2409b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -15,6 +15,11 @@ pub mod genesis; +pub use bridge_hub_rococo_runtime::{ + xcm_config::XcmConfig as BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, + RuntimeOrigin as BridgeHubRococoRuntimeOrigin, +}; + // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index d82971cf55aed..ebcec9641e7d9 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } -bridge-hub-common = { 
path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +bridge-hub-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index b0dddc9dbf9a5..f701b3096994a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -15,6 +15,8 @@ pub mod genesis; +pub use bridge_hub_westend_runtime::xcm_config::XcmConfig as BridgeHubWestendXcmConfig; + // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 4c2a7d3c274dc..87dfd73ab05ba 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +collectives-westend-runtime = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs index a32e865dd9ce8..f90d82231a3bb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/lib.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+pub use collectives_westend_runtime; + pub mod genesis; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index f7fe93d27775a..1549d6a2ab6ba 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -people-rococo-runtime = { path = "../../../../../../runtimes/people/people-rococo" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["rococo"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +people-rococo-runtime = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs index fa818bf81bf60..c8da97cc3e8bf 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use people_rococo_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 57a767e0c2a3e..9c5ac0bca9de7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -people-westend-runtime = { path = "../../../../../../runtimes/people/people-westend" } -testnet-parachains-constants = { path = "../../../../../../runtimes/constants", features = ["westend"] } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +people-westend-runtime = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs index 775b89ac208b0..904ce34d8c08a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use people_westend_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 2ac508273c615..9e6b14b585984 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -13,14 +13,14 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../../common" } -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } +emulated-integration-tests-common = { workspace = true } +penpal-runtime = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index c268b014bfa34..91793d33f304f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -13,11 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+pub use penpal_runtime::{self, xcm_config::RelayNetworkId as PenpalRelayNetworkId}; + mod genesis; pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; -pub use penpal_runtime::xcm_config::{ - CustomizableAssetFromSystemAssetHub, RelayNetworkId as PenpalRelayNetworkId, -}; // Substrate use frame_support::traits::OnInitialize; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 113036b4c00ea..9376687947e6c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -13,17 +13,17 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +sp-core = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } +polkadot-primitives = { workspace = true } +rococo-runtime-constants = { workspace = true } +rococo-runtime = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index 7a3a936ec972f..bd637a5f7965b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use rococo_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index b952477c47a7c..de285d9885a2f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -13,21 +13,21 @@ workspace = true [dependencies] # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } -pallet-staking = { path = "../../../../../../../substrate/frame/staking", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true } +pallet-staking = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +polkadot-primitives = { workspace = true } +westend-runtime-constants = { workspace = true } +westend-runtime = { workspace = true } +xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index 83af58f61732d..ce9fafcd5bda8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -12,6 +12,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+pub use westend_runtime; pub mod genesis; diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index d9ec813232309..7152f1dbc272b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -10,37 +10,37 @@ description = "Common resources for integration testing with xcm-emulator" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -paste = "1.0.14" +codec = { workspace = true } +paste = { workspace = true, default-features = true } # Substrate -sp-consensus-beefy = { path = "../../../../../substrate/primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../../../substrate/client/consensus/grandpa" } -sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime" } -frame-support = { path = "../../../../../substrate/frame/support" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe" } -pallet-assets = { path = "../../../../../substrate/frame/assets" } -pallet-balances = { path = "../../../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue" } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm" } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } # Cumulus -parachains-common = { path = "../../../common" } -cumulus-primitives-core = { path = "../../../../primitives/core" } -xcm-emulator = { path = "../../../../xcm/xcm-emulator" } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } -asset-test-utils = { path = "../../../runtimes/assets/test-utils" } +parachains-common = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +xcm-emulator = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } 
+cumulus-pallet-parachain-system = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } # Bridges -bp-messages = { path = "../../../../../bridges/primitives/messages" } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common" } +bp-messages = { workspace = true, default-features = true } +pallet-bridge-messages = { workspace = true, default-features = true } +bridge-runtime-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 4a9d3b3a5aaf5..7077fbbb0a9aa 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -51,11 +51,14 @@ pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; type AccountPublic = ::Signer; -// This asset is added to AH as Asset and reserved transfer between Parachain and AH +// (trust-backed) Asset registered on AH and reserve-transferred between Parachain and AH pub const RESERVABLE_ASSET_ID: u32 = 1; -// This asset is added to AH as ForeignAsset and teleported between Penpal and AH +// ForeignAsset registered on AH and teleported between Penpal and AH pub const TELEPORTABLE_ASSET_ID: u32 = 2; +// USDT registered on AH as (trust-backed) Asset and reserve-transferred between Parachain and AH +pub const USDT_ID: u32 = 1984; + pub const PENPAL_ID: u32 = 2000; pub const ASSETS_PALLET_ID: u8 = 50; diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml index eb0a8a850d069..298be7362ec3a 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -12,9 +12,9 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -rococo-emulated-chain = { path = "../../chains/relays/rococo" } -asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -people-rococo-emulated-chain = { path = "../../chains/parachains/people/people-rococo" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } +asset-hub-rococo-emulated-chain = { workspace = true } +bridge-hub-rococo-emulated-chain = { workspace = true } +people-rococo-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 744cbe4f8c1e3..cd0cb272b7f5e 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -rococo-emulated-chain = { path = 
"../../chains/relays/rococo" } -westend-emulated-chain = { path = "../../chains/relays/westend" } -asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } -bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } +emulated-integration-tests-common = { workspace = true } +rococo-emulated-chain = { workspace = true } +westend-emulated-chain = { workspace = true, default-features = true } +asset-hub-rococo-emulated-chain = { workspace = true } +asset-hub-westend-emulated-chain = { workspace = true } +bridge-hub-rococo-emulated-chain = { workspace = true } +bridge-hub-westend-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml index 64bc91f442d1b..37c14aa303529 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -12,10 +12,10 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -westend-emulated-chain = { path = "../../chains/relays/westend", default-features = false } -asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } -bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } -collectives-westend-emulated-chain = { path = "../../chains/parachains/collectives/collectives-westend" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } -people-westend-emulated-chain = { path = "../../chains/parachains/people/people-westend" } +emulated-integration-tests-common = { workspace = true } +westend-emulated-chain = { workspace = true } +asset-hub-westend-emulated-chain = { workspace = true } +bridge-hub-westend-emulated-chain = { workspace = true } +collectives-westend-emulated-chain = { workspace = true } +penpal-emulated-chain = { workspace = true } +people-westend-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 9abecbecc48a7..b4579da94cbf6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,32 +11,29 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = 
"../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } +polkadot-runtime-common = { workspace = true, default-features = true } +rococo-runtime-constants = { workspace = true, default-features = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } -parachains-common = { path = "../../../../../common" } -asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } -penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +parachains-common = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 2bd388bee400e..f00945926963c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -46,14 +46,36 @@ mod imports { pub use parachains_common::Balance; pub use rococo_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ + asset_hub_rococo_runtime::{ + xcm_config::{ + self as ahr_xcm_config, TokenLocation as RelayLocation, + XcmConfig as AssetHubRococoXcmConfig, + }, + AssetConversionOrigin as AssetHubRococoAssetConversionOrigin, + }, genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, AssetHubRococoParaPallet as AssetHubRococoPallet, }, penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + 
CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, + LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, ED as PENPAL_ED, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + rococo_emulated_chain::{ + genesis::ED as ROCOCO_ED, + rococo_runtime::{ + governance as rococo_governance, + xcm_config::{ + UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, + }, + OriginCaller as RococoOriginCaller, + }, + RococoRelayPallet as RococoPallet, + }, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, PenpalAPara as PenpalA, @@ -62,18 +84,6 @@ mod imports { RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, }; - // Runtimes - pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, - }; - pub use penpal_runtime::xcm_config::{ - LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, - LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, - }; - pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index edaaa998a9ca1..7ff6d6c193c9b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -170,7 +170,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Westend)]).encode(), )], )); @@ -300,7 +300,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Westend)]).encode(), )], )); @@ -454,7 +454,7 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Westend)]).encode(), )], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index ec48e400ff545..16e0512da9605 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -17,10 +17,7 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new( - v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) - .expect("conversion works"), - ); + let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); let asset_one = Box::new(v3::Location::new( 0, [ @@ -230,12 +227,12 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = asset_hub_rococo_runtime::xcm_config::TokenLocation::get(); - let mut asset_one = asset_hub_rococo_runtime::xcm_config::PoolAssetsPalletLocation::get(); + let asset_native = RelayLocation::get(); + let mut asset_one = ahr_xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubRococo::execute_with(|| { - let pool_owner_account_id = asset_hub_rococo_runtime::AssetConversionOrigin::get(); + let pool_owner_account_id = AssetHubRococoAssetConversionOrigin::get(); assert_ok!(::PoolAssets::create( ::RuntimeOrigin::signed(pool_owner_account_id.clone()), @@ -255,8 +252,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(v3::Location::try_from(asset_native).expect("conversion works")), - Box::new(v3::Location::try_from(asset_one).expect("conversion works")), + Box::new(v3::Location::try_from(asset_native).unwrap()), + Box::new(v3::Location::try_from(asset_one).unwrap()), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -265,9 +262,7 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = - v3::Location::try_from(asset_hub_rococo_runtime::xcm_config::TokenLocation::get()) - .expect("conversion works"); + let asset_native = v3::Location::try_from(RelayLocation::get()).unwrap(); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index 01bf40ae8fdf2..f8190e11c51c8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -20,12 +20,11 @@ use frame_support::{ sp_runtime::traits::Dispatchable, traits::{ fungible::Inspect, - fungibles::{Create, Inspect as FungiblesInspect, Mutate}, + fungibles::{Inspect as FungiblesInspect, Mutate}, }, }; use parachains_common::AccountId; use polkadot_runtime_common::impls::VersionedLocatableAsset; -use rococo_runtime::OriginCaller; use rococo_runtime_constants::currency::GRAND; use xcm_executor::traits::ConvertLocation; @@ -67,7 +66,7 @@ fn spend_roc_on_asset_hub() { let treasury_location: Location = (Parent, PalletInstance(18)).into(); let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { - as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + as_origin: bx!(RococoOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { dest: 
bx!(VersionedLocation::V4(asset_hub_location.clone())), beneficiary: bx!(VersionedLocation::V4(treasury_location)), @@ -99,7 +98,7 @@ fn spend_roc_on_asset_hub() { // Fund Alice account from Rococo Treasury account on Asset Hub. let treasury_origin: RuntimeOrigin = - rococo_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + rococo_governance::pallet_custom_origins::Origin::Treasurer.into(); let alice_location: Location = [Junction::AccountId32 { network: None, id: Rococo::account_id_of(ALICE).into() }] @@ -163,15 +162,12 @@ fn spend_roc_on_asset_hub() { #[test] fn create_and_claim_treasury_spend_in_usdt() { const ASSET_ID: u32 = 1984; - const SPEND_AMOUNT: u128 = 1_000_000; + const SPEND_AMOUNT: u128 = 10_000_000; // treasury location from a sibling parachain. let treasury_location: Location = Location::new(1, PalletInstance(18)); // treasury account on a sibling parachain. let treasury_account = - asset_hub_rococo_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahr_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into())); let root = ::RuntimeOrigin::root(); @@ -190,13 +186,7 @@ fn create_and_claim_treasury_spend_in_usdt() { AssetHubRococo::execute_with(|| { type Assets = ::Assets; - // create an asset class and mint some assets to the treasury account. - assert_ok!(>::create( - ASSET_ID, - treasury_account.clone(), - true, - SPEND_AMOUNT / 2 - )); + // USDT created at genesis, mint some assets to the treasury account. assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); // beneficiary has zero balance. assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index e0f29cd801c34..6b50b6f473ed0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,38 +11,35 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-keyring = { path = "../../../../../../../substrate/primitives/keyring", default-features = false } -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../../../substrate/frame/metadata-hash-extension" } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-message-queue = { 
path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-transaction-payment = { path = "../../../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-asset-tx-payment = { path = "../../../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +sp-runtime = { workspace = true } +sp-keyring = { workspace = true } +sp-core = { workspace = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-treasury = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-asset-tx-payment = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +polkadot-runtime-common = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +parachains-common = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +emulated-integration-tests-common = { workspace = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 1c4a0ef4c8d2a..db8ada3f4ea28 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -46,15 +46,33 @@ mod imports { pub use parachains_common::{AccountId, Balance}; pub use westend_system_emulated_network::{ asset_hub_westend_emulated_chain::{ + asset_hub_westend_runtime::{ + xcm_config::{ + self as ahw_xcm_config, WestendLocation as RelayLocation, + XcmConfig as 
AssetHubWestendXcmConfig, + }, + AssetConversionOrigin as AssetHubWestendAssetConversionOrigin, + }, genesis::{AssetHubWestendAssetOwner, ED as ASSET_HUB_WESTEND_ED}, AssetHubWestendParaPallet as AssetHubWestendPallet, }, collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, + LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, - westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, + westend_emulated_chain::{ + genesis::ED as WESTEND_ED, + westend_runtime::xcm_config::{ + UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, + }, + WestendRelayPallet as WestendPallet, + }, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, AssetHubWestendParaSender as AssetHubWestendSender, @@ -66,18 +84,6 @@ mod imports { WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; - // Runtimes - pub use asset_hub_westend_runtime::xcm_config::{ - WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, - }; - pub use penpal_runtime::xcm_config::{ - LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, - LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, - }; - pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs index 2d02e90f47fb8..15f4fe33bddc1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -28,10 +28,7 @@ fn create_and_claim_treasury_spend() { Location::new(1, [Parachain(CollectivesWestend::para_id().into()), PalletInstance(65)]); // treasury account on a sibling parachain. let treasury_account = - asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(1, [Parachain(AssetHubWestend::para_id().into())]); let root = ::RuntimeOrigin::root(); // asset kind to be spent from the treasury. 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index d39c72c7c5f0d..49dfe8d58394c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -170,7 +170,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); @@ -300,7 +300,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); @@ -455,7 +455,7 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Rococo)]).encode(), )], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index f6b6580988658..cf429378cf6d8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -17,10 +17,8 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new( - v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) - .expect("conversion works"), - ); + let asset_native = + Box::new(v3::Location::try_from(RelayLocation::get()).expect("conversion works")); let asset_one = Box::new(v3::Location { parents: 0, interior: [ @@ -229,12 +227,12 @@ fn swap_locally_on_chain_using_foreign_assets() { #[test] fn cannot_create_pool_from_pool_assets() { - let asset_native = asset_hub_westend_runtime::xcm_config::WestendLocation::get(); - let mut asset_one = asset_hub_westend_runtime::xcm_config::PoolAssetsPalletLocation::get(); + let asset_native = RelayLocation::get(); + let mut asset_one = ahw_xcm_config::PoolAssetsPalletLocation::get(); asset_one.append_with(GeneralIndex(ASSET_ID.into())).expect("pool assets"); AssetHubWestend::execute_with(|| { - let pool_owner_account_id = asset_hub_westend_runtime::AssetConversionOrigin::get(); + let pool_owner_account_id = AssetHubWestendAssetConversionOrigin::get(); assert_ok!(::PoolAssets::create( ::RuntimeOrigin::signed(pool_owner_account_id.clone()), @@ -264,9 +262,7 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = - v3::Location::try_from(asset_hub_westend_runtime::xcm_config::WestendLocation::get()) - .expect("conversion works"); + let asset_native = v3::Location::try_from(RelayLocation::get()).expect("conversion 
works"); let asset_one = xcm::v3::Location { parents: 0, interior: [ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index 6d8c0f5e5de6a..8cbce3e0d2232 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -27,10 +27,7 @@ fn create_and_claim_treasury_spend() { let treasury_location: Location = Location::new(1, PalletInstance(37)); // treasury account on a sibling parachain. let treasury_account = - asset_hub_westend_runtime::xcm_config::LocationToAccountId::convert_location( - &treasury_location, - ) - .unwrap(); + ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(0, Parachain(AssetHubWestend::para_id().into())); let root = ::RuntimeOrigin::root(); // asset kind to be spend from the treasury. diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs index dc89ef1f7a44e..c01aa78253363 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs @@ -18,7 +18,7 @@ use crate::imports::*; use frame_system::RawOrigin; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index bed5af92f6e55..a5787885329d7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,40 +11,38 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } # Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-core = { workspace = true } +frame-support = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = 
{ workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +pallet-bridge-messages = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } -asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo", default-features = false } +cumulus-pallet-xcmp-queue = { workspace = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-system-emulated-network = { workspace = true } +rococo-westend-system-emulated-network = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } # Snowbridge -snowbridge-core = { path = "../../../../../../../bridges/snowbridge/primitives/core", default-features = false } -snowbridge-router-primitives = { path = "../../../../../../../bridges/snowbridge/primitives/router", default-features = false } -snowbridge-pallet-system = { path = "../../../../../../../bridges/snowbridge/pallets/system", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../../../../../../bridges/snowbridge/pallets/outbound-queue", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "../../../../../../../bridges/snowbridge/pallets/inbound-queue/fixtures" } +snowbridge-core = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 0415af580ef8a..04466a611c713 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -35,19 +35,30 @@ mod imports { xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use 
rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ - genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + asset_hub_rococo_runtime::xcm_config as ahr_xcm_config, + genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, + AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_rococo_emulated_chain::{ - genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, + genesis::ED as BRIDGE_HUB_ROCOCO_ED, + BridgeHubRococoParaPallet as BridgeHubRococoPallet, BridgeHubRococoRuntimeOrigin, + BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, + }, + penpal_emulated_chain::{ + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + UniversalLocation as PenpalUniversalLocation, + }, + PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, }, - penpal_emulated_chain::{PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner}, rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 87fb70e4de238..6053936487b26 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -15,168 +15,140 @@ use crate::tests::*; -fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: Location, amount: u128) { - let destination = asset_hub_westend_location(); - +fn send_assets_over_bridge(send_fn: F) { // fund the AHR's SA on BHR for paying bridge transport fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions - AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); + let local_asset_hub = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + PenpalA::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubRococo::force_xcm_version(asset_hub_westend_location(), XCM_VERSION); BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); // send message over bridge - assert_ok!(send_asset_from_asset_hub_rococo(destination, (id, amount))); + send_fn(); + + // process and verify intermediary hops assert_bridge_hub_rococo_message_accepted(true); assert_bridge_hub_westend_message_received(); } -fn send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( - id: Location, - transfer_amount: u128, -) { - let destination = asset_hub_westend_location(); - let local_asset_hub: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); - let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( - AssetHubRococo::sibling_location_of(PenpalA::para_id()), - ); - let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, - AssetHubWestend::para_id(), - ); - - // fund the AHR's SA on BHR for paying bridge transport fees - 
BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); +fn set_up_rocs_for_penpal_rococo_through_ahr_to_ahw( + sender: &AccountId, + amount: u128, +) -> (Location, v3::Location) { + let roc_at_rococo_parachains = roc_at_ah_rococo(); + let roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); + create_foreign_on_ah_westend(roc_at_asset_hub_westend, true); - // set XCM versions - PenpalA::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); - AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); - BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); - - // send message over bridge - assert_ok!(PenpalA::execute_with(|| { - let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get()); - let beneficiary: Location = - AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); - let assets: Assets = (id.clone(), transfer_amount).into(); - let fees_id: AssetId = id.into(); - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary, - }]); + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount * 2)]); + // fund Penpal's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + roc_at_rococo_parachains.clone(), + sender.clone(), + amount * 2, + ); + (roc_at_rococo_parachains, roc_at_asset_hub_westend) +} - ::PolkadotXcm::transfer_assets_using_type_and_then( - signed_origin, - bx!(destination.into()), - bx!(assets.clone().into()), - bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), - bx!(fees_id.into()), - bx!(TransferType::RemoteReserve(local_asset_hub.into())), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - AssetHubRococo, - vec![ - // Amount to reserve transfer is withdrawn from Penpal's sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahr.clone().into(), - amount: *amount == transfer_amount, - }, - // Amount deposited in AHW's sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == sov_ahw_on_ahr.clone().into(), - }, - RuntimeEvent::XcmpQueue( - cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
} - ) => {}, - ] +fn send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + destination: Location, + assets: (Assets, TransferType), + fees: (AssetId, TransferType), + custom_xcm_on_dest: Xcm<()>, +) { + send_assets_over_bridge(|| { + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); + let sov_ahw_on_ahr = + AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + // send message over bridge + assert_ok!(PenpalA::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get()); + ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin, + bx!(destination.into()), + bx!(assets.0.into()), + bx!(assets.1), + bx!(fees.0.into()), + bx!(fees.1), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify intermediary AH Rococo hop + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, .. } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + }, + // Amount deposited in AHW's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahw_on_ahr.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); }); - assert_bridge_hub_rococo_message_accepted(true); - assert_bridge_hub_westend_message_received(); } #[test] -fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { - let roc_at_asset_hub_rococo: v3::Location = v3::Parent.into(); - let roc_at_asset_hub_westend = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend, - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); +/// Test transfer of ROC, USDT and wETH from AssetHub Rococo to AssetHub Westend. +/// +/// This mix of assets should cover the whole range: +/// - native assets: ROC, +/// - trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over bridge), +/// - foreign asset / bridged asset (other bridge / Snowfork): wETH (bridged from Ethereum to Rococo +/// over Snowbridge, then bridged over to Westend through this bridge). 
+fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { + let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; + let sender = AssetHubRococoSender::get(); + let receiver = AssetHubWestendReceiver::get(); + let roc_at_asset_hub_rococo: v3::Location = roc_at_ah_rococo().try_into().unwrap(); + let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); + + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend, true); + set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend); + + //////////////////////////////////////////////////////////// + // Let's first send over just some ROCs as a simple example + //////////////////////////////////////////////////////////// let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( Westend, AssetHubWestend::para_id(), ); - - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // setup a pool to pay xcm fees with `roc_at_asset_hub_westend` tokens - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - roc_at_asset_hub_westend.into(), - AssetHubWestendSender::get().into(), - 3_000_000_000_000, - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(roc_at_asset_hub_westend), - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(roc_at_asset_hub_westend), - 1_000_000_000_000, - 2_000_000_000_000, - 1, - 1, - AssetHubWestendSender::get().into() - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, - ] - ); - }); - let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; - let sender_rocs_before = - ::account_data_of(AssetHubRococoSender::get()).free; - let receiver_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + let sender_rocs_before = ::account_data_of(sender.clone()).free; + let receiver_rocs_before = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &receiver); + + // send ROCs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_westend_location(); + let assets: Assets = (Location::try_from(roc_at_asset_hub_rococo).unwrap(), amount).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); }); - let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; - send_asset_from_asset_hub_rococo_to_asset_hub_westend( - roc_at_asset_hub_rococo.try_into().unwrap(), - amount, - ); + // verify expected events on final destination AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -185,7 +157,7 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { // issue ROCs on AHW RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { asset_id: *asset_id == roc_at_asset_hub_rococo, - owner: *owner == AssetHubWestendReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -195,36 +167,100 @@ fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { ); }); - let sender_rocs_after = - ::account_data_of(AssetHubRococoSender::get()).free; - let receiver_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) - }); + let sender_rocs_after = ::account_data_of(sender.clone()).free; + let receiver_rocs_after = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &receiver); let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; - // Sender's balance is reduced + // Sender's ROC balance is reduced assert!(sender_rocs_before > sender_rocs_after); - // Receiver's balance is increased + // Receiver's ROC balance is increased assert!(receiver_rocs_after > receiver_rocs_before); - // Reserve balance is increased by sent amount + // Reserve ROC balance is increased by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); + + ///////////////////////////////////////////////////////////// + // Now let's send over USDTs + wETH (and pay fees with USDT) + ///////////////////////////////////////////////////////////// + + let usdt_at_asset_hub_rococo = usdt_at_ah_rococo(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + // wETH has same relative location on both Rococo and Westend AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); + + // mint USDT in sender's account (USDT already created in genesis) + AssetHubRococo::mint_asset( + ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), + USDT_ID, + sender.clone(), + amount * 2, + ); + // create wETH at src and dest and prefund sender's account + create_foreign_on_ah_rococo(bridged_weth_at_ah, true, vec![(sender.clone(), amount * 2)]); + create_foreign_on_ah_westend(bridged_weth_at_ah, true); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true); + set_up_pool_with_wnd_on_ah_westend(bridged_usdt_at_asset_hub_westend); + + let receiver_usdts_before = + foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); + let receiver_weth_before = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_at_asset_hub_rococo.clone(), amount).into(), + (Location::try_from(bridged_weth_at_ah).unwrap(), amount).into(), + ] + .into(); + // use USDT for fees + let fee: AssetId = usdt_at_asset_hub_rococo.into(); + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + }]); + assert_ok!(AssetHubRococo::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_westend_location().into()), + bx!(assets.into()), + bx!(TransferType::LocalReserve), + bx!(fee.into()), + bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); + 
AssetHubWestend::execute_with(|| { + AssetHubWestend::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = + foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); + let receiver_weth_after = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount); + // Receiver's wETH balance is increased by sent amount + assert_eq!(receiver_weth_after, receiver_weth_before + amount); } #[test] -fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { +/// Send bridged WNDs "back" from AssetHub Rococo to AssetHub Westend. +fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let prefund_amount = 10_000_000_000_000u128; - let wnd_at_asset_hub_rococo = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo, - owner, - true, - ASSET_MIN_BALANCE, - vec![(AssetHubRococoSender::get(), prefund_amount)], - ); + let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; + let sender = AssetHubRococoSender::get(); + let receiver = AssetHubWestendReceiver::get(); + let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); + let wnd_at_asset_hub_rococo_v3 = wnd_at_asset_hub_rococo.clone().try_into().unwrap(); + let prefund_accounts = vec![(sender.clone(), prefund_amount)]; + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo_v3, true, prefund_accounts); // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( @@ -236,19 +272,19 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); - let sender_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) - }); + + let sender_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); assert_eq!(sender_wnds_before, prefund_amount); - let receiver_wnds_before = - ::account_data_of(AssetHubWestendReceiver::get()).free; + let receiver_wnds_before = ::account_data_of(receiver.clone()).free; + + // send back WNDs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_westend_location(); + let assets: Assets = (wnd_at_asset_hub_rococo, amount_to_send).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); + }); - let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; - send_asset_from_asset_hub_rococo_to_asset_hub_westend( - Location::try_from(wnd_at_asset_hub_rococo).unwrap(), - amount_to_send, - ); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -263,7 +299,7 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { }, // WNDs deposited to beneficiary RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { - who: *who == AssetHubWestendReceiver::get(), + who: who == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -273,12 +309,8 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { ); }); - let sender_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) - }); - let receiver_wnds_after = - ::account_data_of(AssetHubWestendReceiver::get()).free; + let sender_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); + let receiver_wnds_after = ::account_data_of(receiver).free; let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -292,55 +324,47 @@ fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { #[test] fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { - let roc_at_rococo_parachains: Location = Parent.into(); - let roc_at_asset_hub_westend = Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend.clone().try_into().unwrap(), - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; + let sender = PenpalASender::get(); + let receiver = AssetHubWestendReceiver::get(); + let local_asset_hub = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let (roc_at_rococo_parachains, roc_at_asset_hub_westend) = + set_up_rocs_for_penpal_rococo_through_ahr_to_ahw(&sender, amount); + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( Westend, AssetHubWestend::para_id(), ); - - let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; - let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); - // fund Penpal's sovereign account on AssetHub - AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount * 2)]); - // fund Penpal's sender account - PenpalA::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - roc_at_rococo_parachains.clone(), - PenpalASender::get(), - amount * 2, - ); - let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; let sender_rocs_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance( - roc_at_rococo_parachains.clone(), - &PenpalASender::get(), - ) + >::balance(roc_at_rococo_parachains.clone(), &sender) }); - let receiver_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - roc_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) - }); - send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( - roc_at_rococo_parachains.clone(), - amount, - ); + let receiver_rocs_before = foreign_balance_on_ah_westend(roc_at_asset_hub_westend, &receiver); + + // Send ROCs over bridge + { + let destination = asset_hub_westend_location(); + let assets: Assets = (roc_at_rococo_parachains.clone(), amount).into(); + let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into()); + let fees_id: AssetId = roc_at_rococo_parachains.clone().into(); + let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into()); + let beneficiary: Location = + AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + let 
custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); + send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + destination, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + custom_xcm_on_dest, + ); + } + // process AHW incoming message and check events AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -349,7 +373,7 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() // issue ROCs on AHW RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { asset_id: *asset_id == roc_at_rococo_parachains.clone().try_into().unwrap(), - owner: *owner == AssetHubWestendReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -361,15 +385,9 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() let sender_rocs_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(roc_at_rococo_parachains, &PenpalASender::get()) - }); - let receiver_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - roc_at_asset_hub_westend.try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) + >::balance(roc_at_rococo_parachains, &sender) }); + let receiver_rocs_after = foreign_balance_on_ah_westend(roc_at_asset_hub_westend, &receiver); let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; @@ -381,3 +399,121 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() assert!(rocs_in_reserve_on_ahr_after > rocs_in_reserve_on_ahr_before); assert!(rocs_in_reserve_on_ahr_after <= rocs_in_reserve_on_ahr_before + amount); } + +#[test] +fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { + let wnd_at_rococo_parachains = bridged_wnd_at_ah_rococo(); + let wnd_at_rococo_parachains_v3 = wnd_at_rococo_parachains.clone().try_into().unwrap(); + let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; + let sender = PenpalASender::get(); + let receiver = AssetHubWestendReceiver::get(); + + // set up ROCs for transfer + let (roc_at_rococo_parachains, _) = + set_up_rocs_for_penpal_rococo_through_ahr_to_ahw(&sender, amount); + + // set up WNDs for transfer + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; + create_foreign_on_ah_rococo(wnd_at_rococo_parachains_v3, true, prefund_accounts); + let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE); + PenpalA::force_create_foreign_asset( + wnd_at_rococo_parachains.clone(), + asset_owner.clone(), + true, + ASSET_MIN_BALANCE, + vec![(sender.clone(), amount * 2)], + ); + + // fund the AHR's SA on AHW with the WND tokens held in reserve + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), amount * 2)]); + + // balances before + let sender_wnds_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.clone().into(), &sender) + }); + let receiver_wnds_before = ::account_data_of(receiver.clone()).free; + + // send WNDs over the bridge, ROCs only used to pay fees on local 
AH, pay with WND on remote AH + { + let final_destination = asset_hub_westend_location(); + let intermediary_hop = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let context = PenpalA::execute_with(|| PenpalUniversalLocation::get()); + + // what happens at final destination + let beneficiary = AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + // use WND as fees on the final destination (AHW) + let remote_fees: Asset = (wnd_at_rococo_parachains.clone(), amount).into(); + let remote_fees = remote_fees.reanchored(&final_destination, &context).unwrap(); + // buy execution using WNDs, then deposit all remaining WNDs + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: WeightLimit::Unlimited }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary }, + ]); + + // what happens at intermediary hop + // reanchor final dest (Asset Hub Westend) to the view of hop (Asset Hub Rococo) + let mut final_destination = final_destination.clone(); + final_destination.reanchor(&intermediary_hop, &context).unwrap(); + // reanchor WNDs to the view of hop (Asset Hub Rococo) + let asset: Asset = (wnd_at_rococo_parachains.clone(), amount).into(); + let asset = asset.reanchored(&intermediary_hop, &context).unwrap(); + // on Asset Hub Rococo, forward a request to withdraw WNDs from reserve on Asset Hub Westend + let xcm_on_hop = Xcm::<()>(vec![InitiateReserveWithdraw { + assets: Definite(asset.into()), // WNDs + reserve: final_destination, // AHW + xcm: xcm_on_final_dest, // XCM to execute on AHW + }]); + // assets to send from Penpal and how they reach the intermediary hop + let assets: Assets = vec![ + (wnd_at_rococo_parachains.clone(), amount).into(), + (roc_at_rococo_parachains.clone(), amount).into(), + ] + .into(); + let asset_transfer_type = TransferType::DestinationReserve; + let fees_id: AssetId = roc_at_rococo_parachains.into(); + let fees_transfer_type = TransferType::DestinationReserve; + + // initiate the transfer + send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( + intermediary_hop, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + xcm_on_hop, + ); + } + + // process AHW incoming message and check events + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::Balances(pallet_balances::Event::Issued { .. }) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(wnd_at_rococo_parachains.into(), &sender) + }); + let receiver_wnds_after = ::account_data_of(receiver).free; + + // Sender's balance is reduced by sent "amount" + assert_eq!(sender_wnds_after, sender_wnds_before - amount); + // Receiver's balance is increased by no more than "amount" + assert!(receiver_wnds_after > receiver_wnds_before); + assert!(receiver_wnds_after <= receiver_wnds_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 88dad06434b0d..58c52e1328c81 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -21,39 +21,141 @@ mod snowbridge; mod teleport; pub(crate) fn asset_hub_westend_location() -> Location { + Location::new(2, [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())]) +} +pub(crate) fn bridge_hub_westend_location() -> Location { + Location::new(2, [GlobalConsensus(Westend), Parachain(BridgeHubWestend::para_id().into())]) +} + +// ROC and wROC +pub(crate) fn roc_at_ah_rococo() -> Location { + Parent.into() +} +pub(crate) fn bridged_roc_at_ah_westend() -> Location { + Location::new(2, [GlobalConsensus(Rococo)]) +} + +// wWND +pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { + Location::new(2, [GlobalConsensus(Westend)]) +} + +// USDT and wUSDT +pub(crate) fn usdt_at_ah_rococo() -> Location { + Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) +} +pub(crate) fn bridged_usdt_at_ah_westend() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Westend), Parachain(AssetHubWestend::para_id().into())], + [ + GlobalConsensus(Rococo), + Parachain(AssetHubRococo::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(USDT_ID.into()), + ], ) } -pub(crate) fn bridge_hub_westend_location() -> Location { +// wETH has same relative location on both Rococo and Westend AssetHubs +pub(crate) fn weth_at_asset_hubs() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Westend), Parachain(BridgeHubWestend::para_id().into())], + [ + GlobalConsensus(Ethereum { chain_id: snowbridge::CHAIN_ID }), + AccountKey20 { network: None, key: snowbridge::WETH }, + ], ) } -pub(crate) fn send_asset_from_asset_hub_rococo( +pub(crate) fn create_foreign_on_ah_rococo( + id: v3::Location, + sufficient: bool, + prefund_accounts: Vec<(AccountId, u128)>, +) { + let owner = AssetHubRococo::account_id_of(ALICE); + let min = ASSET_MIN_BALANCE; + AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); +} + +pub(crate) fn create_foreign_on_ah_westend(id: v3::Location, sufficient: bool) { + let owner = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); +} + +pub(crate) fn foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { + AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} +pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { + AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} + +// set up 
pool +pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v3::Location) { + let wnd: v3::Location = v3::Parent.into(); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + let owner = AssetHubWestendSender::get(); + let signed_owner = ::RuntimeOrigin::signed(owner.clone()); + + assert_ok!(::ForeignAssets::mint( + signed_owner.clone(), + foreign_asset.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + assert_ok!(::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(wnd), + Box::new(foreign_asset), + )); + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + assert_ok!(::AssetConversion::add_liquidity( + signed_owner.clone(), + Box::new(wnd), + Box::new(foreign_asset), + 1_000_000_000_000, + 2_000_000_000_000, + 1, + 1, + owner.into() + )); + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, + ] + ); + }); +} + +pub(crate) fn send_assets_from_asset_hub_rococo( destination: Location, - (id, amount): (Location, u128), + assets: Assets, + fee_idx: u32, ) -> DispatchResult { let signed_origin = ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); - let beneficiary: Location = AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); - let assets: Assets = (id, amount).into(); - let fee_asset_item = 0; - AssetHubRococo::execute_with(|| { ::PolkadotXcm::limited_reserve_transfer_assets( signed_origin, bx!(destination.into()), bx!(beneficiary.into()), bx!(assets.into()), - fee_asset_item, + fee_idx, WeightLimit::Unlimited, ) }) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 78788634e6ff4..652447fa56010 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -81,7 +81,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // send XCM from AssetHubRococo - fails - destination version not known assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), + send_assets_from_asset_hub_rococo( + destination.clone(), + (native_token.clone(), amount).into(), + 0 + ), DispatchError::Module(sp_runtime::ModuleError { index: 31, error: [1, 0, 0, 0], @@ -98,9 +102,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { newer_xcm_version, ); // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo( + assert_ok!(send_assets_from_asset_hub_rococo( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0, )); // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known @@ -115,9 +120,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo( + assert_ok!(send_assets_from_asset_hub_rococo( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0, )); assert_bridge_hub_rococo_message_accepted(true); assert_bridge_hub_westend_message_received(); diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 8196b27cfe028..40a1968ec557b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -13,12 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. use crate::imports::*; -use bridge_hub_rococo_runtime::{EthereumBeaconClient, EthereumInboundQueue, RuntimeOrigin}; use codec::{Decode, Encode}; use emulated_integration_tests_common::xcm_emulator::ConvertLocation; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; -use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ @@ -34,10 +32,10 @@ use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; -const CHAIN_ID: u64 = 11155111; +pub const CHAIN_ID: u64 = 11155111; const TREASURY_ACCOUNT: [u8; 32] = hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); -const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const INSUFFICIENT_XCM_FEE: u128 = 1000; const XCM_FEE: u128 = 4_000_000_000; @@ -64,7 +62,7 @@ pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult { ) .unwrap(); EthereumInboundQueue::submit( - RuntimeOrigin::signed(BridgeHubRococoSender::get()), + BridgeHubRococoRuntimeOrigin::signed(BridgeHubRococoSender::get()), fixture.message, ) } @@ -298,7 +296,7 @@ fn send_token_from_ethereum_to_penpal() { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( - CustomizableAssetFromSystemAssetHub::key().to_vec(), + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(), )], )); @@ -379,7 +377,7 @@ fn send_token_from_ethereum_to_penpal() { /// - returning the token to Ethereum #[test] fn send_weth_asset_from_asset_hub_to_ethereum() { - use asset_hub_rococo_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; + use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee; let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs index 8f51f5b180004..1fb03748d926c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs @@ -13,8 +13,7 @@ // See the License for the 
specific language governing permissions and // limitations under the License. -use crate::tests::*; -use bridge_hub_rococo_runtime::xcm_config::XcmConfig; +use crate::imports::*; #[test] fn teleport_to_other_system_parachains_works() { @@ -22,9 +21,9 @@ fn teleport_to_other_system_parachains_works() { let native_asset: Assets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( - BridgeHubRococo, // Origin - XcmConfig, // XCM configuration - vec![AssetHubRococo], // Destinations + BridgeHubRococo, // Origin + BridgeHubRococoXcmConfig, // XCM configuration + vec![AssetHubRococo], // Destinations (native_asset, amount) ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 6aebf8862d62e..6b83479eaf89a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,26 +11,26 @@ publish = false workspace = true [dependencies] +hex-literal = { workspace = true, default-features = true } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +pallet-xcm = { workspace = true } +xcm-executor = { workspace = true } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +pallet-bridge-messages = { workspace = true } # Cumulus -parachains-common = { path = "../../../../../common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-westend-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-westend", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } +cumulus-pallet-xcmp-queue = { workspace = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 36b846e103131..3b0fcea57a26f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -36,20 +36,25 @@ mod imports { xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ - genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, + AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_westend_emulated_chain::{ genesis::ED as BRIDGE_HUB_WESTEND_ED, - BridgeHubWestendParaPallet as BridgeHubWestendPallet, + BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendXcmConfig, + }, + penpal_emulated_chain::{ + penpal_runtime::xcm_config::UniversalLocation as PenpalUniversalLocation, + PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, - penpal_emulated_chain::{PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet}, westend_emulated_chain::WestendRelayPallet as WestendPallet, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 597e77d9049cf..0c0b04cd45a91 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -14,165 +14,132 @@ // limitations under the License. 
use crate::tests::*; -fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: Location, amount: u128) { - let destination = asset_hub_rococo_location(); - +fn send_assets_over_bridge(send_fn: F) { // fund the AHW's SA on BHW for paying bridge transport fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions - AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); + let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + PenpalB::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); + AssetHubWestend::force_xcm_version(asset_hub_rococo_location(), XCM_VERSION); BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); // send message over bridge - assert_ok!(send_asset_from_asset_hub_westend(destination, (id, amount))); + send_fn(); + + // process and verify intermediary hops assert_bridge_hub_westend_message_accepted(true); assert_bridge_hub_rococo_message_received(); } -fn send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( - id: Location, - transfer_amount: u128, -) { - let destination = asset_hub_rococo_location(); - let local_asset_hub: Location = PenpalB::sibling_location_of(AssetHubWestend::para_id()); - let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalB::para_id()), - ); - let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, - AssetHubRococo::para_id(), - ); - - // fund the AHW's SA on BHW for paying bridge transport fees - BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); +fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr( + sender: &AccountId, + amount: u128, +) -> (Location, v3::Location) { + let wnd_at_westend_parachains = wnd_at_ah_westend(); + let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo, true); - // set XCM versions - PenpalB::force_xcm_version(local_asset_hub.clone(), XCM_VERSION); - AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); - BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); - - // send message over bridge - assert_ok!(PenpalB::execute_with(|| { - let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); - let beneficiary: Location = - AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); - let assets: Assets = (id.clone(), transfer_amount).into(); - let fees_id: AssetId = id.into(); - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary, - }]); + let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); + // fund Penpal's sovereign account on AssetHub + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount * 2)]); + // fund Penpal's sender account + PenpalB::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + wnd_at_westend_parachains.clone(), + sender.clone(), + amount * 2, + ); + (wnd_at_westend_parachains, wnd_at_asset_hub_rococo) +} - ::PolkadotXcm::transfer_assets_using_type_and_then( - signed_origin, - bx!(destination.into()), - bx!(assets.into()), - bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), - bx!(fees_id.into()), - 
bx!(TransferType::RemoteReserve(local_asset_hub.into())), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - AssetHubWestend, - vec![ - // Amount to reserve transfer is withdrawn from Penpal's sovereign account - RuntimeEvent::Balances( - pallet_balances::Event::Burned { who, amount } - ) => { - who: *who == sov_penpal_on_ahw.clone().into(), - amount: *amount == transfer_amount, - }, - // Amount deposited in AHR's sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == sov_ahr_on_ahw.clone().into(), - }, - RuntimeEvent::XcmpQueue( - cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } - ) => {}, - ] +fn send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + destination: Location, + assets: (Assets, TransferType), + fees: (AssetId, TransferType), + custom_xcm_on_dest: Xcm<()>, +) { + send_assets_over_bridge(|| { + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), ); + let sov_ahr_on_ahw = + AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + Rococo, + AssetHubRococo::para_id(), + ); + + // send message over bridge + assert_ok!(PenpalB::execute_with(|| { + let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); + ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin, + bx!(destination.into()), + bx!(assets.0.into()), + bx!(assets.1), + bx!(fees.0.into()), + bx!(fees.1), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify intermediary AH Westend hop + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, .. } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + }, + // Amount deposited in AHR's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahr_on_ahw.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); }); - assert_bridge_hub_westend_message_accepted(true); - assert_bridge_hub_rococo_message_received(); } #[test] +/// Test transfer of WND from AssetHub Westend to AssetHub Rococo. 
fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { - let wnd_at_asset_hub_westend: Location = Parent.into(); - let wnd_at_asset_hub_rococo = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo, - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_WESTEND_ED * 1_000; + let sender = AssetHubWestendSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let wnd_at_asset_hub_westend = wnd_at_ah_westend(); + let bridged_wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); + create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + + set_up_pool_with_roc_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( Rococo, AssetHubRococo::para_id(), ); - - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // setup a pool to pay xcm fees with `wnd_at_asset_hub_rococo` tokens - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - wnd_at_asset_hub_rococo.into(), - AssetHubRococoSender::get().into(), - 3_000_000_000_000, - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(wnd_at_asset_hub_rococo), - )); - - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(xcm::v3::Parent.into()), - Box::new(wnd_at_asset_hub_rococo), - 1_000_000_000_000, - 2_000_000_000_000, - 1, - 1, - AssetHubRococoSender::get().into() - )); - - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, - ] - ); - }); - let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; - let sender_wnds_before = - ::account_data_of(AssetHubWestendSender::get()).free; - let receiver_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + let sender_wnds_before = ::account_data_of(sender.clone()).free; + let receiver_wnds_before = + foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); + + // send WNDs, use them for fees + send_assets_over_bridge(|| { + let destination = asset_hub_rococo_location(); + let assets: Assets = (wnd_at_asset_hub_westend, amount).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_westend(destination, assets, fee_idx)); }); - let amount = ASSET_HUB_WESTEND_ED * 1_000; - send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + // verify expected events on final destination AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -180,8 +147,8 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { vec![ // issue WNDs on AHR RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == wnd_at_asset_hub_rococo, - owner: *owner == AssetHubRococoReceiver::get(), + asset_id: *asset_id == bridged_wnd_at_asset_hub_rococo, + owner: *owner == receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -191,12 +158,9 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { ); }); - let sender_wnds_after = - ::account_data_of(AssetHubWestendSender::get()).free; - let receiver_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) - }); + let sender_wnds_after = ::account_data_of(sender).free; + let receiver_wnds_after = + foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -209,18 +173,28 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { } #[test] -fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { +/// Send bridged assets "back" from AssetHub Westend to AssetHub Rococo. +/// +/// This mix of assets should cover the whole range: +/// - bridged native assets: ROC, +/// - bridged trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over +/// bridge), +/// - bridged foreign asset / double-bridged asset (other bridge / Snowfork): wETH (bridged from +/// Ethereum to Rococo over Snowbridge, then bridged over to Westend through this bridge). +fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { let prefund_amount = 10_000_000_000_000u128; - let roc_at_asset_hub_westend = - v3::Location::new(2, [v3::Junction::GlobalConsensus(v3::NetworkId::Rococo)]); - let owner: AccountId = AssetHubWestend::account_id_of(ALICE); - AssetHubWestend::force_create_foreign_asset( - roc_at_asset_hub_westend, - owner, - true, - ASSET_MIN_BALANCE, - vec![(AssetHubWestendSender::get(), prefund_amount)], - ); + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + let sender = AssetHubWestendSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); + let bridged_roc_at_asset_hub_westend_v3 = + bridged_roc_at_asset_hub_westend.clone().try_into().unwrap(); + let prefund_accounts = vec![(sender.clone(), prefund_amount)]; + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, true, prefund_accounts); + + //////////////////////////////////////////////////////////// + // Let's first send back just some ROCs as a simple example + //////////////////////////////////////////////////////////// // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( @@ -232,19 +206,20 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { let rocs_in_reserve_on_ahr_before = ::account_data_of(sov_ahw_on_ahr.clone()).free; assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); - let sender_rocs_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) - }); + + let sender_rocs_before = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); assert_eq!(sender_rocs_before, prefund_amount); - let receiver_rocs_before = - ::account_data_of(AssetHubRococoReceiver::get()).free; + let receiver_rocs_before = ::account_data_of(receiver.clone()).free; + + // send back ROCs, use them for fees + send_assets_over_bridge(|| { + let
destination = asset_hub_rococo_location(); + let assets: Assets = (bridged_roc_at_asset_hub_westend, amount_to_send).into(); + let fee_idx = 0; + assert_ok!(send_assets_from_asset_hub_westend(destination, assets, fee_idx)); + }); - let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; - send_asset_from_asset_hub_westend_to_asset_hub_rococo( - roc_at_asset_hub_westend.try_into().unwrap(), - amount_to_send, - ); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -259,7 +234,7 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { }, // ROCs deposited to beneficiary RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == AssetHubRococoReceiver::get(), + who: *who == receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -269,12 +244,9 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { ); }); - let sender_rocs_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) - }); - let receiver_rocs_after = - ::account_data_of(AssetHubRococoReceiver::get()).free; + let sender_rocs_after = + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); + let receiver_rocs_after = ::account_data_of(receiver.clone()).free; let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; @@ -284,59 +256,141 @@ fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { assert!(receiver_rocs_after > receiver_rocs_before); // Reserve balance is reduced by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); + + ////////////////////////////////////////////////////////////////// + // Now let's send back over USDTs + wETH (and pay fees with USDT) + ////////////////////////////////////////////////////////////////// + + // wETH has same relative location on both Rococo and Westend AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + + // set up destination chain AH Rococo: + // create a ROC/USDT pool to be able to pay fees with USDT (USDT created in genesis) + set_up_pool_with_roc_on_ah_rococo(usdt_at_ah_rococo().try_into().unwrap(), false); + // create wETH on Rococo (IRL it's already created by Snowbridge) + create_foreign_on_ah_rococo(bridged_weth_at_ah, true); + // prefund AHW's sovereign account on AHR to be able to withdraw USDT and wETH from reserves + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::mint_asset( + ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), + USDT_ID, + sov_ahw_on_ahr.clone(), + amount_to_send * 2, + ); + AssetHubRococo::mint_foreign_asset( + ::RuntimeOrigin::signed(AssetHubRococo::account_id_of(ALICE)), + bridged_weth_at_ah, + sov_ahw_on_ahr, + amount_to_send * 2, + ); + + // set up source chain AH Westend: + // create wETH and USDT foreign assets on Westend and prefund sender's account + let prefund_accounts = vec![(sender.clone(), amount_to_send * 2)]; + create_foreign_on_ah_westend(bridged_weth_at_ah, true, prefund_accounts.clone()); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true, prefund_accounts); + + // check balances before + let receiver_usdts_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, 
&receiver) + }); + let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + + let usdt_id: AssetId = Location::try_from(bridged_usdt_at_asset_hub_westend).unwrap().into(); + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_id.clone(), amount_to_send).into(), + (Location::try_from(bridged_weth_at_ah).unwrap(), amount_to_send).into(), + ] + .into(); + // use USDT for fees + let fee = usdt_id; + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + }]); + assert_ok!(AssetHubWestend::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_rococo_location().into()), + bx!(assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); + AssetHubRococo::execute_with(|| { + AssetHubRococo::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, &receiver) + }); + let receiver_weth_after = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount_to_send` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount_to_send); + // Receiver's wETH balance is increased by `amount_to_send` + assert_eq!(receiver_weth_after, receiver_weth_before + amount_to_send); } #[test] fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { - let wnd_at_westend_parachains: Location = Parent.into(); - let wnd_at_asset_hub_rococo = Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); - let owner: AccountId = AssetHubRococo::account_id_of(ALICE); - AssetHubRococo::force_create_foreign_asset( - wnd_at_asset_hub_rococo.clone().try_into().unwrap(), - owner, - true, - ASSET_MIN_BALANCE, - vec![], - ); + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + let sender = PenpalBSender::get(); + let receiver = AssetHubRococoReceiver::get(); + let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let (wnd_at_westend_parachains, wnd_at_asset_hub_rococo) = + set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( Rococo, AssetHubRococo::para_id(), ); - - let amount = ASSET_HUB_WESTEND_ED * 10_000_000; - let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); - let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); - // fund Penpal's sovereign account on AssetHub - AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount * 2)]); - // fund Penpal's sender account - PenpalB::mint_foreign_asset( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - wnd_at_westend_parachains.clone(), - PenpalBSender::get(), - amount * 2, - ); - let wnds_in_reserve_on_ahw_before = ::account_data_of(sov_ahr_on_ahw.clone()).free; let sender_wnds_before = PenpalB::execute_with(|| { type 
ForeignAssets = ::ForeignAssets; - >::balance( - wnd_at_westend_parachains.clone(), - &PenpalBSender::get(), - ) + >::balance(wnd_at_westend_parachains.clone(), &sender) }); - let receiver_wnds_before = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - wnd_at_asset_hub_rococo.clone().try_into().unwrap(), - &AssetHubRococoReceiver::get(), - ) - }); - send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( - wnd_at_westend_parachains.clone(), - amount, - ); + let receiver_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &receiver); + + // Send WNDs over bridge + { + let destination = asset_hub_rococo_location(); + let assets: Assets = (wnd_at_westend_parachains.clone(), amount).into(); + let asset_transfer_type = TransferType::RemoteReserve(local_asset_hub.clone().into()); + let fees_id: AssetId = wnd_at_westend_parachains.clone().into(); + let fees_transfer_type = TransferType::RemoteReserve(local_asset_hub.into()); + let beneficiary: Location = + AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); + send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + destination, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + custom_xcm_on_dest, + ); + } + // process AHR incoming message and check events AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( @@ -345,7 +399,7 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() // issue WNDs on AHR RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { asset_id: *asset_id == wnd_at_westend_parachains.clone().try_into().unwrap(), - owner: *owner == AssetHubRococoReceiver::get(), + owner: owner == &receiver, }, // message processed successfully RuntimeEvent::MessageQueue( @@ -357,15 +411,9 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() let sender_wnds_after = PenpalB::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(wnd_at_westend_parachains, &PenpalBSender::get()) - }); - let receiver_wnds_after = AssetHubRococo::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - wnd_at_asset_hub_rococo.try_into().unwrap(), - &AssetHubRococoReceiver::get(), - ) + >::balance(wnd_at_westend_parachains, &sender) }); + let receiver_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &receiver); let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw.clone()).free; @@ -377,3 +425,121 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before); assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + amount); } + +#[test] +fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { + let roc_at_westend_parachains = bridged_roc_at_ah_westend(); + let roc_at_westend_parachains_v3 = roc_at_westend_parachains.clone().try_into().unwrap(); + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + let sender = PenpalBSender::get(); + let receiver = AssetHubRococoReceiver::get(); + + // set up WNDs for transfer + let (wnd_at_westend_parachains, _) = + set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); + + // set up ROCs for transfer + let penpal_location = 
AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location); + let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; + create_foreign_on_ah_westend(roc_at_westend_parachains_v3, true, prefund_accounts); + let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE); + PenpalB::force_create_foreign_asset( + roc_at_westend_parachains.clone(), + asset_owner.clone(), + true, + ASSET_MIN_BALANCE, + vec![(sender.clone(), amount * 2)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]); + + // balances before + let sender_rocs_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.clone().into(), &sender) + }); + let receiver_rocs_before = ::account_data_of(receiver.clone()).free; + + // send ROCs over the bridge, WNDs only used to pay fees on local AH, pay with ROC on remote AH + { + let final_destination = asset_hub_rococo_location(); + let intermediary_hop = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let context = PenpalB::execute_with(|| PenpalUniversalLocation::get()); + + // what happens at final destination + let beneficiary = AccountId32Junction { network: None, id: receiver.clone().into() }.into(); + // use ROC as fees on the final destination (AHR) + let remote_fees: Asset = (roc_at_westend_parachains.clone(), amount).into(); + let remote_fees = remote_fees.reanchored(&final_destination, &context).unwrap(); + // buy execution using ROCs, then deposit all remaining ROCs + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: WeightLimit::Unlimited }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary }, + ]); + + // what happens at intermediary hop + // reanchor final dest (Asset Hub Rococo) to the view of hop (Asset Hub Westend) + let mut final_destination = final_destination.clone(); + final_destination.reanchor(&intermediary_hop, &context).unwrap(); + // reanchor ROCs to the view of hop (Asset Hub Westend) + let asset: Asset = (roc_at_westend_parachains.clone(), amount).into(); + let asset = asset.reanchored(&intermediary_hop, &context).unwrap(); + // on Asset Hub Westend, forward a request to withdraw ROCs from reserve on Asset Hub Rococo + let xcm_on_hop = Xcm::<()>(vec![InitiateReserveWithdraw { + assets: Definite(asset.into()), // ROCs + reserve: final_destination, // AHR + xcm: xcm_on_final_dest, // XCM to execute on AHR + }]); + // assets to send from Penpal and how they reach the intermediary hop + let assets: Assets = vec![ + (roc_at_westend_parachains.clone(), amount).into(), + (wnd_at_westend_parachains.clone(), amount).into(), + ] + .into(); + let asset_transfer_type = TransferType::DestinationReserve; + let fees_id: AssetId = wnd_at_westend_parachains.into(); + let fees_transfer_type = TransferType::DestinationReserve; + + // initiate the transfer + send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( + intermediary_hop, + (assets, asset_transfer_type), + (fees_id, fees_transfer_type), + xcm_on_hop, + ); + } + + // process AHR incoming message and check events + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue ROCs
on AHR + RuntimeEvent::Balances(pallet_balances::Event::Issued { .. }) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(roc_at_westend_parachains.into(), &sender) + }); + let receiver_rocs_after = ::account_data_of(receiver).free; + + // Sender's balance is reduced by sent "amount" + assert_eq!(sender_rocs_after, sender_rocs_before - amount); + // Receiver's balance is increased by no more than "amount" + assert!(receiver_rocs_after > receiver_rocs_before); + assert!(receiver_rocs_after <= receiver_rocs_before + amount); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index b781d6e987ca1..92e864229a9cd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -19,40 +19,161 @@ mod asset_transfers; mod send_xcm; mod teleport; +mod snowbridge { + pub const CHAIN_ID: u64 = 11155111; + pub const WETH: [u8; 20] = hex_literal::hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); +} + pub(crate) fn asset_hub_rococo_location() -> Location { + Location::new(2, [GlobalConsensus(Rococo), Parachain(AssetHubRococo::para_id().into())]) +} + +pub(crate) fn bridge_hub_rococo_location() -> Location { + Location::new(2, [GlobalConsensus(Rococo), Parachain(BridgeHubRococo::para_id().into())]) +} + +// WND and wWND +pub(crate) fn wnd_at_ah_westend() -> Location { + Parent.into() +} +pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { + Location::new(2, [GlobalConsensus(Westend)]) +} + +// wROC +pub(crate) fn bridged_roc_at_ah_westend() -> Location { + Location::new(2, [GlobalConsensus(Rococo)]) +} + +// USDT and wUSDT +pub(crate) fn usdt_at_ah_rococo() -> Location { + Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) +} +pub(crate) fn bridged_usdt_at_ah_westend() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Rococo), Parachain(AssetHubRococo::para_id().into())], + [ + GlobalConsensus(Rococo), + Parachain(AssetHubRococo::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(USDT_ID.into()), + ], ) } -pub(crate) fn bridge_hub_rococo_location() -> Location { +// wETH has same relative location on both Rococo and Westend AssetHubs +pub(crate) fn weth_at_asset_hubs() -> Location { Location::new( 2, - [GlobalConsensus(NetworkId::Rococo), Parachain(BridgeHubRococo::para_id().into())], + [ + GlobalConsensus(Ethereum { chain_id: snowbridge::CHAIN_ID }), + AccountKey20 { network: None, key: snowbridge::WETH }, + ], ) } -pub(crate) fn send_asset_from_asset_hub_westend( +pub(crate) fn create_foreign_on_ah_rococo(id: v3::Location, sufficient: bool) { + let owner = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); +} + +pub(crate) fn create_foreign_on_ah_westend( + id: v3::Location, + sufficient: bool, + prefund_accounts: Vec<(AccountId, u128)>, +) { + let owner = AssetHubWestend::account_id_of(ALICE); + let min = ASSET_MIN_BALANCE; + AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); +} + +pub(crate) fn 
foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { + AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} +pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { + AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(id, who) + }) +} + +// set up pool +pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v3::Location, is_foreign: bool) { + let roc: v3::Location = v3::Parent.into(); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + let owner = AssetHubRococoSender::get(); + let signed_owner = ::RuntimeOrigin::signed(owner.clone()); + + if is_foreign { + assert_ok!(::ForeignAssets::mint( + signed_owner.clone(), + asset.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } else { + let asset_id = match asset.interior.split_last() { + (_, Some(v3::Junction::GeneralIndex(id))) => id as u32, + _ => unreachable!(), + }; + assert_ok!(::Assets::mint( + signed_owner.clone(), + asset_id.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } + assert_ok!(::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(roc), + Box::new(asset), + )); + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + assert_ok!(::AssetConversion::add_liquidity( + signed_owner.clone(), + Box::new(roc), + Box::new(asset), + 1_000_000_000_000, + 2_000_000_000_000, + 1, + 1, + owner.into() + )); + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, + ] + ); + }); +} + +pub(crate) fn send_assets_from_asset_hub_westend( destination: Location, - (id, amount): (Location, u128), + assets: Assets, + fee_idx: u32, ) -> DispatchResult { let signed_origin = ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); - let beneficiary: Location = AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); - let assets: Assets = (id, amount).into(); - let fee_asset_item = 0; - AssetHubWestend::execute_with(|| { ::PolkadotXcm::limited_reserve_transfer_assets( signed_origin, bx!(destination.into()), bx!(beneficiary.into()), bx!(assets.into()), - fee_asset_item, + fee_idx, WeightLimit::Unlimited, ) }) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 8539df97be933..dee411bea8b73 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -81,7 +81,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // send XCM from AssetHubWestend - fails - destination version not known assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), + send_assets_from_asset_hub_westend( + destination.clone(), + (native_token.clone(), amount).into(), + 0 + ), DispatchError::Module(sp_runtime::ModuleError { index: 31, error: [1, 0, 0, 0], @@ -98,9 +102,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { newer_xcm_version, ); // send XCM from AssetHubWestend - ok - assert_ok!(send_asset_from_asset_hub_westend( + 
assert_ok!(send_assets_from_asset_hub_westend( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0 )); // `ExportMessage` on local BridgeHub - fails - remote BridgeHub version not known @@ -115,9 +120,10 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // send XCM from AssetHubWestend - ok - assert_ok!(send_asset_from_asset_hub_westend( + assert_ok!(send_assets_from_asset_hub_westend( destination.clone(), - (native_token.clone(), amount) + (native_token.clone(), amount).into(), + 0 )); assert_bridge_hub_westend_message_accepted(true); assert_bridge_hub_rococo_message_received(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index c960233c08b73..64378a844f52a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -13,8 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::tests::*; -use bridge_hub_westend_runtime::xcm_config::XcmConfig; +use crate::imports::*; #[test] fn teleport_to_other_system_parachains_works() { @@ -22,9 +21,9 @@ fn teleport_to_other_system_parachains_works() { let native_asset: Assets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( - BridgeHubWestend, // Origin - XcmConfig, // XCM configuration - vec![AssetHubWestend], // Destinations + BridgeHubWestend, // Origin + BridgeHubWestendXcmConfig, // XCM configuration + vec![AssetHubWestend], // Destinations (native_asset, amount) ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index 297f68de62183..3012e2b19f532 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,33 +11,30 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -assert_matches = "1.5.0" +codec = { workspace = true } +assert_matches = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-assets 
= { workspace = true } +pallet-treasury = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-utility = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } +polkadot-runtime-common = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +westend-runtime-constants = { workspace = true, default-features = true } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } -testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } -asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } -collectives-westend-runtime = { path = "../../../../../runtimes/collectives/collectives-westend" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +parachains-common = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +emulated-integration-tests-common = { workspace = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs index 97239330216ac..8af93a62f4a17 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs @@ -19,9 +19,18 @@ pub use emulated_integration_tests_common::xcm_emulator::{ assert_expected_events, bx, Chain, RelayChain as Relay, TestExt, }; pub use westend_system_emulated_network::{ - asset_hub_westend_emulated_chain::AssetHubWestendParaPallet as AssetHubWestendPallet, - collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, - westend_emulated_chain::WestendRelayPallet as WestendPallet, + asset_hub_westend_emulated_chain::{ + asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId, + AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + collectives_westend_emulated_chain::{ + collectives_westend_runtime::fellowship as collectives_fellowship, + CollectivesWestendParaPallet as CollectivesWestendPallet, + }, + westend_emulated_chain::{ + westend_runtime::{governance as westend_governance, OriginCaller as WestendOriginCaller}, + WestendRelayPallet 
as WestendPallet, + }, AssetHubWestendPara as AssetHubWestend, CollectivesWestendPara as CollectivesWestend, WestendRelay as Westend, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index bde1220e2495b..abd9a982c8ed5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -14,14 +14,12 @@ // limitations under the License. use crate::*; -use asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId; use emulated_integration_tests_common::accounts::ALICE; use frame_support::{ assert_ok, dispatch::RawOrigin, instances::Instance1, sp_runtime::traits::Dispatchable, traits::fungible::Inspect, }; use polkadot_runtime_common::impls::VersionedLocatableAsset; -use westend_runtime::OriginCaller; use westend_runtime_constants::currency::UNITS; use xcm_executor::traits::ConvertLocation; @@ -65,7 +63,7 @@ fn fellowship_treasury_spend() { let treasury_location: Location = (Parent, PalletInstance(37)).into(); let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { - as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + as_origin: bx!(WestendOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), beneficiary: bx!(VersionedLocation::V4(treasury_location)), @@ -97,7 +95,7 @@ fn fellowship_treasury_spend() { // Fund Fellowship Treasury from Westend Treasury. let treasury_origin: RuntimeOrigin = - westend_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + westend_governance::pallet_custom_origins::Origin::Treasurer.into(); let fellowship_treasury_location: Location = Location::new(1, [Parachain(1001), PalletInstance(65)]); let asset_hub_location: Location = [Parachain(1000)].into(); @@ -170,8 +168,7 @@ fn fellowship_treasury_spend() { // Fund Alice account from Fellowship Treasury. 
let fellows_origin: RuntimeOrigin = - collectives_westend_runtime::fellowship::pallet_fellowship_origins::Origin::Fellows - .into(); + collectives_fellowship::pallet_fellowship_origins::Origin::Fellows.into(); let asset_hub_location: Location = (Parent, Parachain(1000)).into(); let native_asset = Location::parent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 29a939951e597..011be93ecac73 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -8,25 +8,23 @@ description = "People Rococo runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-identity = { path = "../../../../../../../substrate/frame/identity", default-features = false } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +polkadot-runtime-common = { workspace = true, default-features = true } +rococo-runtime-constants = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../common" } -people-rococo-runtime = { path = "../../../../../runtimes/people/people-rococo" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-system-emulated-network = { path = "../../../networks/rococo-system" } +asset-test-utils = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs index 38ff08b486d47..6c23c2f1f292e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs @@ -37,9 +37,19 @@ mod imports { pub use parachains_common::Balance; pub use 
rococo_system_emulated_network::{ people_rococo_emulated_chain::{ - genesis::ED as PEOPLE_ROCOCO_ED, PeopleRococoParaPallet as PeopleRococoPallet, + genesis::ED as PEOPLE_ROCOCO_ED, + people_rococo_runtime::{people, xcm_config::XcmConfig as PeopleRococoXcmConfig}, + PeopleRococoParaPallet as PeopleRococoPallet, + }, + rococo_emulated_chain::{ + genesis::ED as ROCOCO_ED, + rococo_runtime::{ + xcm_config::XcmConfig as RococoXcmConfig, BasicDeposit, ByteDeposit, + MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as RococoOrigin, + SubAccountDeposit, + }, + RococoRelayPallet as RococoPallet, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, PeopleRococoPara as PeopleRococo, PeopleRococoParaReceiver as PeopleRococoReceiver, PeopleRococoParaSender as PeopleRococoSender, RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs index 3f1f8638d6fa1..342a8f053f607 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs @@ -42,14 +42,10 @@ use crate::imports::*; use frame_support::BoundedVec; use pallet_balances::Event as BalancesEvent; use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent}; -use people_rococo_runtime::people::{ +use people::{ BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, }; -use rococo_runtime::{ - BasicDeposit, ByteDeposit, MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as RococoOrigin, - SubAccountDeposit, -}; use rococo_runtime_constants::currency::*; use rococo_system_emulated_network::{ rococo_emulated_chain::RococoRelayPallet, RococoRelay, RococoRelaySender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 350d87d638ab2..4410d1bd40dcc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -14,8 +14,6 @@ // limitations under the License. 
use crate::imports::*; -use people_rococo_runtime::xcm_config::XcmConfig as PeopleRococoXcmConfig; -use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index 6eab6f52aa721..f7e1cce85a2cf 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -8,25 +8,23 @@ description = "People Westend runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } -pallet-identity = { path = "../../../../../../../substrate/frame/identity", default-features = false } +frame-support = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-identity = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } -westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } -polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +polkadot-runtime-common = { workspace = true, default-features = true } +westend-runtime-constants = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../common" } -people-westend-runtime = { path = "../../../../../runtimes/people/people-westend" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -westend-system-emulated-network = { path = "../../../networks/westend-system" } +asset-test-utils = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs index 77ac7cfc78c78..ce1ed9751a2e6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs @@ -35,10 +35,21 @@ mod imports { }; pub use parachains_common::Balance; pub use westend_system_emulated_network::{ + self, 
people_westend_emulated_chain::{ - genesis::ED as PEOPLE_WESTEND_ED, PeopleWestendParaPallet as PeopleWestendPallet, + genesis::ED as PEOPLE_WESTEND_ED, + people_westend_runtime::{people, xcm_config::XcmConfig as PeopleWestendXcmConfig}, + PeopleWestendParaPallet as PeopleWestendPallet, + }, + westend_emulated_chain::{ + genesis::ED as WESTEND_ED, + westend_runtime::{ + xcm_config::XcmConfig as WestendXcmConfig, BasicDeposit, ByteDeposit, + MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as WestendOrigin, + SubAccountDeposit, + }, + WestendRelayPallet as WestendPallet, }, - westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, PeopleWestendPara as PeopleWestend, PeopleWestendParaReceiver as PeopleWestendReceiver, PeopleWestendParaSender as PeopleWestendSender, WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs index 3ed8592918d65..28d1be853204e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs @@ -42,14 +42,10 @@ use crate::imports::*; use frame_support::BoundedVec; use pallet_balances::Event as BalancesEvent; use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent}; -use people_westend_runtime::people::{ +use people::{ BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, }; -use westend_runtime::{ - BasicDeposit, ByteDeposit, MaxAdditionalFields, MaxSubAccounts, RuntimeOrigin as WestendOrigin, - SubAccountDeposit, -}; use westend_runtime_constants::currency::*; use westend_system_emulated_network::{ westend_emulated_chain::WestendRelayPallet, WestendRelay, WestendRelaySender, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 8697477ba7693..6fd3cdeb61fbc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -14,8 +14,6 @@ // limitations under the License. 
use crate::imports::*; -use people_westend_runtime::xcm_config::XcmConfig as PeopleWestendXcmConfig; -use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 92e0a54631394..c52021f67e362 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -10,19 +10,18 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-io = { workspace = true } [features] default = ["std"] @@ -48,5 +47,4 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs index b1c960ad6a0d3..7ea3c2d79fa79 100644 --- a/cumulus/parachains/pallets/collective-content/src/lib.rs +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -46,7 +46,6 @@ pub use weights::WeightInfo; use frame_support::{traits::schedule::DispatchTime, BoundedVec}; use sp_core::ConstU32; -use sp_std::prelude::*; /// IPFS compatible CID. // Worst case 2 bytes base and codec, 2 bytes hash type and size, 64 bytes hash digest. diff --git a/cumulus/parachains/pallets/collective-content/src/tests.rs b/cumulus/parachains/pallets/collective-content/src/tests.rs index 4910b30b89af8..7fee5eea101db 100644 --- a/cumulus/parachains/pallets/collective-content/src/tests.rs +++ b/cumulus/parachains/pallets/collective-content/src/tests.rs @@ -16,7 +16,8 @@ //! Tests. use super::{mock::*, *}; -use frame_support::{assert_noop, assert_ok, error::BadOrigin, pallet_prelude::Pays}; +use frame_support::{assert_noop, assert_ok, pallet_prelude::Pays}; +use sp_runtime::traits::BadOrigin; /// returns CID hash of 68 bytes of given `i`. 
fn create_cid(i: u8) -> OpaqueCid { diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 01ee12bf4e719..e0bed23c4f8c0 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -10,16 +10,15 @@ description = "Pallet to store the parachain ID" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -30,7 +29,6 @@ std = [ "frame-system/std", "scale-info/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/cumulus/parachains/pallets/parachain-info/src/lib.rs b/cumulus/parachains/pallets/parachain-info/src/lib.rs index a4ef448a6b6b9..0aaa7adaa51c0 100644 --- a/cumulus/parachains/pallets/parachain-info/src/lib.rs +++ b/cumulus/parachains/pallets/parachain-info/src/lib.rs @@ -41,7 +41,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, pub parachain_id: ParaId, } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index f51946e9ebd5d..51fc384a4f140 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -10,18 +10,17 @@ description = "Ping Pallet for Cumulus XCM/UMP testing." 
workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm = { workspace = true } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } -cumulus-pallet-xcm = { path = "../../../pallets/xcm", default-features = false } +cumulus-primitives-core = { workspace = true } +cumulus-pallet-xcm = { workspace = true } [features] default = ["std"] @@ -33,7 +32,6 @@ std = [ "frame-system/std", "scale-info/std", "sp-runtime/std", - "sp-std/std", "xcm/std", ] diff --git a/cumulus/parachains/pallets/ping/src/lib.rs b/cumulus/parachains/pallets/ping/src/lib.rs index a738c05e0366b..729494cbd251d 100644 --- a/cumulus/parachains/pallets/ping/src/lib.rs +++ b/cumulus/parachains/pallets/ping/src/lib.rs @@ -18,12 +18,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_xcm::{ensure_sibling_para, Origin as CumulusOrigin}; use cumulus_primitives_core::ParaId; use frame_support::{parameter_types, BoundedVec}; use frame_system::Config as SystemConfig; use sp_runtime::traits::Saturating; -use sp_std::prelude::*; use xcm::latest::prelude::*; pub use pallet::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index a880730ddacfd..98df41090a407 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -10,96 +10,96 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = 
true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = 
"../../../../../substrate/primitives/version", default-features = false } -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-assets-freezer = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nft-fractionalization = { workspace = true } +pallet-nfts = { workspace = true } +pallet-nfts-runtime-api = { workspace = true } +pallet-proxy = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-uniques = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +sp-weights = { workspace = true } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +rococo-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { 
workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } -assets-common = { path = "../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } +assets-common = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } +pallet-xcm-bridge-hub-router = { workspace = true } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +snowbridge-router-primitives = { workspace = true } [dev-dependencies] -asset-test-utils = { path = "../test-utils" } +asset-test-utils = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } 
+substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -116,6 +116,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets-freezer/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", @@ -137,7 +138,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -151,6 +152,7 @@ try-runtime = [ "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", + "pallet-assets-freezer/try-runtime", "pallet-assets/try-runtime", "pallet-aura/try-runtime", "pallet-authorship/try-runtime", @@ -200,6 +202,7 @@ std = [ "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", + "pallet-assets-freezer/std", "pallet-assets/std", "pallet-aura/std", "pallet-authorship/std", @@ -237,7 +240,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -246,7 +248,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index d75b07bd2b9fc..f09647854cd01 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -27,6 +27,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use assets_common::{ foreign_creators::ForeignCreators, local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, @@ -45,7 +48,6 @@ use sp_runtime::{ }; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -100,7 +102,7 @@ use xcm::{ latest::prelude::{AssetId, BodyId}, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -118,7 +120,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -257,7 +259,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; - type Freezer = (); + type Freezer = AssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_local::WeightInfo; type CallbackHandle = (); @@ -267,6 +269,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `Assets` pallet +pub type 
AssetsFreezerInstance = pallet_assets_freezer::Instance1; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); @@ -295,7 +304,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ConstU128<0>; type ApprovalDeposit = ApprovalDeposit; type StringLimit = ConstU32<50>; - type Freezer = (); + type Freezer = PoolAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_pool::WeightInfo; type CallbackHandle = (); @@ -303,6 +312,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `PoolAssets` pallet +pub type PoolAssetsFreezerInstance = pallet_assets_freezer::Instance3; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + /// Union fungibles implementation for `Assets` and `ForeignAssets`. pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, @@ -411,7 +427,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; type ApprovalDeposit = ForeignAssetsApprovalDeposit; type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); + type Freezer = ForeignAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_foreign::WeightInfo; type CallbackHandle = (); @@ -421,6 +437,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; } +// Allow Freezes for the `ForeignAssets` pallet +pub type ForeignAssetsFreezerInstance = pallet_assets_freezer::Instance2; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. pub const DepositBase: Balance = deposit(1, 88); @@ -953,6 +976,9 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + AssetsFreezer: pallet_assets_freezer:: = 57, + ForeignAssetsFreezer: pallet_assets_freezer:: = 58, + PoolAssetsFreezer: pallet_assets_freezer:: = 59, // TODO: the pallet instance should be removed once all pools have migrated // to the new account IDs. @@ -1137,7 +1163,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -1298,7 +1324,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1311,11 +1337,11 @@ impl_runtime_apis! 
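// A minimal sketch of the assets-freezer wiring added above, with the instance
// generic written out; `Runtime`, `RuntimeEvent` and `RuntimeFreezeReason` are
// assumed to be the enclosing runtime's items, as above:
pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1;
impl pallet_assets_freezer::Config<AssetsFreezerInstance> for Runtime {
    type RuntimeFreezeReason = RuntimeFreezeReason;
    type RuntimeEvent = RuntimeEvent;
}
// The matching `pallet_assets` instance then points its `Freezer` at this pallet
// (`type Freezer = AssetsFreezer;` instead of `()`), and `construct_runtime!`
// gains an entry such as `AssetsFreezer: pallet_assets_freezer::<Instance1> = 57,`;
// `ForeignAssetsFreezer` (Instance2) and `PoolAssetsFreezer` (Instance3) follow
// the same pattern.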
{ Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1330,7 +1356,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1340,6 +1366,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -1409,7 +1447,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -1479,7 +1517,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(XcmAssets, u32, Location, Box)> { + ) -> Option<(XcmAssets, u32, Location, alloc::boxed::Box)> { // Transfer to Relay some local AH asset (local-reserve-transfer) while paying // fees using teleported native token. // (We don't care that Relay doesn't accept incoming unknown AH local asset) @@ -1514,7 +1552,7 @@ impl_runtime_apis! { let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; // verify transferred successfully - let verify = Box::new(move || { + let verify = alloc::boxed::Box::new(move || { // verify native balance after transfer, decreased by transferred fee amount // (plus transport fees) assert!(Balances::free_balance(&who) <= balance - fee_amount); @@ -1548,7 +1586,7 @@ impl_runtime_apis! 
{ let bridged_asset_hub = xcm_config::bridging::to_westend::AssetHubWestend::get(); let _ = PolkadotXcm::force_xcm_version( RuntimeOrigin::root(), - Box::new(bridged_asset_hub.clone()), + alloc::boxed::Box::new(bridged_asset_hub.clone()), XCM_VERSION, ).map_err(|e| { log::error!( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs index c1e5c6a742939..fc63a0814d0a4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -47,7 +47,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs index 45531ccfa797c..cd72703104ad0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 775bc3bdb80f5..0a86037391b42 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -49,32 +49,32 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: // Measured: `154` - // Estimated: `1639` - // Minimum execution time: 7_853_000 picoseconds. - Weight::from_parts(8_443_000, 0) - .saturating_add(Weight::from_parts(0, 1639)) + // Estimated: `5487` + // Minimum execution time: 8_078_000 picoseconds. + Weight::from_parts(8_455_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `1629` - // Minimum execution time: 4_333_000 picoseconds. - Weight::from_parts(4_501_000, 0) - .saturating_add(Weight::from_parts(0, 1629)) + // Estimated: `5487` + // Minimum execution time: 4_291_000 picoseconds. + Weight::from_parts(4_548_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) @@ -83,14 +83,12 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_167_000 picoseconds. - Weight::from_parts(10_667_000, 0) + // Minimum execution time: 9_959_000 picoseconds. 
+ Weight::from_parts(10_372_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) @@ -100,7 +98,9 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -108,17 +108,17 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn send_message() -> Weight { // Proof Size summary in bytes: // Measured: `448` // Estimated: `6388` - // Minimum execution time: 60_584_000 picoseconds. - Weight::from_parts(62_467_000, 0) + // Minimum execution time: 45_888_000 picoseconds. 
+ Weight::from_parts(47_022_000, 0) .saturating_add(Weight::from_parts(0, 6388)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 8e675ad0cf8e6..8c52ecd9f1b1f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -18,10 +18,10 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 7fab35842509d..03d3785dccbd7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4454494badcbf..bee6bcdf21cf3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index cf5a3905e5816..c736d3ee44204 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -337,10 +337,11 @@ impl xcm_executor::Config for XcmConfig { type OriginConverter = XcmOriginToTransactDispatchOrigin; // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being - // held). Asset Hub may _act_ as a reserve location for ROC and assets created - // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). + // held). On Rococo Asset Hub, we allow Westend Asset Hub to act as reserve for any asset native + // to the Westend ecosystem. We also allow Ethereum contracts to act as reserves for the foreign + // assets identified by the same respective contracts locations. 
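// In concrete terms (an illustrative reading of the comment above, not an
// exhaustive rule): an asset anchored under the Westend ecosystem, e.g.
// `Location::new(2, [GlobalConsensus(Westend), ..])`, is accepted as a
// reserve-backed asset only when it arrives from `AssetHubWestend`, and a
// foreign asset identified by an Ethereum contract location is accepted only
// when it comes from that same contract's location over the Ethereum bridge.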
type IsReserve = ( - bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset, + bridging::to_westend::WestendAssetFromAssetHubWestend, bridging::to_ethereum::IsTrustedBridgedReserveLocationForForeignAsset, ); type IsTeleporter = TrustedTeleporters; @@ -510,8 +511,8 @@ impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { /// All configuration related to bridging pub mod bridging { use super::*; + use alloc::collections::btree_set::BTreeSet; use assets_common::matching; - use sp_std::collections::btree_set::BTreeSet; // common/shared parameters parameter_types! { @@ -540,13 +541,13 @@ pub mod bridging { /// (`AssetId` has to be aligned with `BridgeTable`) pub XcmBridgeHubRouterFeeAssetId: AssetId = TokenLocation::get().into(); - pub BridgeTable: sp_std::vec::Vec = - sp_std::vec::Vec::new().into_iter() + pub BridgeTable: alloc::vec::Vec = + alloc::vec::Vec::new().into_iter() .chain(to_westend::BridgeTable::get()) .collect(); - pub EthereumBridgeTable: sp_std::vec::Vec = - sp_std::vec::Vec::new().into_iter() + pub EthereumBridgeTable: alloc::vec::Vec = + alloc::vec::Vec::new().into_iter() .chain(to_ethereum::BridgeTable::get()) .collect(); } @@ -568,20 +569,19 @@ pub mod bridging { ); pub const WestendNetwork: NetworkId = NetworkId::Westend; - pub AssetHubWestend: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get()), Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID)]); + pub WestendEcosystem: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); pub WndLocation: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); - - pub WndFromAssetHubWestend: (AssetFilter, Location) = ( - Wild(AllOf { fun: WildFungible, id: AssetId(WndLocation::get()) }), - AssetHubWestend::get() - ); + pub AssetHubWestend: Location = Location::new(2, [ + GlobalConsensus(WestendNetwork::get()), + Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID) + ]); /// Set up exporters configuration. /// `Option` represents static "base fee" which is used for total delivery fee calculation. - pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + pub BridgeTable: alloc::vec::Vec = alloc::vec![ NetworkExportTableItem::new( WestendNetwork::get(), - Some(sp_std::vec![ + Some(alloc::vec![ AssetHubWestend::get().interior.split_global().expect("invalid configuration for AssetHubWestend").1, ]), SiblingBridgeHub::get(), @@ -595,7 +595,7 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( - sp_std::vec![ + alloc::vec![ (SiblingBridgeHubWithBridgeHubWestendInstance::get(), GlobalConsensus(WestendNetwork::get())) ] ); @@ -607,17 +607,9 @@ pub mod bridging { } } - /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive WND from AssetHubWestend - xcm_builder::Case, - // and nothing else - ), - >; + /// Allow any asset native to the Westend ecosystem if it comes from Westend Asset Hub. + pub type WestendAssetFromAssetHubWestend = + matching::RemoteAssetFromLocation, AssetHubWestend>; impl Contains for ToWestendXcmRouter { fn contains(call: &RuntimeCall) -> bool { @@ -651,10 +643,10 @@ pub mod bridging { /// Set up exporters configuration. /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
- pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + pub BridgeTable: alloc::vec::Vec = alloc::vec![ NetworkExportTableItem::new( EthereumNetwork::get(), - Some(sp_std::vec![Junctions::Here]), + Some(alloc::vec![Junctions::Here]), SiblingBridgeHub::get(), Some(( XcmBridgeHubRouterFeeAssetId::get(), @@ -665,14 +657,14 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( - sp_std::vec![ + alloc::vec![ (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), ] ); } pub type IsTrustedBridgedReserveLocationForForeignAsset = - matching::IsForeignConcreteAsset>; + IsForeignConcreteAsset>; impl Contains<(Location, Junction)> for UniversalAliases { fn contains(alias: &(Location, Junction)) -> bool { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index f670c5f424efe..ee1461b7f9c85 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -34,6 +34,7 @@ use asset_test_utils::{ ExtBuilder, SlotDurations, }; use codec::{Decode, Encode}; +use core::ops::Mul; use cumulus_primitives_utility::ChargeWeightInFungibles; use frame_support::{ assert_noop, assert_ok, @@ -48,7 +49,6 @@ use frame_support::{ use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_runtime::traits::MaybeEquivalence; -use sp_std::ops::Mul; use std::convert::Into; use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; @@ -1277,7 +1277,7 @@ mod asset_hub_rococo_tests { collator_session_keys(), bridging_to_asset_hub_westend, || { - sp_std::vec![ + vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -1287,16 +1287,16 @@ mod asset_hub_rococo_tests { bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { bridge_id: Default::default(), is_congested: true, - } + }, ) .encode() .into(), - } + }, ] .into() }, || { - sp_std::vec![ + vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -1306,11 +1306,11 @@ mod asset_hub_rococo_tests { bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { bridge_id: Default::default(), is_congested: false, - } + }, ) .encode() .into(), - } + }, ] .into() }, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 953f6a8b4009a..6b1bf769ace35 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -10,95 +10,95 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", 
default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = 
"../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-assets-freezer = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nft-fractionalization = { workspace = true } +pallet-nfts = { workspace = true } +pallet-nfts-runtime-api = { workspace = true } +pallet-proxy = { workspace = true } +pallet-session = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-uniques = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = 
"../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } -assets-common = { path = "../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } +assets-common = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } 
+pallet-xcm-bridge-hub-router = { workspace = true } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } [dev-dependencies] -asset-test-utils = { path = "../test-utils" } +asset-test-utils = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -115,6 +115,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets-freezer/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collator-selection/runtime-benchmarks", @@ -136,7 +137,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -150,6 +151,7 @@ try-runtime = [ "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", + "pallet-assets-freezer/try-runtime", "pallet-assets/try-runtime", "pallet-aura/try-runtime", "pallet-authorship/try-runtime", @@ -200,6 +202,7 @@ std = [ "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", + "pallet-assets-freezer/std", "pallet-assets/std", "pallet-aura/std", "pallet-authorship/std", @@ -236,7 +239,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -245,7 +247,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e9c2b10f719da..178b886fc3e84 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -27,6 +27,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use assets_common::{ local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, AssetIdForTrustBackedAssetsConvert, @@ -68,7 +71,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -85,10 +87,10 @@ pub use sp_runtime::BuildStorage; use assets_common::{foreign_creators::ForeignCreators, matching::FromSiblingParachain}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; - -// We exclude `Assets` since it's the name of a pallet -use xcm::latest::prelude::AssetId; +use xcm::{ + latest::prelude::AssetId, + prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, +}; #[cfg(feature = "runtime-benchmarks")] use 
xcm::latest::prelude::{ @@ -96,7 +98,7 @@ use xcm::latest::prelude::{ NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -117,7 +119,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -255,7 +257,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = AssetsStringLimit; - type Freezer = (); + type Freezer = AssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_local::WeightInfo; type CallbackHandle = (); @@ -265,6 +267,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `Assets` pallet +pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); @@ -292,7 +301,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ConstU128<0>; type ApprovalDeposit = ConstU128<0>; type StringLimit = ConstU32<50>; - type Freezer = (); + type Freezer = PoolAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_pool::WeightInfo; type CallbackHandle = (); @@ -300,6 +309,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = (); } +// Allow Freezes for the `PoolAssets` pallet +pub type PoolAssetsFreezerInstance = pallet_assets_freezer::Instance3; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + /// Union fungibles implementation for `Assets` and `ForeignAssets`. pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, @@ -405,7 +421,7 @@ impl pallet_assets::Config for Runtime { type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; type ApprovalDeposit = ForeignAssetsApprovalDeposit; type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); + type Freezer = ForeignAssetsFreezer; type Extra = (); type WeightInfo = weights::pallet_assets_foreign::WeightInfo; type CallbackHandle = (); @@ -415,6 +431,13 @@ impl pallet_assets::Config for Runtime { type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; } +// Allow Freezes for the `ForeignAssets` pallet +pub type ForeignAssetsFreezerInstance = pallet_assets_freezer::Instance2; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. 
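The three `pallet_assets_freezer::Config` instances introduced above replace the previous `type Freezer = ()` wiring, so runtime logic can now place named freezes on `Assets`, `ForeignAssets` and `PoolAssets` balances. A minimal sketch of that capability, assuming the freezer pallet exposes the standard `frame_support::traits::fungibles::MutateFreeze` interface keyed by `RuntimeFreezeReason`; the helper below and its parameters are illustrative, not part of this diff:

```rust
// Sketch only (not part of this diff): placing a named freeze on a trust-backed asset balance
// through the `AssetsFreezer` instance configured as `type Freezer` for `pallet_assets` above.
// Assumes the freezer pallet implements `fungibles::MutateFreeze` with `Id = RuntimeFreezeReason`;
// `AccountId`, `Balance` and `RuntimeFreezeReason` are the runtime's own types.
use frame_support::traits::fungibles::MutateFreeze;

fn freeze_asset_balance(
    asset_id: u32, // `AssetIdForTrustBackedAssets`
    who: &AccountId,
    amount: Balance,
    reason: &RuntimeFreezeReason,
) -> frame_support::dispatch::DispatchResult {
    // The frozen amount stays in `who`'s account but cannot be spent until thawed.
    <AssetsFreezer as MutateFreeze<AccountId>>::set_freeze(asset_id, reason, who, amount)
}
```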
pub const DepositBase: Balance = deposit(1, 88); @@ -943,6 +966,9 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + AssetsFreezer: pallet_assets_freezer:: = 57, + ForeignAssetsFreezer: pallet_assets_freezer:: = 58, + PoolAssetsFreezer: pallet_assets_freezer:: = 59, StateTrieMigration: pallet_state_trie_migration = 70, @@ -1182,7 +1208,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -1326,7 +1352,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1339,11 +1365,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1358,7 +1384,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1368,6 +1394,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { @@ -1500,7 +1538,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -1565,7 +1603,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(XcmAssets, u32, Location, Box)> { + ) -> Option<(XcmAssets, u32, Location, alloc::boxed::Box)> { // Transfer to Relay some local AH asset (local-reserve-transfer) while paying // fees using teleported native token. // (We don't care that Relay doesn't accept incoming unknown AH local asset) @@ -1600,7 +1638,7 @@ impl_runtime_apis! 
{ let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; // verify transferred successfully - let verify = Box::new(move || { + let verify = alloc::boxed::Box::new(move || { // verify native balance after transfer, decreased by transferred fee amount // (plus transport fees) assert!(Balances::free_balance(&who) <= balance - fee_amount); @@ -1639,7 +1677,7 @@ impl_runtime_apis! { let bridged_asset_hub = xcm_config::bridging::to_rococo::AssetHubRococo::get(); let _ = PolkadotXcm::force_xcm_version( RuntimeOrigin::root(), - Box::new(bridged_asset_hub.clone()), + alloc::boxed::Box::new(bridged_asset_hub.clone()), XCM_VERSION, ).map_err(|e| { log::error!( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs index c1e5c6a742939..fc63a0814d0a4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -47,7 +47,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs index 45531ccfa797c..cd72703104ad0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index 84d717b0283c7..21d15c75af553 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -49,48 +49,46 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `1678` - // Minimum execution time: 8_095_000 picoseconds. - Weight::from_parts(8_393_000, 0) - .saturating_add(Weight::from_parts(0, 1678)) + // Measured: `226` + // Estimated: `5487` + // Minimum execution time: 8_363_000 picoseconds. + Weight::from_parts(8_620_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 3_417_000 picoseconds. - Weight::from_parts(3_583_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) + // Estimated: `5487` + // Minimum execution time: 3_436_000 picoseconds. + Weight::from_parts(3_586_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn report_bridge_status() -> Weight { // Proof Size summary in bytes: - // Measured: `117` + // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_280_000 picoseconds. - Weight::from_parts(10_703_000, 0) + // Minimum execution time: 9_706_000 picoseconds. 
+ Weight::from_parts(10_139_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) @@ -100,7 +98,9 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:2 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) @@ -108,18 +108,18 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn send_message() -> Weight { // Proof Size summary in bytes: - // Measured: `487` - // Estimated: `6427` - // Minimum execution time: 63_624_000 picoseconds. - Weight::from_parts(66_071_000, 0) - .saturating_add(Weight::from_parts(0, 6427)) + // Measured: `520` + // Estimated: `6460` + // Minimum execution time: 46_250_000 picoseconds. 
+ Weight::from_parts(47_801_000, 0) + .saturating_add(Weight::from_parts(0, 6460)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index 8c77774da2dd7..d39052c5c03b8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -17,10 +17,10 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index eaf07aac52cef..fe8d186139256 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -42,7 +42,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index fc196abea0f5e..127bc173c1037 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -42,7 +42,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ff1fc99cba8a7..2deeb73eb127b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -357,9 +357,9 @@ impl xcm_executor::Config for XcmConfig { type OriginConverter = XcmOriginToTransactDispatchOrigin; // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being - // held). Asset Hub may _act_ as a reserve location for WND and assets created - // under `pallet-assets`. Users must use teleport where allowed (e.g. WND with the Relay Chain). - type IsReserve = (bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset,); + // held). On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset native + // to the Rococo or Ethereum ecosystems. 
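The filter assigned just below, `bridging::to_rococo::RococoOrEthereumAssetFromAssetHubRococo`, encodes the policy this comment describes. A rough functional sketch of that rule, under the assumption that it reduces to an origin check plus a location-prefix check; the real implementation is the `RemoteAssetFromLocation` adapter added to `assets-common`'s `matching.rs` further down in this diff:

```rust
// Illustrative equivalent of the `IsReserve` rule configured below (not part of this diff).
// Locations mirror the `bridging::to_rococo` parameter_types defined later in this file.
use xcm::prelude::*;

fn accept_as_reserve(asset: &Asset, origin: &Location) -> bool {
    let asset_hub_rococo = Location::new(
        2,
        [
            GlobalConsensus(NetworkId::Rococo),
            Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID),
        ],
    );
    let rococo_ecosystem = Location::new(2, [GlobalConsensus(NetworkId::Rococo)]);
    let ethereum_ecosystem =
        Location::new(2, [GlobalConsensus(NetworkId::Ethereum { chain_id: 11155111 })]);

    // Only Rococo Asset Hub may act as the reserve, and only for assets whose location sits
    // under the Rococo or Ethereum (Sepolia) global consensus.
    origin == &asset_hub_rococo &&
        (asset.id.0.starts_with(&rococo_ecosystem) || asset.id.0.starts_with(&ethereum_ecosystem))
}
```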
+ type IsReserve = (bridging::to_rococo::RococoOrEthereumAssetFromAssetHubRococo,); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -519,8 +519,8 @@ impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { /// All configuration related to bridging pub mod bridging { use super::*; + use alloc::collections::btree_set::BTreeSet; use assets_common::matching; - use sp_std::collections::btree_set::BTreeSet; parameter_types! { /// Base price of every byte of the Westend -> Rococo message. Can be adjusted via @@ -548,8 +548,8 @@ pub mod bridging { /// (`AssetId` has to be aligned with `BridgeTable`) pub XcmBridgeHubRouterFeeAssetId: AssetId = WestendLocation::get().into(); - pub BridgeTable: sp_std::vec::Vec = - sp_std::vec::Vec::new().into_iter() + pub BridgeTable: alloc::vec::Vec = + alloc::vec::Vec::new().into_iter() .chain(to_rococo::BridgeTable::get()) .collect(); } @@ -569,20 +569,21 @@ pub mod bridging { ); pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub AssetHubRococo: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID)]); + pub const EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub RococoEcosystem: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); pub RocLocation: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); - - pub RocFromAssetHubRococo: (AssetFilter, Location) = ( - Wild(AllOf { fun: WildFungible, id: AssetId(RocLocation::get()) }), - AssetHubRococo::get() - ); + pub EthereumEcosystem: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]); + pub AssetHubRococo: Location = Location::new(2, [ + GlobalConsensus(RococoNetwork::get()), + Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID) + ]); /// Set up exporters configuration. /// `Option` represents static "base fee" which is used for total delivery fee calculation. - pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + pub BridgeTable: alloc::vec::Vec = alloc::vec![ NetworkExportTableItem::new( RococoNetwork::get(), - Some(sp_std::vec![ + Some(alloc::vec![ AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, ]), SiblingBridgeHub::get(), @@ -596,7 +597,7 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( - sp_std::vec![ + alloc::vec![ (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) ] ); @@ -608,17 +609,12 @@ pub mod bridging { } } - /// Reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive ROC from AssetHubRococo - xcm_builder::Case, - // and nothing else - ), - >; + /// Allow any asset native to the Rococo or Ethereum ecosystems if it comes from Rococo + /// Asset Hub. 
+ pub type RococoOrEthereumAssetFromAssetHubRococo = matching::RemoteAssetFromLocation< + (StartsWith, StartsWith), + AssetHubRococo, + >; impl Contains for ToRococoXcmRouter { fn contains(call: &RuntimeCall) -> bool { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index b5957dd5df92f..48e6c11d268c7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -1258,7 +1258,7 @@ fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { collator_session_keys(), bridging_to_asset_hub_rococo, || { - sp_std::vec![ + vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -1268,16 +1268,16 @@ fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { bridge_id: Default::default(), is_congested: true, - } + }, ) .encode() .into(), - } + }, ] .into() }, || { - sp_std::vec![ + vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -1287,11 +1287,11 @@ fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { bridge_id: Default::default(), is_congested: false, - } + }, ) .encode() .into(), - } + }, ] .into() }, diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 4664e0cb9a7f8..c6740269339d8 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -10,30 +10,29 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -impl-trait-for-tuples = "0.2.2" +impl-trait-for-tuples = { workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -parachains-common = { 
path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "scale-info/std", "sp-api/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs b/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs index 44bda1eb3709c..d59fddc4e8f02 100644 --- a/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs +++ b/cumulus/parachains/runtimes/assets/common/src/benchmarks.rs @@ -13,9 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use core::marker::PhantomData; use cumulus_primitives_core::ParaId; use sp_runtime::traits::Get; -use sp_std::marker::PhantomData; use xcm::latest::prelude::*; /// Creates asset pairs for liquidity pools with `Target` always being the first asset. diff --git a/cumulus/parachains/runtimes/assets/common/src/foreign_creators.rs b/cumulus/parachains/runtimes/assets/common/src/foreign_creators.rs index a9fd79bf939f5..95edb31da06e5 100644 --- a/cumulus/parachains/runtimes/assets/common/src/foreign_creators.rs +++ b/cumulus/parachains/runtimes/assets/common/src/foreign_creators.rs @@ -23,7 +23,7 @@ use xcm_executor::traits::ConvertLocation; /// `EnsureOriginWithArg` impl for `CreateOrigin` that allows only XCM origins that are locations /// containing the class location. pub struct ForeignCreators( - sp_std::marker::PhantomData<(IsForeign, AccountOf, AccountId, L)>, + core::marker::PhantomData<(IsForeign, AccountOf, AccountId, L)>, ); impl< IsForeign: ContainsPair, @@ -41,7 +41,7 @@ where fn try_origin( origin: RuntimeOrigin, asset_location: &L, - ) -> sp_std::result::Result { + ) -> core::result::Result { let origin_location = EnsureXcm::::try_origin(origin.clone())?; if !IsForeign::contains(asset_location, &origin_location) { return Err(origin) diff --git a/cumulus/parachains/runtimes/assets/common/src/fungible_conversion.rs b/cumulus/parachains/runtimes/assets/common/src/fungible_conversion.rs index e21203485a764..27ee2d6b5653c 100644 --- a/cumulus/parachains/runtimes/assets/common/src/fungible_conversion.rs +++ b/cumulus/parachains/runtimes/assets/common/src/fungible_conversion.rs @@ -16,9 +16,10 @@ //! Runtime API definition for assets. 
use crate::runtime_api::FungiblesAccessError; +use alloc::vec::Vec; +use core::borrow::Borrow; use frame_support::traits::Contains; use sp_runtime::traits::MaybeEquivalence; -use sp_std::{borrow::Borrow, vec::Vec}; use xcm::latest::{Asset, Location}; use xcm_builder::{ConvertedConcreteId, MatchedConvertedConcreteId}; use xcm_executor::traits::MatchesFungibles; diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 431b5766147ae..4bb593f98929e 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -23,6 +23,8 @@ pub mod local_and_foreign_assets; pub mod matching; pub mod runtime_api; +extern crate alloc; + use crate::matching::{LocalLocationPattern, ParentLocation}; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; diff --git a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs index 58f5d2d57a766..8a89089c71877 100644 --- a/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs +++ b/cumulus/parachains/runtimes/assets/common/src/local_and_foreign_assets.rs @@ -13,13 +13,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +use core::marker::PhantomData; use frame_support::traits::Get; use sp_runtime::{ traits::{Convert, MaybeEquivalence}, Either, Either::{Left, Right}, }; -use sp_std::marker::PhantomData; use xcm::latest::Location; /// Converts a given [`Location`] to [`Either::Left`] when equal to `Target`, or diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 3aad88e177caa..9bb35d0c5328b 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -14,7 +14,10 @@ // limitations under the License. use cumulus_primitives_core::ParaId; -use frame_support::{pallet_prelude::Get, traits::ContainsPair}; +use frame_support::{ + pallet_prelude::Get, + traits::{Contains, ContainsPair}, +}; use xcm::prelude::*; use xcm_builder::ensure_is_remote; @@ -25,7 +28,7 @@ frame_support::parameter_types! { } /// Accepts an asset if it is from the origin. -pub struct IsForeignConcreteAsset(sp_std::marker::PhantomData); +pub struct IsForeignConcreteAsset(core::marker::PhantomData); impl> ContainsPair for IsForeignConcreteAsset { @@ -38,7 +41,7 @@ impl> ContainsPair /// Checks if `a` is from sibling location `b`. Checks that `Location-a` starts with /// `Location-b`, and that the `ParaId` of `b` is not equal to `a`. pub struct FromSiblingParachain( - sp_std::marker::PhantomData<(SelfParaId, L)>, + core::marker::PhantomData<(SelfParaId, L)>, ); impl, L: TryFrom + TryInto + Clone> ContainsPair for FromSiblingParachain @@ -62,7 +65,7 @@ impl, L: TryFrom + TryInto + Clone> /// Checks if `a` is from the expected global consensus network. Checks that `Location-a` /// starts with `Location-b`, and that network is a foreign consensus system. pub struct FromNetwork( - sp_std::marker::PhantomData<(UniversalLocation, ExpectedNetworkId, L)>, + core::marker::PhantomData<(UniversalLocation, ExpectedNetworkId, L)>, ); impl< UniversalLocation: Get, @@ -94,36 +97,33 @@ impl< } } -/// Adapter verifies if it is allowed to receive `Asset` from `Location`. 
-/// -/// Note: `Location` has to be from a different global consensus. -pub struct IsTrustedBridgedReserveLocationForConcreteAsset( - sp_std::marker::PhantomData<(UniversalLocation, Reserves)>, +/// Accept an asset if it is native to `AssetsAllowedNetworks` and it is coming from +/// `OriginLocation`. +pub struct RemoteAssetFromLocation( + core::marker::PhantomData<(AssetsAllowedNetworks, OriginLocation)>, ); -impl, Reserves: ContainsPair> - ContainsPair - for IsTrustedBridgedReserveLocationForConcreteAsset +impl, OriginLocation: Get> + ContainsPair for RemoteAssetFromLocation { fn contains(asset: &Asset, origin: &Location) -> bool { - let universal_source = UniversalLocation::get(); - log::trace!( - target: "xcm::contains", - "IsTrustedBridgedReserveLocationForConcreteAsset asset: {:?}, origin: {:?}, universal_source: {:?}", - asset, origin, universal_source - ); - - // check remote origin - if ensure_is_remote(universal_source.clone(), origin.clone()).is_err() { + let expected_origin = OriginLocation::get(); + // ensure `origin` is expected `OriginLocation` + if !expected_origin.eq(origin) { log::trace!( target: "xcm::contains", - "IsTrustedBridgedReserveLocationForConcreteAsset origin: {:?} is not remote to the universal_source: {:?}", - origin, universal_source + "RemoteAssetFromLocation asset: {:?}, origin: {:?} is not from expected {:?}", + asset, origin, expected_origin, ); return false + } else { + log::trace!( + target: "xcm::contains", + "RemoteAssetFromLocation asset: {asset:?}, origin: {origin:?}", + ); } - // check asset according to the configured reserve locations - Reserves::contains(asset, origin) + // ensure `asset` is from remote consensus listed in `AssetsAllowedNetworks` + AssetsAllowedNetworks::contains(&asset.id.0) } } diff --git a/cumulus/parachains/runtimes/assets/common/src/runtime_api.rs b/cumulus/parachains/runtimes/assets/common/src/runtime_api.rs index 19977cbedab07..799b2f45b4dfb 100644 --- a/cumulus/parachains/runtimes/assets/common/src/runtime_api.rs +++ b/cumulus/parachains/runtimes/assets/common/src/runtime_api.rs @@ -18,7 +18,7 @@ use codec::{Codec, Decode, Encode}; use sp_runtime::RuntimeDebug; #[cfg(feature = "std")] -use {sp_std::vec::Vec, xcm::latest::Asset}; +use {alloc::vec::Vec, xcm::latest::Asset}; /// The possible errors that can happen querying the storage of assets. 
#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index af5b4a6468072..529d6460fc4e4 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -10,42 +10,41 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-session = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +pallet-collator-selection = { workspace = true } +parachains-common = { workspace = true } +cumulus-primitives-core = { workspace = true } +parachain-info = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { path = 
"../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } +pallet-xcm-bridge-hub-router = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" +hex-literal = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -68,7 +67,6 @@ std = [ "parachains-runtimes-test-utils/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 253a21f5d0bab..9873729846821 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -10,125 +10,124 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -hex-literal = { version = "0.4.1" } +], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } +], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = 
"../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } 
-xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +rococo-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = [ "bridging", -] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-polkadot = { path = "../../../../../bridges/chains/chain-bridge-hub-polkadot", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false 
} -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-bulletin = { path = "../../../../../bridges/chains/chain-polkadot-bulletin", default-features = false } -bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } -pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-polkadot = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-bulletin = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-rococo = { workspace = true } +bp-westend = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } +pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } # Ethereum Bridge (Snowbridge) -snowbridge-beacon-primitives = { path = "../../../../../bridges/snowbridge/primitives/beacon", default-features = false } -snowbridge-pallet-system = { path = "../../../../../bridges/snowbridge/pallets/system", default-features = false } -snowbridge-system-runtime-api = { path = "../../../../../bridges/snowbridge/pallets/system/runtime-api", default-features = false } -snowbridge-core = { path = "../../../../../bridges/snowbridge/primitives/core", default-features = false } -snowbridge-pallet-ethereum-client = { path = "../../../../../bridges/snowbridge/pallets/ethereum-client", default-features = false } -snowbridge-pallet-inbound-queue = { path = "../../../../../bridges/snowbridge/pallets/inbound-queue", default-features = false } -snowbridge-pallet-outbound-queue = { path = "../../../../../bridges/snowbridge/pallets/outbound-queue", default-features = false } -snowbridge-outbound-queue-runtime-api = { path = "../../../../../bridges/snowbridge/pallets/outbound-queue/runtime-api", default-features = false } -snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } -snowbridge-runtime-common = { path = 
"../../../../../bridges/snowbridge/runtime/runtime-common", default-features = false } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } +snowbridge-core = { workspace = true } +snowbridge-pallet-ethereum-client = { workspace = true } +snowbridge-pallet-inbound-queue = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-outbound-queue-runtime-api = { workspace = true } +snowbridge-router-primitives = { workspace = true } +snowbridge-runtime-common = { workspace = true } -bridge-hub-common = { path = "../common", default-features = false } +bridge-hub-common = { workspace = true } [dev-dependencies] -static_assertions = "1.1" -bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = [ +static_assertions = { workspace = true, default-features = true } +bridge-hub-test-utils = { workspace = true, default-features = true } +bridge-runtime-common = { features = [ "integrity-test", -] } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -snowbridge-runtime-test-common = { path = "../../../../../bridges/snowbridge/runtime/test-common" } +], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] default = ["std"] @@ -218,10 +217,9 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -264,7 +262,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 5551b05e20254..779cc537ee96d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -21,14 +21,9 @@ //! For example, the messaging pallet needs to know the sending and receiving chains, but the //! GRANDPA tracking pallet only needs to be aware of one chain. -use super::{ - weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent, RuntimeOrigin, -}; +use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; use bp_parachains::SingleParaStoredHeaderDataBuilder; -use bp_runtime::UnderlyingChainProvider; -use bridge_runtime_common::messages::ThisChainWithMessages; use frame_support::{parameter_types, traits::ConstU32}; -use sp_runtime::RuntimeDebug; parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; @@ -103,15 +98,3 @@ impl pallet_bridge_grandpa::Config for Runt // weights are also the same for both bridges. type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } - -/// BridgeHubRococo chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 94b936889b77c..d97e6a1d88e1b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -20,23 +20,20 @@ //! are reusing Polkadot Bulletin chain primitives everywhere here. use crate::{ - bridge_common_config::BridgeHubRococo, weights, xcm_config::UniversalLocation, AccountId, - BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, - XcmOverRococoBulletin, XcmRouter, + weights, xcm_config::UniversalLocation, BridgeRococoBulletinGrandpa, + BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverRococoBulletin, + XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -44,7 +41,6 @@ use bridge_runtime_common::{ }; use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -52,15 +48,6 @@ use xcm::{ use xcm_builder::BridgeBlobDispatcher; parameter_types! { - /// Maximal number of entries in the unrewarded relayers vector at the Rococo Bridge Hub. It matches the - /// maximal number of unrewarded relayers that the single confirmation transaction at Rococo Bulletin Chain - /// may process. - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_polkadot_bulletin::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - /// Maximal number of unconfirmed messages at the Rococo Bridge Hub. It matches the maximal number of - /// unconfirmed messages that the single confirmation transaction at Rococo Bulletin Chain may process. - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_polkadot_bulletin::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; /// Bridge specific chain (network) identifier of the Rococo Bulletin Chain. pub const RococoBulletinChainId: bp_runtime::ChainId = bp_polkadot_bulletin::PolkadotBulletin::ID; /// Interior location (relative to this runtime) of the with-RococoBulletin messages pallet. @@ -100,7 +87,7 @@ parameter_types! { XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, ); /// All active routes and their destinations. 
- pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = sp_std::vec![ + pub ActiveLanes: alloc::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = alloc::vec![ ( FromRococoPeopleToRococoBulletinRoute::get(), (RococoBulletinGlobalConsensusNetwork::get(), Here) @@ -142,31 +129,6 @@ impl XcmBlobHauler for ToRococoBulletinXcmBlobHauler { type OnMessagesDeliveredFromRococoBulletin = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubRococo -> Rococo Bulletin. -pub struct WithRococoBulletinMessageBridge; -impl MessageBridge for WithRococoBulletinMessageBridge { - // Bulletin chain assumes it is bridged with Polkadot Bridge Hub - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_polkadot::WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = RococoBulletin; - type BridgedHeaderChain = BridgeRococoBulletinGrandpa; -} - -/// Maximal outbound payload size of BridgeHubRococo -> RococoBulletin messages. -pub type ToRococoBulletinMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// RococoBulletin chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct RococoBulletin; - -impl UnderlyingChainProvider for RococoBulletin { - type Chain = bp_polkadot_bulletin::PolkadotBulletin; -} - -impl messages::BridgedChainWithMessages for RococoBulletin {} - /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin /// chain. pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< @@ -189,22 +151,20 @@ impl pallet_bridge_messages::Config for Runt type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo; - type BridgedChainId = RococoBulletinChainId; + + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_polkadot_bulletin::PolkadotBulletin; + type BridgedHeaderChain = BridgeRococoBulletinGrandpa; + type ActiveOutboundLanes = ActiveOutboundLanesToRococoBulletin; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToRococoBulletinMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = (); - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch; type OnMessagesDelivered = OnMessagesDeliveredFromRococoBulletin; @@ -267,8 +227,7 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, - bridge: WithRococoBulletinMessageBridge, - this_chain: bp_rococo::Rococo, + this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 1681ac7f46874..fe854e20c2445 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -17,27 +17,21 @@ //! Bridge definitions used on BridgeHubRococo for bridging to BridgeHubWestend. use crate::{ - bridge_common_config::{ - BridgeHubRococo, BridgeParachainWestendInstance, DeliveryRewardInBalance, - }, + bridge_common_config::{BridgeParachainWestendInstance, DeliveryRewardInBalance}, weights, xcm_config::UniversalLocation, - AccountId, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubWestend, - XcmRouter, + BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubWestend, XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -46,7 +40,6 @@ use bridge_runtime_common::{ use codec::Encode; use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -54,11 +47,7 @@ use xcm::{ use xcm_builder::BridgeBlobDispatcher; parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWestendChainId: bp_runtime::ChainId = BridgeHubWestend::ID; + pub const BridgeHubWestendChainId: bp_runtime::ChainId = bp_bridge_hub_westend::BridgeHubWestend::ID; pub BridgeRococoToWestendMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::Westend; pub WestendGlobalConsensusNetworkLocation: Location = Location::new( @@ -82,7 +71,7 @@ parameter_types! { ParentThen([Parachain(AssetHubRococoParaId::get().into())].into()).into(), XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, ); - pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = sp_std::vec![ + pub ActiveLanes: alloc::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = alloc::vec![ ( FromAssetHubRococoToAssetHubWestendRoute::get(), (WestendGlobalConsensusNetwork::get(), [Parachain(AssetHubWestendParaId::get().into())].into()) @@ -102,8 +91,8 @@ parameter_types! 
{ } pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND: LaneId = LaneId([0, 0, 0, 2]); -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ +fn build_congestion_message(is_congested: bool) -> alloc::vec::Vec> { + alloc::vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -148,34 +137,6 @@ impl XcmBlobHauler for ToBridgeHubWestendXcmBlobHauler { type OnMessagesDeliveredFromWestend = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWestend -pub struct WithBridgeHubWestendMessageBridge; -impl MessageBridge for WithBridgeHubWestendMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWestend; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWestendInstance, - bp_bridge_hub_westend::BridgeHubWestend, - >; -} - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWestend messages. -pub type ToBridgeHubWestendMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWestend chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWestend; - -impl UnderlyingChainProvider for BridgeHubWestend { - type Chain = bp_bridge_hub_westend::BridgeHubWestend; -} - -impl messages::BridgedChainWithMessages for BridgeHubWestend {} - /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< RefundBridgedMessages< @@ -196,26 +157,28 @@ pub type WithBridgeHubWestendMessagesInstance = pallet_bridge_messages::Instance impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages_rococo_to_westend::WeightInfo; - type BridgedChainId = BridgeHubWestendChainId; + + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWestend; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToBridgeHubWestendMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, DeliveryRewardInBalance, >; - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch< FromWestendMessageBlobDispatcher, Self::WeightInfo, @@ -248,9 +211,8 @@ mod tests { assert_complete_bridge_types, extensions::refund_relayer_extension::RefundableParachain, integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - 
AssertCompleteBridgeConstants, + assert_complete_with_parachain_bridge_constants, check_message_lane_weights, + AssertChainConstants, AssertCompleteBridgeConstants, }, }; use parachains_common::Balance; @@ -292,36 +254,20 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, - bridge: WithBridgeHubWestendMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_westend::Westend, + this_chain: bp_bridge_hub_rococo::BridgeHubRococo, + bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, ); - assert_complete_bridge_constants::< + assert_complete_with_parachain_bridge_constants::< Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, - WithBridgeHubWestendMessageBridge, + bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), block_weights: bp_bridge_hub_rococo::BlockWeightsForAsyncBacking::get(), }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: BridgeHubWestend::ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: - bp_westend::WITH_WESTEND_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, - }, }); bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< @@ -332,7 +278,7 @@ mod tests { bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< Runtime, - RefundableParachain, + RefundableParachain, PriorityBoostPerParachainHeader, >(FEE_BOOST_PER_PARACHAIN_HEADER); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e7868bcbc78d0..512c1199f4392 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -35,6 +35,9 @@ pub mod bridge_to_westend_config; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use bridge_runtime_common::extensions::{ check_obsolete_extension::{ CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, @@ -58,7 +61,6 @@ use sp_runtime::{ ApplyExtrinsicResult, FixedU128, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -98,7 +100,7 @@ pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -110,6 +112,8 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; +#[cfg(feature = "runtime-benchmarks")] +use alloc::boxed::Box; 
#[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; @@ -214,7 +218,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -862,7 +866,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -966,7 +970,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -979,11 +983,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -998,7 +1002,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -1008,6 +1012,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -1185,7 +1201,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -1420,7 +1436,7 @@ impl_runtime_apis! { prepare_message_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, - bridge_to_westend_config::WithBridgeHubWestendMessageBridge, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Rococo), Parachain(42)].into())) } @@ -1430,7 +1446,7 @@ impl_runtime_apis! 
{ prepare_message_delivery_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, - bridge_to_westend_config::WithBridgeHubWestendMessageBridge, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, >(params) } @@ -1455,7 +1471,7 @@ impl_runtime_apis! { prepare_message_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, - bridge_to_bulletin_config::WithRococoBulletinMessageBridge, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Rococo), Parachain(42)].into())) } @@ -1465,7 +1481,7 @@ impl_runtime_apis! { prepare_message_delivery_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, - bridge_to_bulletin_config::WithRococoBulletinMessageBridge, + bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >(params) } @@ -1491,7 +1507,7 @@ impl_runtime_apis! { fn prepare_parachain_heads_proof( parachains: &[bp_polkadot_core::parachains::ParaId], parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, + proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( pallet_bridge_parachains::RelayBlockNumber, pallet_bridge_parachains::RelayBlockHash, @@ -1501,7 +1517,7 @@ impl_runtime_apis! { prepare_parachain_heads_proof::( parachains, parachain_head_size, - proof_size, + proof_params, ) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs index dc480c391636a..8fcd7b10d931b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -47,7 +47,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 11e1439a1f6df..4ce57b2e50161 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,15 +64,17 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { + fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `438 + p * (60 ยฑ0)` // Estimated: `51735` - // Minimum execution time: 300_829_000 picoseconds. - Weight::from_parts(321_573_000, 0) + // Minimum execution time: 325_365_000 picoseconds. + Weight::from_parts(14_958_535, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 25_917 - .saturating_add(Weight::from_parts(48_613_160, 0).saturating_mul(p.into())) + // Standard Error: 15_085 + .saturating_add(Weight::from_parts(41_227_904, 0).saturating_mul(p.into())) + // Standard Error: 50_338 + .saturating_add(Weight::from_parts(2_664_555, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -90,8 +92,8 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 36_661_000 picoseconds. - Weight::from_parts(38_106_000, 0) + // Minimum execution time: 37_206_000 picoseconds. + Weight::from_parts(38_545_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,13 +74,17 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 47_599_000 picoseconds. - Weight::from_parts(49_731_000, 0) + // Minimum execution time: 37_075_000 picoseconds. + Weight::from_parts(37_757_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 5_776 + .saturating_add(Weight::from_parts(11_586_768, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -94,10 +98,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `621` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 42_211_000 picoseconds. - Weight::from_parts(43_454_000, 0) + // Minimum execution time: 42_087_000 picoseconds. 
+ Weight::from_parts(42_970_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -108,30 +112,20 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `589` - // Estimated: `52645` - // Minimum execution time: 36_072_000 picoseconds. - Weight::from_parts(37_260_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgePolkadotBulletinMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgePolkadotBulletinMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589` + // Measured: `654` // Estimated: `52645` - // Minimum execution time: 66_995_000 picoseconds. - Weight::from_parts(68_661_000, 0) + // Minimum execution time: 35_055_000 picoseconds. + Weight::from_parts(36_987_740, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_316, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0) @@ -142,10 +136,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_553_000 picoseconds. - Weight::from_parts(26_205_000, 0) + // Minimum execution time: 24_326_000 picoseconds. 
+ Weight::from_parts(25_169_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -158,10 +152,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_610_000 picoseconds. - Weight::from_parts(26_273_000, 0) + // Minimum execution time: 24_484_000 picoseconds. + Weight::from_parts(25_130_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -174,10 +168,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `588` + // Measured: `621` // Estimated: `2543` - // Minimum execution time: 25_651_000 picoseconds. - Weight::from_parts(26_172_000, 0) + // Minimum execution time: 24_450_000 picoseconds. + Weight::from_parts(25_164_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -191,7 +185,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -201,20 +195,20 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. 
+ fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `780` + // Measured: `813` // Estimated: `52645` - // Minimum execution time: 64_219_000 picoseconds. - Weight::from_parts(65_848_290, 0) + // Minimum execution time: 54_317_000 picoseconds. + Weight::from_parts(59_171_547, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 43 - .saturating_add(Weight::from_parts(7_577, 0).saturating_mul(i.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(7_566, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs index 30ea9eed4a5b4..9c05dae979daa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_messages` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -51,7 +51,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -60,10 +60,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 40_349_000 picoseconds. - Weight::from_parts(41_856_000, 0) + // Minimum execution time: 41_396_000 picoseconds. 
+ Weight::from_parts(43_141_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -71,27 +71,31 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 50_514_000 picoseconds. - Weight::from_parts(52_254_000, 0) + // Minimum execution time: 41_095_000 picoseconds. + Weight::from_parts(42_030_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 5_702 + .saturating_add(Weight::from_parts(11_627_951, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -100,10 +104,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 45_761_000 picoseconds. - Weight::from_parts(47_075_000, 0) + // Minimum execution time: 45_912_000 picoseconds. 
+ Weight::from_parts(47_564_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -111,37 +115,25 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `573` - // Estimated: `52645` - // Minimum execution time: 39_098_000 picoseconds. - Weight::from_parts(40_577_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `573` + // Measured: `658` // Estimated: `52645` - // Minimum execution time: 69_120_000 picoseconds. - Weight::from_parts(71_810_000, 0) + // Minimum execution time: 39_175_000 picoseconds. 
+ Weight::from_parts(41_674_095, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) @@ -156,11 +148,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 32_325_000 picoseconds. - Weight::from_parts(33_070_000, 0) - .saturating_add(Weight::from_parts(0, 3912)) + // Measured: `501` + // Estimated: `3966` + // Minimum execution time: 32_033_000 picoseconds. + Weight::from_parts(33_131_000, 0) + .saturating_add(Weight::from_parts(0, 3966)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -176,11 +168,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 32_180_000 picoseconds. - Weight::from_parts(33_202_000, 0) - .saturating_add(Weight::from_parts(0, 3912)) + // Measured: `501` + // Estimated: `3966` + // Minimum execution time: 32_153_000 picoseconds. + Weight::from_parts(33_126_000, 0) + .saturating_add(Weight::from_parts(0, 3966)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -196,10 +188,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `501` // Estimated: `6086` - // Minimum execution time: 36_774_000 picoseconds. - Weight::from_parts(37_774_000, 0) + // Minimum execution time: 36_387_000 picoseconds. 
+ Weight::from_parts(37_396_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -207,7 +199,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) @@ -215,7 +207,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -225,18 +217,18 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `736` + // Measured: `789` // Estimated: `52645` - // Minimum execution time: 65_934_000 picoseconds. - Weight::from_parts(67_915_916, 0) + // Minimum execution time: 56_562_000 picoseconds. 
+ Weight::from_parts(61_452_871, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 65 - .saturating_add(Weight::from_parts(7_190, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(7_587, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index ea68852804e39..8eb291ea14523 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,20 +56,22 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 31_135_000 picoseconds. - Weight::from_parts(32_061_351, 0) + // Minimum execution time: 34_889_000 picoseconds. 
+ Weight::from_parts(36_100_759, 0) .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 80_309 - .saturating_add(Weight::from_parts(99_724, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + // Standard Error: 102_466 + .saturating_add(Weight::from_parts(178_820, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -79,17 +81,19 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 32_263_000 picoseconds. - Weight::from_parts(33_139_000, 0) + // Minimum execution time: 36_501_000 picoseconds. + Weight::from_parts(37_266_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -99,16 +103,18 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `558` // Estimated: `2543` - // Minimum execution time: 61_313_000 picoseconds. - Weight::from_parts(62_200_000, 0) + // Minimum execution time: 66_059_000 picoseconds. 
+ Weight::from_parts(67_139_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index 5ab4cb900d848..f8bb983e80aa7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_rewards() -> Weight { // Proof Size summary in bytes: - // Measured: `244` + // Measured: `278` // Estimated: `3593` - // Minimum execution time: 45_393_000 picoseconds. - Weight::from_parts(46_210_000, 0) + // Minimum execution time: 44_224_000 picoseconds. + Weight::from_parts(44_905_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -70,10 +70,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `97` + // Measured: `131` // Estimated: `4714` - // Minimum execution time: 23_767_000 picoseconds. - Weight::from_parts(24_217_000, 0) + // Minimum execution time: 23_902_000 picoseconds. + Weight::from_parts(24_702_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,10 +84,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `Balances::Reserves` (`max_values`: None, `max_size`: Some(1249), added: 3724, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `197` + // Measured: `231` // Estimated: `4714` - // Minimum execution time: 25_745_000 picoseconds. - Weight::from_parts(26_319_000, 0) + // Minimum execution time: 24_469_000 picoseconds. 
+ Weight::from_parts(25_176_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -100,10 +100,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn slash_and_deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `300` + // Measured: `334` // Estimated: `4714` - // Minimum execution time: 27_497_000 picoseconds. - Weight::from_parts(27_939_000, 0) + // Minimum execution time: 27_518_000 picoseconds. + Weight::from_parts(28_068_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -112,10 +112,10 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn register_relayer_reward() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `76` // Estimated: `3538` - // Minimum execution time: 5_584_000 picoseconds. - Weight::from_parts(5_908_000, 0) + // Minimum execution time: 5_484_000 picoseconds. + Weight::from_parts(5_718_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs index 2fcd573ceb277..b6fee47d14351 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index 4f5bae0fe597b..b40cbfeeb8f27 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -17,11 +17,11 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index d7e8c41ff8ac4..057dc4313510f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index abd84f8e89b07..9c58072d402c9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 61_813_000 picoseconds. - Weight::from_parts(62_996_000, 6196) + // Minimum execution time: 60_119_000 picoseconds. 
+ Weight::from_parts(61_871_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_112_000, 0) + // Minimum execution time: 998_000 picoseconds. + Weight::from_parts(1_038_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +86,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_472_000 picoseconds. - Weight::from_parts(7_723_000, 3497) + // Minimum execution time: 6_327_000 picoseconds. + Weight::from_parts(6_520_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_414_000 picoseconds. - Weight::from_parts(8_765_000, 0) + // Minimum execution time: 6_783_000 picoseconds. + Weight::from_parts(7_117_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(2_243_000, 0) + // Minimum execution time: 1_589_000 picoseconds. + Weight::from_parts(1_655_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_866_000 picoseconds. - Weight::from_parts(1_931_000, 0) + // Minimum execution time: 1_013_000 picoseconds. + Weight::from_parts(1_045_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_847_000 picoseconds. - Weight::from_parts(1_921_000, 0) + // Minimum execution time: 1_005_000 picoseconds. + Weight::from_parts(1_044_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_797_000 picoseconds. - Weight::from_parts(1_880_000, 0) + // Minimum execution time: 964_000 picoseconds. + Weight::from_parts(1_011_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_458_000 picoseconds. - Weight::from_parts(2_523_000, 0) + // Minimum execution time: 1_005_000 picoseconds. + Weight::from_parts(1_027_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_833_000 picoseconds. - Weight::from_parts(1_906_000, 0) + // Minimum execution time: 980_000 picoseconds. + Weight::from_parts(1_009_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 54_659_000 picoseconds. - Weight::from_parts(56_025_000, 6196) + // Minimum execution time: 56_726_000 picoseconds. + Weight::from_parts(59_300_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_953_000 picoseconds. 
- Weight::from_parts(11_220_000, 3555) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_519_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_834_000 picoseconds. - Weight::from_parts(1_892_000, 0) + // Minimum execution time: 999_000 picoseconds. + Weight::from_parts(1_035_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 22_238_000 picoseconds. - Weight::from_parts(22_690_000, 3503) + // Minimum execution time: 20_313_000 picoseconds. + Weight::from_parts(21_000_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +211,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_798_000 picoseconds. - Weight::from_parts(3_936_000, 0) + // Minimum execution time: 2_820_000 picoseconds. + Weight::from_parts(2_949_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_985_000 picoseconds. - Weight::from_parts(3_099_000, 0) + // Minimum execution time: 1_293_000 picoseconds. + Weight::from_parts(1_354_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_955_000 picoseconds. - Weight::from_parts(2_050_000, 0) + // Minimum execution time: 1_076_000 picoseconds. + Weight::from_parts(1_114_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_939_000 picoseconds. - Weight::from_parts(1_990_000, 0) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_055_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_841_000 picoseconds. - Weight::from_parts(1_900_000, 0) + // Minimum execution time: 979_000 picoseconds. + Weight::from_parts(1_019_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_081_000 picoseconds. - Weight::from_parts(2_145_000, 0) + // Minimum execution time: 1_161_000 picoseconds. + Weight::from_parts(1_208_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +270,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 59_600_000 picoseconds. - Weight::from_parts(61_572_000, 6196) + // Minimum execution time: 62_250_000 picoseconds. + Weight::from_parts(64_477_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +279,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_390_000 picoseconds. 
- Weight::from_parts(4_517_000, 0) + // Minimum execution time: 4_286_000 picoseconds. + Weight::from_parts(4_476_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +302,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_527_000, 6196) + // Minimum execution time: 58_253_000 picoseconds. + Weight::from_parts(59_360_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +311,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_879_000 picoseconds. - Weight::from_parts(1_947_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_065_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_827_000 picoseconds. - Weight::from_parts(1_900_000, 0) + // Minimum execution time: 993_000 picoseconds. + Weight::from_parts(1_015_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_898_000, 0) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(999_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -339,16 +339,16 @@ impl WeightInfo { // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `6130` - // Minimum execution time: 41_598_000 picoseconds. - Weight::from_parts(42_219_173, 6130) - // Standard Error: 426 - .saturating_add(Weight::from_parts(452_460, 0).saturating_mul(x.into())) + // Minimum execution time: 37_014_000 picoseconds. + Weight::from_parts(38_096_655, 6130) + // Standard Error: 61 + .saturating_add(Weight::from_parts(45_146, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +356,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_812_000 picoseconds. - Weight::from_parts(1_898_000, 0) + // Minimum execution time: 996_000 picoseconds. + Weight::from_parts(1_025_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_976_000, 0) + // Minimum execution time: 1_001_000 picoseconds. 
+		Weight::from_parts(1_044_000, 0)
 	}
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
index a0d2e91dffd2e..5ec545ee0590f 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
@@ -22,6 +22,7 @@ use super::{
 use bp_messages::LaneId;
 use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams};
 use bp_runtime::ChainId;
+use core::marker::PhantomData;
 use frame_support::{
 	parameter_types,
 	traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing},
@@ -41,7 +42,6 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice;
 use snowbridge_runtime_common::XcmExportFeeToSibling;
 use sp_core::Get;
 use sp_runtime::traits::AccountIdConversion;
-use sp_std::marker::PhantomData;
 use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork;
 use xcm::latest::prelude::*;
 use xcm_builder::{
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index b309232825db3..1d3d9e55f7eeb 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -148,8 +148,7 @@ mod bridge_hub_westend_tests {
 	use bridge_hub_test_utils::test_cases::from_parachain;
 	use bridge_to_westend_config::{
 		BridgeHubWestendChainId, BridgeHubWestendLocation, WestendGlobalConsensusNetwork,
-		WithBridgeHubWestendMessageBridge, WithBridgeHubWestendMessagesInstance,
-		XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND,
+		WithBridgeHubWestendMessagesInstance, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND,
 	};
 
 	// Para id of sibling chain used in tests.
@@ -162,7 +161,6 @@ mod bridge_hub_westend_tests {
 		BridgeGrandpaWestendInstance,
 		BridgeParachainWestendInstance,
 		WithBridgeHubWestendMessagesInstance,
-		WithBridgeHubWestendMessageBridge,
 	>;
 
 	#[test]
@@ -457,8 +455,8 @@ mod bridge_hub_bulletin_tests {
 	use bridge_hub_test_utils::test_cases::from_grandpa_chain;
 	use bridge_to_bulletin_config::{
 		RococoBulletinChainId, RococoBulletinGlobalConsensusNetwork,
-		RococoBulletinGlobalConsensusNetworkLocation, WithRococoBulletinMessageBridge,
-		WithRococoBulletinMessagesInstance, XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN,
+		RococoBulletinGlobalConsensusNetworkLocation, WithRococoBulletinMessagesInstance,
+		XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN,
 	};
 
 	// Para id of sibling chain used in tests.
@@ -470,7 +468,6 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, - WithRococoBulletinMessageBridge, >; #[test] @@ -594,44 +591,4 @@ mod bridge_hub_bulletin_tests { construct_and_apply_extrinsic, ) } - - #[test] - pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { - bridge_hub_test_utils::check_sane_fees_values( - "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", - bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), - || { - from_grandpa_chain::can_calculate_fee_for_standalone_message_delivery_transaction::< - RuntimeTestsAdapter, - >(collator_session_keys(), construct_and_estimate_extrinsic_fee) - }, - Perbill::from_percent(33), - None, /* we don't want lowering according to the Bulletin setup, because - * `from_grandpa_chain` is cheaper then `from_parachain_chain` */ - &format!( - "Estimate fee for `single message delivery` for runtime: {:?}", - ::Version::get() - ), - ) - } - - #[test] - pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { - bridge_hub_test_utils::check_sane_fees_values( - "bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", - bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), - || { - from_grandpa_chain::can_calculate_fee_for_standalone_message_confirmation_transaction::< - RuntimeTestsAdapter, - >(collator_session_keys(), construct_and_estimate_extrinsic_fee) - }, - Perbill::from_percent(33), - None, /* we don't want lowering according to the Bulletin setup, because - * `from_grandpa_chain` is cheaper then `from_parachain_chain` */ - &format!( - "Estimate fee for `single message confirmation` for runtime: {:?}", - ::Version::get() - ), - ) - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 0f16d629fc260..e2671d3d606d1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -10,102 +10,101 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -tuplex = { version = "0.1", default-features = false } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = 
true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { 
workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +westend-runtime-constants = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", 
default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } -pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } -bridge-hub-common = { path = "../common", default-features = false } +bp-asset-hub-rococo = { workspace = true } +bp-asset-hub-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-parachains = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { features = ["test-helpers"], workspace = true } +bp-rococo = { workspace = true } +bp-westend = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-messages = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-relayers = { workspace = true } +pallet-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } +bridge-hub-common = { workspace = true } [dev-dependencies] -static_assertions = "1.1" -bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = ["integrity-test"] } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +static_assertions = { workspace = true, 
default-features = true } +bridge-hub-test-utils = { workspace = true, default-features = true } +bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] @@ -182,11 +181,10 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", - "tuplex/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -221,7 +219,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 425b53da30fc8..42d5ef3eebdb3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -18,10 +18,12 @@ use crate::{ bridge_common_config::DeliveryRewardInBalance, weights, xcm_config::UniversalLocation, - AccountId, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, RuntimeOrigin, - XcmOverBridgeHubRococo, XcmRouter, + BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, XcmOverBridgeHubRococo, XcmRouter, +}; +use bp_messages::{ + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, LaneId, }; -use bp_messages::LaneId; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::Chain; use bridge_runtime_common::{ @@ -29,12 +31,6 @@ use bridge_runtime_common::{ ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, messages_xcm_extension::{ SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, @@ -45,7 +41,6 @@ use frame_support::{ parameter_types, traits::{ConstU32, PalletInfoAccess}, }; -use sp_runtime::RuntimeDebug; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, @@ -59,11 +54,7 @@ parameter_types! { pub const RococoBridgeParachainPalletName: &'static str = "Paras"; pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_westend::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_westend::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = BridgeHubRococo::ID; + pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_bridge_hub_rococo::BridgeHubRococo::ID; pub BridgeWestendToRococoMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; pub RococoGlobalConsensusNetworkLocation: Location = Location::new( @@ -87,7 +78,7 @@ parameter_types! 
{ ParentThen([Parachain(AssetHubWestendParaId::get().into())].into()).into(), XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, ); - pub ActiveLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = sp_std::vec![ + pub ActiveLanes: alloc::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = alloc::vec![ ( FromAssetHubWestendToAssetHubRococoRoute::get(), (RococoGlobalConsensusNetwork::get(), [Parachain(AssetHubRococoParaId::get().into())].into()) @@ -107,8 +98,8 @@ parameter_types! { } pub const XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 2]); -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ +fn build_congestion_message(is_congested: bool) -> alloc::vec::Vec> { + alloc::vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, @@ -153,46 +144,6 @@ impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { /// On messages delivered callback. type OnMessagesDelivered = XcmBlobHaulerAdapter; -/// Messaging Bridge configuration for BridgeHubWestend -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWestend; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Maximal outbound payload size of BridgeHubWestend -> BridgeHubRococo messages. -type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWestend chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWestend; - -impl UnderlyingChainProvider for BridgeHubWestend { - type Chain = bp_bridge_hub_westend::BridgeHubWestend; -} - -impl ThisChainWithMessages for BridgeHubWestend { - type RuntimeOrigin = RuntimeOrigin; -} - /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< RefundBridgedMessages< @@ -237,26 +188,28 @@ pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance1 impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_bridge_messages::WeightInfo; - type BridgedChainId = BridgeHubRococoChainId; + + type ThisChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainRococoInstance, + bp_bridge_hub_rococo::BridgeHubRococo, + >; + type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; type OutboundPayload = XcmAsPlainPayload; type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, DeliveryRewardInBalance, >; - type SourceHeaderChain = SourceHeaderChainAdapter; type MessageDispatch = XcmBlobMessageDispatch< FromRococoMessageBlobDispatcher, Self::WeightInfo, @@ -287,9 +240,8 @@ mod tests { assert_complete_bridge_types, extensions::refund_relayer_extension::RefundableParachain, integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, + assert_complete_with_parachain_bridge_constants, check_message_lane_weights, + AssertChainConstants, AssertCompleteBridgeConstants, }, }; use parachains_common::Balance; @@ -331,35 +283,20 @@ mod tests { runtime: Runtime, with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_westend::Westend, - bridged_chain: bp_rococo::Rococo, + this_chain: bp_bridge_hub_westend::BridgeHubWestend, + bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, ); - assert_complete_bridge_constants::< + assert_complete_with_parachain_bridge_constants::< Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, + bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), block_weights: bp_bridge_hub_westend::BlockWeightsForAsyncBacking::get(), }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: BridgeHubRococo::ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_westend::WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - 
bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, }); bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< @@ -370,7 +307,7 @@ mod tests { bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< Runtime, - RefundableParachain, + RefundableParachain, PriorityBoostPerParachainHeader, >(FEE_BOOST_PER_PARACHAIN_HEADER); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e26d490f9ac11..5d4c35d6610a0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -32,6 +32,9 @@ pub mod bridge_to_rococo_config; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use bridge_runtime_common::extensions::{ check_obsolete_extension::{ CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, @@ -49,7 +52,6 @@ use sp_runtime::{ ApplyExtrinsicResult, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -75,7 +77,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -189,7 +191,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -612,7 +614,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -716,7 +718,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -729,11 +731,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -748,7 +750,7 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -758,6 +760,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -874,7 +888,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -916,7 +930,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(Assets, u32, Location, Box)> { + ) -> Option<(Assets, u32, Location, alloc::boxed::Box)> { // BH only supports teleports to system parachain. // Relay/native token can be teleported between BH and Relay. let native_location = Parent.into(); @@ -1039,7 +1053,7 @@ impl_runtime_apis! { // save XCM version for remote bridge hub let _ = PolkadotXcm::force_xcm_version( RuntimeOrigin::root(), - Box::new(bridge_to_rococo_config::BridgeHubRococoLocation::get()), + alloc::boxed::Box::new(bridge_to_rococo_config::BridgeHubRococoLocation::get()), XCM_VERSION, ).map_err(|e| { log::error!( @@ -1106,7 +1120,7 @@ impl_runtime_apis! { prepare_message_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, >(params, generate_xcm_builder_bridge_message_sample([GlobalConsensus(Westend), Parachain(42)].into())) } @@ -1116,7 +1130,7 @@ impl_runtime_apis! { prepare_message_delivery_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, >(params) } @@ -1142,7 +1156,7 @@ impl_runtime_apis! { fn prepare_parachain_heads_proof( parachains: &[bp_polkadot_core::parachains::ParaId], parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, + proof_params: bp_runtime::UnverifiedStorageProofParams, ) -> ( pallet_bridge_parachains::RelayBlockNumber, pallet_bridge_parachains::RelayBlockHash, @@ -1152,7 +1166,7 @@ impl_runtime_apis! 
{ prepare_parachain_heads_proof::( parachains, parachain_head_size, - proof_size, + proof_params, ) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs index dc480c391636a..8fcd7b10d931b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -47,7 +47,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index e98be6ba39be7..fa7efc260489b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -68,13 +68,13 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -60,10 +60,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 40_646_000 picoseconds. - Weight::from_parts(41_754_000, 0) + // Minimum execution time: 40_748_000 picoseconds. 
+ Weight::from_parts(41_836_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -71,27 +71,30 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { + /// The range of component `n` is `[1, 4076]`. + fn receive_n_messages_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 50_898_000 picoseconds. - Weight::from_parts(52_743_000, 0) + // Minimum execution time: 40_923_000 picoseconds. + Weight::from_parts(41_287_000, 0) .saturating_add(Weight::from_parts(0, 52645)) + // Standard Error: 9_774 + .saturating_add(Weight::from_parts(11_469_207, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -100,10 +103,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `502` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 45_848_000 picoseconds. - Weight::from_parts(47_036_000, 0) + // Minimum execution time: 45_946_000 picoseconds. 
+ Weight::from_parts(47_547_000, 0) .saturating_add(Weight::from_parts(0, 52645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) @@ -111,37 +114,24 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `433` - // Estimated: `52645` - // Minimum execution time: 39_085_000 picoseconds. - Weight::from_parts(41_623_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `433` + // Measured: `522` // Estimated: `52645` - // Minimum execution time: 72_754_000 picoseconds. - Weight::from_parts(74_985_000, 0) + // Minimum execution time: 39_668_000 picoseconds. 
+ Weight::from_parts(41_908_980, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(2_209, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) @@ -156,11 +146,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 31_479_000 picoseconds. - Weight::from_parts(32_280_000, 0) - .saturating_add(Weight::from_parts(0, 3802)) + // Measured: `357` + // Estimated: `3822` + // Minimum execution time: 30_544_000 picoseconds. + Weight::from_parts(31_171_000, 0) + .saturating_add(Weight::from_parts(0, 3822)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -176,11 +166,11 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 31_807_000 picoseconds. - Weight::from_parts(32_219_000, 0) - .saturating_add(Weight::from_parts(0, 3802)) + // Measured: `357` + // Estimated: `3822` + // Minimum execution time: 30_593_000 picoseconds. + Weight::from_parts(31_261_000, 0) + .saturating_add(Weight::from_parts(0, 3822)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -196,10 +186,10 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `337` + // Measured: `357` // Estimated: `6086` - // Minimum execution time: 36_450_000 picoseconds. - Weight::from_parts(37_288_000, 0) + // Minimum execution time: 34_682_000 picoseconds. 
+ Weight::from_parts(35_277_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -207,7 +197,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:1 w:0) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoMessages::InboundLanes` (r:1 w:1) @@ -215,7 +205,7 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -225,17 +215,17 @@ impl pallet_bridge_messages::WeightInfo for WeightInfo< /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16384]`. + fn receive_single_n_bytes_message_proof_with_dispatch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633` + // Measured: `653` // Estimated: `52645` - // Minimum execution time: 67_047_000 picoseconds. - Weight::from_parts(68_717_105, 0) + // Minimum execution time: 56_465_000 picoseconds. 
+ Weight::from_parts(61_575_775, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 138 - .saturating_add(Weight::from_parts(8_056, 0).saturating_mul(i.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(7_197, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs index 9819bd4065411..b4748f1417059 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_parachains.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_parachains` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,18 +56,20 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 29_994_000 picoseconds. - Weight::from_parts(31_005_636, 0) + // Minimum execution time: 34_177_000 picoseconds. 
+ Weight::from_parts(35_662_308, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -77,17 +79,19 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 31_425_000 picoseconds. - Weight::from_parts(32_163_000, 0) + // Minimum execution time: 35_975_000 picoseconds. + Weight::from_parts(36_510_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `BridgeRococoParachains::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) @@ -97,16 +101,18 @@ impl pallet_bridge_parachains::WeightInfo for WeightInf /// Proof: `BridgeRococoParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHashes` (r:1 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoParachains::ImportedParaHeads` (r:0 w:1) /// Proof: `BridgeRococoParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `291` + // Measured: `315` // Estimated: `2543` - // Minimum execution time: 60_062_000 picoseconds. - Weight::from_parts(61_201_000, 0) + // Minimum execution time: 62_837_000 picoseconds. 
+ Weight::from_parts(63_562_000, 0) .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs index ed96f0cd87c9e..60d81dc3082a8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_relayers.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_relayers` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 45_732_000 picoseconds. - Weight::from_parts(46_282_000, 0) + // Minimum execution time: 43_132_000 picoseconds. + Weight::from_parts(43_923_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -72,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 22_934_000 picoseconds. - Weight::from_parts(23_531_000, 0) + // Minimum execution time: 22_765_000 picoseconds. + Weight::from_parts(23_576_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 25_187_000 picoseconds. - Weight::from_parts(25_679_000, 0) + // Minimum execution time: 24_013_000 picoseconds. + Weight::from_parts(24_460_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -102,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 27_015_000 picoseconds. - Weight::from_parts(27_608_000, 0) + // Minimum execution time: 26_946_000 picoseconds. + Weight::from_parts(27_485_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -114,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 5_207_000 picoseconds. 
- Weight::from_parts(5_394_000, 0) + // Minimum execution time: 4_658_000 picoseconds. + Weight::from_parts(4_902_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs index 2fcd573ceb277..b6fee47d14351 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index e8950678b40fd..3961cc6d5cdd6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -18,11 +18,11 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 295abd481d7dc..4310b24564758 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9281a880c7e12..ba434ff29629f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 61_577_000 picoseconds. - Weight::from_parts(63_216_000, 6196) + // Minimum execution time: 58_505_000 picoseconds. + Weight::from_parts(60_437_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_019_000 picoseconds. - Weight::from_parts(2_146_000, 0) + // Minimum execution time: 510_000 picoseconds. + Weight::from_parts(569_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +86,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_473_000 picoseconds. - Weight::from_parts(7_784_000, 3497) + // Minimum execution time: 5_597_000 picoseconds. + Weight::from_parts(5_884_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_385_000 picoseconds. - Weight::from_parts(8_768_000, 0) + // Minimum execution time: 5_320_000 picoseconds. + Weight::from_parts(5_594_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_181_000 picoseconds. - Weight::from_parts(2_304_000, 0) + // Minimum execution time: 1_164_000 picoseconds. + Weight::from_parts(1_227_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_858_000 picoseconds. - Weight::from_parts(1_919_000, 0) + // Minimum execution time: 528_000 picoseconds. + Weight::from_parts(586_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_855_000 picoseconds. - Weight::from_parts(1_979_000, 0) + // Minimum execution time: 509_000 picoseconds. + Weight::from_parts(571_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_823_000 picoseconds. - Weight::from_parts(1_890_000, 0) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(546_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_407_000 picoseconds. - Weight::from_parts(2_507_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(600_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_838_000 picoseconds. 
- Weight::from_parts(1_894_000, 0) + // Minimum execution time: 514_000 picoseconds. + Weight::from_parts(558_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 54_847_000 picoseconds. - Weight::from_parts(55_742_000, 6196) + // Minimum execution time: 55_871_000 picoseconds. + Weight::from_parts(57_172_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_614_000 picoseconds. - Weight::from_parts(11_344_000, 3555) + // Minimum execution time: 8_487_000 picoseconds. + Weight::from_parts(8_800_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_826_000 picoseconds. - Weight::from_parts(1_899_000, 0) + // Minimum execution time: 528_000 picoseconds. + Weight::from_parts(569_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 22_312_000 picoseconds. - Weight::from_parts(22_607_000, 3503) + // Minimum execution time: 19_803_000 picoseconds. + Weight::from_parts(20_368_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +211,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_728_000 picoseconds. - Weight::from_parts(3_914_000, 0) + // Minimum execution time: 2_185_000 picoseconds. + Weight::from_parts(2_332_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_054_000 picoseconds. - Weight::from_parts(3_140_000, 0) + // Minimum execution time: 822_000 picoseconds. + Weight::from_parts(928_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_996_000 picoseconds. - Weight::from_parts(2_148_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(643_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_008_000 picoseconds. - Weight::from_parts(2_077_000, 0) + // Minimum execution time: 503_000 picoseconds. + Weight::from_parts(580_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(1_913_000, 0) + // Minimum execution time: 534_000 picoseconds. + Weight::from_parts(577_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_052_000 picoseconds. - Weight::from_parts(2_120_000, 0) + // Minimum execution time: 694_000 picoseconds. 
+ Weight::from_parts(745_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +270,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 58_725_000 picoseconds. - Weight::from_parts(60_271_000, 6196) + // Minimum execution time: 61_083_000 picoseconds. + Weight::from_parts(62_214_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +279,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_570_000 picoseconds. - Weight::from_parts(4_707_000, 0) + // Minimum execution time: 3_261_000 picoseconds. + Weight::from_parts(3_483_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +302,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 54_903_000 picoseconds. - Weight::from_parts(55_711_000, 6196) + // Minimum execution time: 56_270_000 picoseconds. + Weight::from_parts(57_443_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +311,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_872_000 picoseconds. - Weight::from_parts(1_938_000, 0) + // Minimum execution time: 565_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_836_000 picoseconds. - Weight::from_parts(1_903_000, 0) + // Minimum execution time: 496_000 picoseconds. + Weight::from_parts(563_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_847_000 picoseconds. - Weight::from_parts(1_900_000, 0) + // Minimum execution time: 518_000 picoseconds. + Weight::from_parts(557_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -339,16 +339,16 @@ impl WeightInfo { // Storage: `BridgeRococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) // Proof: `BridgeRococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) // Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `6165` - // Minimum execution time: 41_750_000 picoseconds. - Weight::from_parts(43_496_915, 6165) - // Standard Error: 623 - .saturating_add(Weight::from_parts(457_907, 0).saturating_mul(x.into())) + // Minimum execution time: 36_288_000 picoseconds. 
+ Weight::from_parts(37_707_751, 6165) + // Standard Error: 124 + .saturating_add(Weight::from_parts(51_290, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +356,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_826_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 485_000 picoseconds. + Weight::from_parts(540_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_967_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(586_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 836594140b232..763271fd7af0e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -28,8 +28,8 @@ use bridge_hub_westend_runtime::{ }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeHubRococoLocation, - BridgeParachainRococoInstance, WithBridgeHubRococoMessageBridge, - WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, + BridgeParachainRococoInstance, WithBridgeHubRococoMessagesInstance, + XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, }; use codec::{Decode, Encode}; use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; @@ -53,7 +53,6 @@ type RuntimeTestsAdapter = from_parachain::WithRemoteParachainHelperAdapter< BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, >; parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index aece34613e6a6..3ae43075000ba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -7,16 +7,15 @@ description = "Bridge hub common utilities" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -snowbridge-core = { path = "../../../../../bridges/snowbridge/primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +cumulus-primitives-core = { workspace = true } +xcm = { workspace = true } +pallet-message-queue = { workspace = true } +snowbridge-core = { workspace = true } [features] default = ["std"] @@ -29,7 +28,6 @@ std = [ "snowbridge-core/std", "sp-core/std", "sp-runtime/std", - "sp-std/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs index c1bba65b0abc3..5f91897262f4b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs @@ -14,6 +14,7 @@ // limitations under the License. //! Runtime configuration for MessageQueue pallet use codec::{Decode, Encode, MaxEncodedLen}; +use core::marker::PhantomData; use cumulus_primitives_core::{AggregateMessageOrigin as CumulusAggregateMessageOrigin, ParaId}; use frame_support::{ traits::{ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery}, @@ -22,7 +23,6 @@ use frame_support::{ use pallet_message_queue::OnQueueChanged; use scale_info::TypeInfo; use snowbridge_core::ChannelId; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::v4::{Junction, Location}; /// The aggregate origin of an inbound message. 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 80f0114cc4cad..44a8646142d6c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -10,47 +10,46 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -impl-trait-for-tuples = "0.2" +codec = { features = ["derive", "max-encoded-len"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true } +pallet-utility = { workspace = true } +pallet-timestamp = { workspace = true } # Cumulus -asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +parachains-common = { workspace = true } +parachains-runtimes-test-utils = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Bridges -bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } -bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } -bp-polkadot-core = { path = 
"../../../../../bridges/primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } -bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-test-utils = { path = "../../../../../bridges/primitives/test-utils", default-features = false } -pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } -pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } -pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } -pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } +bp-header-chain = { workspace = true } +bp-messages = { workspace = true } +bp-polkadot-core = { workspace = true } +bp-relayers = { workspace = true } +bp-runtime = { workspace = true } +bp-test-utils = { workspace = true } +pallet-bridge-grandpa = { workspace = true } +pallet-bridge-parachains = { workspace = true } +pallet-bridge-messages = { features = ["test-helpers"], workspace = true } +pallet-bridge-relayers = { workspace = true } +bridge-runtime-common = { workspace = true } [features] default = ["std"] @@ -81,7 +80,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs index 1874f38de2df1..0b3463f0df974 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs @@ -19,6 +19,8 @@ pub mod test_cases; pub mod test_data; +extern crate alloc; + pub use bp_test_utils::test_header; pub use parachains_runtimes_test_utils::*; use sp_runtime::Perbill; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index bfa2f0f50f94c..d6dfa93731a7d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -22,22 +22,14 @@ use crate::{ test_data, }; +use alloc::{boxed::Box, vec}; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, - UnrewardedRelayersState, -}; +use bp_messages::{LaneId, UnrewardedRelayersState}; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{HashOf, UnderlyingChainOf}; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; +use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; @@ -53,13 +45,10 @@ pub trait 
WithRemoteGrandpaChainHelper { /// This chain runtime. type Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config - + BridgeGrandpaConfig< - Self::GPI, - BridgedChain = UnderlyingChainOf>, - > + BridgeMessagesConfig< + + BridgeGrandpaConfig> + + BridgeMessagesConfig< Self::MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config; /// All pallets of this chain, excluding system pallet. @@ -69,38 +58,33 @@ pub trait WithRemoteGrandpaChainHelper { type GPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote GRANDPA chain. type MPI: 'static; - /// Messages bridge definition. - type MB: MessageBridge; } /// Adapter struct that implements [`WithRemoteGrandpaChainHelper`]. -pub struct WithRemoteGrandpaChainHelperAdapter( - sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI, MB)>, +pub struct WithRemoteGrandpaChainHelperAdapter( + core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI)>, ); -impl WithRemoteGrandpaChainHelper - for WithRemoteGrandpaChainHelperAdapter +impl WithRemoteGrandpaChainHelper + for WithRemoteGrandpaChainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config - + BridgeGrandpaConfig>> + + BridgeGrandpaConfig> + BridgeMessagesConfig< MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, GPI: 'static, MPI: 'static, - MB: MessageBridge, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type MPI = MPI; - type MB = MB; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -124,13 +108,7 @@ pub fn relayed_incoming_message_works( AccountIdOf: From, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -161,7 +139,8 @@ pub fn relayed_incoming_message_works( // to be submitted by relayer to this chain. let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -186,7 +165,7 @@ pub fn relayed_incoming_message_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -233,13 +212,7 @@ pub fn free_relay_extrinsic_works( AccountIdOf: From, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { // ensure that the runtime allows free header submissions let free_headers_interval = ( // to be submitted by relayer to this chain. 
let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -322,7 +296,7 @@ pub fn free_relay_extrinsic_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -370,13 +344,7 @@ pub fn complex_relay_extrinsic_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -407,7 +375,8 @@ pub fn complex_relay_extrinsic_works( // to be submitted by relayer to this chain. let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -428,7 +397,7 @@ pub fn complex_relay_extrinsic_works( }.into(), BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -470,13 +439,7 @@ where pallet_utility::Config>, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -487,7 +450,8 @@ where // the message additionally let (relay_chain_header, grandpa_justification, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -526,19 +490,11 @@ where AccountIdOf: From, RuntimeHelper::Runtime: pallet_utility::Config>, - MessageThisChain: + ThisChainOf: bp_runtime::Chain>, RuntimeCallOf: From> + From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -550,7 +506,8 @@ where }; let (relay_chain_header, grandpa_justification, message_delivery_proof) = test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -587,13 +544,7 @@ where RuntimeHelper: WithRemoteGrandpaChainHelper, RuntimeCallOf: From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -604,7 +555,8 @@ where // the message additionally let (_, _, message_proof) = test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( 
LaneId::default(), @@ -639,19 +591,11 @@ pub fn can_calculate_fee_for_standalone_message_confirmation_transaction: From, - MessageThisChain: + ThisChainOf: bp_runtime::Chain>, RuntimeCallOf: From>, - UnderlyingChainOf>: ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, + BridgedChainOf: ChainWithGrandpa, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -663,7 +607,8 @@ where }; let (_, _, message_delivery_proof) = test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 12ab382d9e0f6..728b4e76b1055 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -22,23 +22,16 @@ use crate::{ test_data, }; +use alloc::{boxed::Box, vec}; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, - UnrewardedRelayersState, -}; +use bp_messages::{LaneId, UnrewardedRelayersState}; use bp_polkadot_core::parachains::ParaHash; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{HashOf, Parachain, UnderlyingChainOf}; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bp_runtime::{Chain, Parachain}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; +use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; @@ -59,7 +52,6 @@ pub trait WithRemoteParachainHelper { + BridgeMessagesConfig< Self::MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config; /// All pallets of this chain, excluding system pallet. @@ -71,17 +63,15 @@ pub trait WithRemoteParachainHelper { type PPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote parachain. type MPI: 'static; - /// Messages bridge definition. - type MB: MessageBridge; } /// Adapter struct that implements `WithRemoteParachainHelper`. 
-pub struct WithRemoteParachainHelperAdapter( - sp_std::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI, MB)>, +pub struct WithRemoteParachainHelperAdapter( + core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI)>, ); -impl WithRemoteParachainHelper - for WithRemoteParachainHelperAdapter +impl WithRemoteParachainHelper + for WithRemoteParachainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config @@ -90,7 +80,6 @@ where + BridgeMessagesConfig< MPI, InboundPayload = XcmAsPlainPayload, - InboundRelayer = bp_runtime::AccountIdOf>, OutboundPayload = XcmAsPlainPayload, > + pallet_bridge_relayers::Config, AllPalletsWithoutSystem: @@ -98,14 +87,13 @@ where GPI: 'static, PPI: 'static, MPI: 'static, - MB: MessageBridge, + // MB: MessageBridge, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type PPI = PPI; type MPI = MPI; - type MB = MB; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -131,16 +119,9 @@ pub fn relayed_incoming_message_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -179,7 +160,8 @@ pub fn relayed_incoming_message_works( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -219,7 +201,7 @@ pub fn relayed_incoming_message_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -268,16 +250,9 @@ pub fn free_relay_extrinsic_works( RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { // ensure that the runtime allows free header submissions let free_headers_interval = ( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -390,7 +366,7 @@ pub fn free_relay_extrinsic_works( ( BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -440,16 +416,9 @@ pub fn complex_relay_extrinsic_works( + From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { helpers::relayed_incoming_message_works::< RuntimeHelper::Runtime, @@ -488,7 +457,8 @@ pub fn complex_relay_extrinsic_works( message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( lane_id, @@ -518,7 +488,7 @@ pub 
fn complex_relay_extrinsic_works( }.into(), BridgeMessagesCall::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), @@ -565,16 +535,9 @@ where RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -592,7 +555,8 @@ where message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -612,7 +576,6 @@ where RuntimeHelper::GPI, RuntimeHelper::PPI, RuntimeHelper::MPI, - _, >( relay_chain_header, grandpa_justification, @@ -637,23 +600,14 @@ where AccountIdOf: From, RuntimeHelper::Runtime: pallet_utility::Config>, - MessageThisChain: - bp_runtime::Chain>, + ThisChainOf: + Chain>, RuntimeCallOf: From> + From> + From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -672,7 +626,8 @@ where message_delivery_proof, ) = test_data::from_parachain::make_complex_relayer_confirmation_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -714,16 +669,9 @@ where RuntimeHelper: WithRemoteParachainHelper, RuntimeCallOf: From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::SourceHeaderChain: - SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof< - HashOf>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -741,7 +689,8 @@ where message_proof, ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), @@ -757,7 +706,6 @@ where let call = test_data::from_parachain::make_standalone_relayer_delivery_call::< RuntimeHelper::Runtime, RuntimeHelper::MPI, - _, >( message_proof, helpers::relayer_id_at_bridged_chain::(), @@ -778,22 +726,13 @@ pub fn can_calculate_fee_for_standalone_message_confirmation_transaction: From, - MessageThisChain: - bp_runtime::Chain>, + ThisChainOf: + Chain>, RuntimeCallOf: From>, - UnderlyingChainOf>: - bp_runtime::Chain + Parachain, + BridgedChainOf: Chain + Parachain, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: - TargetHeaderChain< - XcmAsPlainPayload, - AccountIdOf, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>>, - >, - >, { run_test::(collator_session_key, 1000, vec![], || { // generate bridged relay chain finality, parachain heads and message proofs, @@ -806,7 +745,8 @@ where let (_, _, _, _, _, message_delivery_proof) = 
test_data::from_parachain::make_complex_relayer_confirmation_proofs::< >::BridgedChain, - RuntimeHelper::MB, + BridgedChainOf, + ThisChainOf, (), >( LaneId::default(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 0ce049cd1c463..78b8a170f0d4b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -23,12 +23,14 @@ use bp_messages::{LaneId, MessageNonce}; use bp_polkadot_core::parachains::{ParaHash, ParaId}; use bp_relayers::RewardsAccountParams; use codec::Decode; +use core::marker::PhantomData; use frame_support::{ assert_ok, traits::{OnFinalize, OnInitialize, PalletInfoAccess}, }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; +use pallet_bridge_messages::BridgedChainOf; use parachains_common::AccountId; use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, SlotDurations, @@ -36,7 +38,6 @@ use parachains_runtimes_test_utils::{ use sp_core::Get; use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::TrailingZeroInput, AccountId32}; -use sp_std::marker::PhantomData; use xcm::latest::prelude::*; /// Verify that the transaction has succeeded. @@ -240,10 +241,12 @@ pub(crate) fn initialize_bridge_grandpa_pallet( pub type CallsAndVerifiers = Vec<(RuntimeCallOf, Box)>; +pub type InboundRelayerId = bp_runtime::AccountIdOf>; + /// Returns relayer id at the bridged chain. pub fn relayer_id_at_bridged_chain, MPI>( -) -> Runtime::InboundRelayer { - Runtime::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() +) -> InboundRelayerId { + Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -260,7 +263,7 @@ pub fn relayed_incoming_message_works( ) -> sp_runtime::DispatchOutcome, prepare_message_proof_import: impl FnOnce( Runtime::AccountId, - Runtime::InboundRelayer, + InboundRelayerId, InteriorLocation, MessageNonce, Xcm<()>, @@ -287,7 +290,7 @@ pub fn relayed_incoming_message_works( // value here is tricky - there are several transaction payment pallets and we don't // want to introduce additional bounds and traits here just for that, so let's just // select some presumably large value - sp_std::cmp::max::(Runtime::ExistentialDeposit::get(), 1u32.into()) * + core::cmp::max::(Runtime::ExistentialDeposit::get(), 1u32.into()) * 100_000_000u32.into(), )], || { diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index e5d5e7cac96ba..c61a31e5454ba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -19,30 +19,29 @@ use crate::test_data::prepare_inbound_xcm; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, MessageNonce, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, MessageNonce, UnrewardedRelayersState, }; -use bp_runtime::{AccountIdOf, BlockNumberOf, HeaderOf, StorageProofSize, UnderlyingChainOf}; +use bp_runtime::{AccountIdOf, 
BlockNumberOf, Chain, HeaderOf, UnverifiedStorageProofParams}; use bp_test_utils::make_default_justification; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::{BridgedChain, BridgedHeader}; use sp_runtime::traits::Header as HeaderT; use xcm::latest::prelude::*; +use crate::test_cases::helpers::InboundRelayerId; use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; use bp_messages::{DeliveredMessages, InboundLaneData, UnrewardedRelayer}; use bp_runtime::HashOf; +use pallet_bridge_messages::{ + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + BridgedChainOf, +}; use sp_runtime::DigestItem; /// Prepare a batch call with bridged GRANDPA finality and message proof. @@ -50,22 +49,17 @@ pub fn make_complex_relayer_delivery_batch( bridged_header: BridgedHeader, bridged_justification: GrandpaJustification>, message_proof: FromBridgedChainMessagesProof>>, - relayer_id_at_bridged_chain: AccountIdOf>, + relayer_id_at_bridged_chain: InboundRelayerId, ) -> pallet_utility::Call where Runtime: pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = AccountIdOf>, - > + pallet_utility::Config, + + pallet_bridge_messages::Config + + pallet_utility::Config, GPI: 'static, MPI: 'static, - >::SourceHeaderChain: SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>>, - >, ::RuntimeCall: From> + From>, + BridgedChainOf: Chain>>, { let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { finality_target: Box::new(bridged_header), @@ -73,7 +67,7 @@ where }; let submit_message = pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }; @@ -97,15 +91,9 @@ where + pallet_utility::Config, GPI: 'static, MPI: 'static, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>, - >, - >, ::RuntimeCall: From> + From>, + BridgedChainOf: Chain>>, { let submit_grandpa = pallet_bridge_grandpa::Call::::submit_finality_proof { finality_target: Box::new(bridged_header), @@ -124,24 +112,18 @@ where /// Prepare a call with message proof. 
pub fn make_standalone_relayer_delivery_call( message_proof: FromBridgedChainMessagesProof>>, - relayer_id_at_bridged_chain: AccountIdOf>, + relayer_id_at_bridged_chain: InboundRelayerId, ) -> Runtime::RuntimeCall where Runtime: pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = AccountIdOf>, - >, + + pallet_bridge_messages::Config, MPI: 'static, - >::SourceHeaderChain: SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>>, - >, Runtime::RuntimeCall: From>, + BridgedChainOf: Chain>>, { pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain, - proof: message_proof, + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), } @@ -159,14 +141,8 @@ where Runtime: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, MPI: 'static, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< - HashOf>, - >, - >, Runtime::RuntimeCall: From>, + BridgedChainOf: Chain>>, { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: message_delivery_proof, @@ -176,39 +152,47 @@ where } /// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. -pub fn make_complex_relayer_delivery_proofs( +pub fn make_complex_relayer_delivery_proofs< + BridgedChain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, xcm_message: Xcm, message_nonce: MessageNonce, message_destination: Junctions, - header_number: BlockNumberOf>, + header_number: BlockNumberOf, is_minimal_call: bool, ) -> ( - HeaderOf>, - GrandpaJustification>>, - FromBridgedChainMessagesProof>>, + HeaderOf, + GrandpaJustification>, + FromBridgedChainMessagesProof>, ) where - MB: MessageBridge, - MessageBridgedChain: Send + Sync + 'static, - UnderlyingChainOf>: ChainWithGrandpa, + BridgedChain: ChainWithGrandpa, + ThisChainWithMessages: ChainWithMessages, { + // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); - let message_size = StorageProofSize::Minimal(message_payload.len() as u32); - // prepare para storage proof containing message - let (state_root, storage_proof) = prepare_messages_storage_proof::( - lane_id, - message_nonce..=message_nonce, - None, - message_size, - message_payload, - encode_all_messages, - encode_lane_data, - ); + // prepare storage proof containing message + let (state_root, storage_proof) = + prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + UnverifiedStorageProofParams::from_db_size(message_payload.len() as u32), + |_| message_payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); - let (header, justification) = make_complex_bridged_grandpa_header_proof::< - MessageBridgedChain, - >(state_root, header_number, is_minimal_call); + let (header, justification) = make_complex_bridged_grandpa_header_proof::( + state_root, + header_number, + is_minimal_call, + ); let message_proof = FromBridgedChainMessagesProof { bridged_header_hash: header.hash(), @@ -222,44 +206,44 @@ where } /// Prepare storage proofs of message confirmations, stored at the (bridged) target GRANDPA chain. 
-pub fn make_complex_relayer_confirmation_proofs( +pub fn make_complex_relayer_confirmation_proofs< + BridgedChain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, - header_number: BlockNumberOf>, - relayer_id_at_this_chain: AccountIdOf>, + header_number: BlockNumberOf, + relayer_id_at_this_chain: AccountIdOf, relayers_state: UnrewardedRelayersState, ) -> ( - HeaderOf>, - GrandpaJustification>>, - FromBridgedChainMessagesDeliveryProof>>, + HeaderOf, + GrandpaJustification>, + FromBridgedChainMessagesDeliveryProof>, ) where - MB: MessageBridge, - MessageBridgedChain: Send + Sync + 'static, - MessageThisChain: Send + Sync + 'static, - UnderlyingChainOf>: ChainWithGrandpa, + BridgedChain: ChainWithGrandpa, + ThisChainWithMessages: ChainWithMessages, { // prepare storage proof containing message delivery proof - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - lane_id, - InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer_id_at_this_chain, - messages: DeliveredMessages::new(1) - }; - relayers_state.unrewarded_relayer_entries as usize - ] - .into(), - last_confirmed_nonce: 1, - }, - StorageProofSize::Minimal(0), - ); + let (state_root, storage_proof) = + prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain, + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + UnverifiedStorageProofParams::default(), + ); - let (header, justification) = make_complex_bridged_grandpa_header_proof::( - state_root, - header_number, - false, - ); + let (header, justification) = + make_complex_bridged_grandpa_header_proof::(state_root, header_number, false); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: header.hash(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index 5d3cba4e53b5e..897fe0d0b0f17 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -19,61 +19,58 @@ use super::{from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepare_inbound_xcm}; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, LaneId, + source_chain::FromBridgedChainMessagesDeliveryProof, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, UnrewardedRelayersState, Weight, }; use bp_runtime::{ - AccountIdOf, BlockNumberOf, HeaderOf, Parachain, StorageProofSize, UnderlyingChainOf, + AccountIdOf, BlockNumberOf, Chain, HeaderOf, Parachain, UnverifiedStorageProofParams, }; use bp_test_utils::prepare_parachain_heads_proof; -use bridge_runtime_common::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - BridgedChain as MessageBridgedChain, MessageBridge, ThisChain as MessageThisChain, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, - messages_xcm_extension::XcmAsPlainPayload, -}; +use bridge_runtime_common::messages_xcm_extension::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::BridgedHeader; use pallet_bridge_parachains::{RelayBlockHash, 
RelayBlockNumber}; use sp_runtime::traits::Header as HeaderT; use xcm::latest::prelude::*; +use crate::test_cases::helpers::InboundRelayerId; use bp_header_chain::{justification::GrandpaJustification, ChainWithGrandpa}; use bp_messages::{DeliveredMessages, InboundLaneData, MessageNonce, UnrewardedRelayer}; use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; +use pallet_bridge_messages::{ + messages_generation::{ + encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, + prepare_messages_storage_proof, + }, + BridgedChainOf, +}; use sp_runtime::SaturatedConversion; /// Prepare a batch call with relay finality proof, parachain head proof and message proof. -pub fn make_complex_relayer_delivery_batch( +pub fn make_complex_relayer_delivery_batch( relay_chain_header: BridgedHeader, grandpa_justification: GrandpaJustification>, parachain_heads: Vec<(ParaId, ParaHash)>, para_heads_proof: ParaHeadsProof, message_proof: FromBridgedChainMessagesProof, - relayer_id_at_bridged_chain: InboundRelayer, -) -> pallet_utility::Call where - Runtime:pallet_bridge_grandpa::Config + relayer_id_at_bridged_chain: InboundRelayerId, +) -> pallet_utility::Call +where + Runtime: pallet_bridge_grandpa::Config + pallet_bridge_parachains::Config - + pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = InboundRelayer, - > + + pallet_bridge_messages::Config + pallet_utility::Config, GPI: 'static, PPI: 'static, MPI: 'static, - ParaHash: From<<>::BridgedChain as bp_runtime::Chain>::Hash>, - <>::BridgedChain as bp_runtime::Chain>::Hash: From, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, - ::RuntimeCall: - From> + ParaHash: From< + <>::BridgedChain as bp_runtime::Chain>::Hash, + >, + <>::BridgedChain as bp_runtime::Chain>::Hash: + From, + BridgedChainOf: Chain + Parachain, + ::RuntimeCall: From> + From> + From>, { @@ -93,7 +90,7 @@ pub fn make_complex_relayer_delivery_batch::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), - proof: message_proof.into(), + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), }; @@ -122,11 +119,7 @@ where MPI: 'static, >::BridgedChain: bp_runtime::Chain + ChainWithGrandpa, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, + BridgedChainOf: Chain + Parachain, ::RuntimeCall: From> + From> + From>, @@ -160,23 +153,19 @@ where } /// Prepare a call with message proof. 
-pub fn make_standalone_relayer_delivery_call( +pub fn make_standalone_relayer_delivery_call( message_proof: FromBridgedChainMessagesProof, - relayer_id_at_bridged_chain: InboundRelayer, -) -> Runtime::RuntimeCall where - Runtime: pallet_bridge_messages::Config< - MPI, - InboundPayload = XcmAsPlainPayload, - InboundRelayer = InboundRelayer, - >, + relayer_id_at_bridged_chain: InboundRelayerId, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_messages::Config, MPI: 'static, - Runtime::RuntimeCall: From>, - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: - From>, + Runtime::RuntimeCall: From>, + BridgedChainOf: Chain + Parachain, { pallet_bridge_messages::Call::::receive_messages_proof { relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), - proof: message_proof.into(), + proof: Box::new(message_proof), messages_count: 1, dispatch_weight: Weight::from_parts(1000000000, 0), } @@ -192,11 +181,7 @@ where Runtime: pallet_bridge_messages::Config, MPI: 'static, Runtime::RuntimeCall: From>, - >::TargetHeaderChain: TargetHeaderChain< - XcmAsPlainPayload, - Runtime::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, + BridgedChainOf: Chain + Parachain, { pallet_bridge_messages::Call::::receive_messages_delivery_proof { proof: message_delivery_proof, @@ -206,7 +191,12 @@ where } /// Prepare storage proofs of messages, stored at the source chain. -pub fn make_complex_relayer_delivery_proofs( +pub fn make_complex_relayer_delivery_proofs< + BridgedRelayChain, + BridgedParachain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, xcm_message: Xcm, message_nonce: MessageNonce, @@ -226,24 +216,27 @@ pub fn make_complex_relayer_delivery_proofs + ChainWithGrandpa, - MB: MessageBridge, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, + ThisChainWithMessages: ChainWithMessages, { + // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); - let message_size = StorageProofSize::Minimal(message_payload.len() as u32); // prepare para storage proof containing message - let (para_state_root, para_storage_proof) = prepare_messages_storage_proof::( - lane_id, - message_nonce..=message_nonce, - None, - message_size, - message_payload, - encode_all_messages, - encode_lane_data, - ); + let (para_state_root, para_storage_proof) = + prepare_messages_storage_proof::( + lane_id, + message_nonce..=message_nonce, + None, + UnverifiedStorageProofParams::from_db_size(message_payload.len() as u32), + |_| message_payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = - make_complex_bridged_parachain_heads_proof::( + make_complex_bridged_parachain_heads_proof::( para_state_root, para_header_number, relay_header_number, @@ -270,12 +263,17 @@ where } /// Prepare storage proofs of message confirmations, stored at the target parachain. 
-pub fn make_complex_relayer_confirmation_proofs( +pub fn make_complex_relayer_confirmation_proofs< + BridgedRelayChain, + BridgedParachain, + ThisChainWithMessages, + InnerXcmRuntimeCall, +>( lane_id: LaneId, para_header_number: u32, relay_header_number: u32, bridged_para_id: u32, - relayer_id_at_this_chain: AccountIdOf>, + relayer_id_at_this_chain: AccountIdOf, relayers_state: UnrewardedRelayersState, ) -> ( HeaderOf, @@ -288,28 +286,29 @@ pub fn make_complex_relayer_confirmation_proofs + ChainWithGrandpa, - MB: MessageBridge, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, + ThisChainWithMessages: ChainWithMessages, { // prepare para storage proof containing message delivery proof - let (para_state_root, para_storage_proof) = prepare_message_delivery_storage_proof::( - lane_id, - InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer_id_at_this_chain.into(), - messages: DeliveredMessages::new(1) - }; - relayers_state.unrewarded_relayer_entries as usize - ] - .into(), - last_confirmed_nonce: 1, - }, - StorageProofSize::Minimal(0), - ); + let (para_state_root, para_storage_proof) = + prepare_message_delivery_storage_proof::( + lane_id, + InboundLaneData { + relayers: vec![ + UnrewardedRelayer { + relayer: relayer_id_at_this_chain.into(), + messages: DeliveredMessages::new(1) + }; + relayers_state.unrewarded_relayer_entries as usize + ] + .into(), + last_confirmed_nonce: 1, + }, + UnverifiedStorageProofParams::default(), + ); let (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) = - make_complex_bridged_parachain_heads_proof::( + make_complex_bridged_parachain_heads_proof::( para_state_root, para_header_number, relay_header_number, @@ -334,7 +333,7 @@ where } /// Make bridged parachain header with given state root and relay header that is finalizing it. 
-pub fn make_complex_bridged_parachain_heads_proof( +pub fn make_complex_bridged_parachain_heads_proof( para_state_root: ParaHash, para_header_number: u32, relay_header_number: BlockNumberOf, @@ -350,20 +349,17 @@ pub fn make_complex_bridged_parachain_heads_proof( where BridgedRelayChain: bp_runtime::Chain + ChainWithGrandpa, - MB: MessageBridge, - ::BridgedChain: Send + Sync + 'static, - ::ThisChain: Send + Sync + 'static, - UnderlyingChainOf>: bp_runtime::Chain + Parachain, + BridgedParachain: bp_runtime::Chain + Parachain, { let bridged_para_head = ParaHead( - bp_test_utils::test_header_with_root::>( + bp_test_utils::test_header_with_root::>( para_header_number.into(), para_state_root, ) .encode(), ); let (relay_state_root, para_heads_proof, parachain_heads) = - prepare_parachain_heads_proof::>(vec![( + prepare_parachain_heads_proof::>(vec![( bridged_para_id, bridged_para_head.clone(), )]); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs index 9285a1e7ad450..ee3fc1ed2c41f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -39,8 +39,8 @@ pub fn prepare_inbound_xcm( xcm_message: Xcm, destination: InteriorLocation, ) -> Vec { - let location = xcm::VersionedInteriorLocation::V4(destination); - let xcm = xcm::VersionedXcm::::V4(xcm_message); + let location = xcm::VersionedInteriorLocation::from(destination); + let xcm = xcm::VersionedXcm::::from(xcm_message); // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor // or public fields, so just tuple // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index fe4de3114be0d..43fc9083937c3 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -10,88 +10,87 @@ description = "Westend Collectives Parachain Runtime" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } 
-pallet-asset-rate = { path = "../../../../../substrate/frame/asset-rate", default-features = false } -pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } -pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-treasury = { path = "../../../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false } -pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false } -pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false } -pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = 
"../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-rate = { workspace = true } +pallet-alliance = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-collective = { workspace = true } +pallet-multisig = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-referenda = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-core-fellowship = { workspace = true } +pallet-salary = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = 
"../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace = true } +pallet-collective-content = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] -sp-io = { path = "../../../../../substrate/primitives/io", features = ["std"] } +sp-io = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] @@ -131,7 +130,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -229,7 +228,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -238,7 +236,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs index ceef6de6b7435..a052a9d3800cc 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -117,6 +117,7 @@ impl pallet_ranked_collective::Config for Runtime type MinRankOfClass = sp_runtime::traits::Identity; type MemberSwappedHandler = (crate::AmbassadorCore, crate::AmbassadorSalary); type VoteWeight = 
pallet_ranked_collective::Linear; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (crate::AmbassadorCore, crate::AmbassadorSalary); } @@ -219,6 +220,7 @@ impl pallet_core_fellowship::Config for Runtime { >; type ApproveOrigin = PromoteOrigin; type PromoteOrigin = PromoteOrigin; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<65536>; type MaxRank = ConstU32<9>; } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 6a4a182079671..942e0c294dd02 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -30,7 +30,7 @@ use frame_support::{ parameter_types, traits::{ tokens::UnityOrOuterConversion, EitherOf, EitherOfDiverse, FromContains, MapSuccess, - NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, + OriginTrait, TryWithMorphedArg, }, PalletId, }; @@ -55,8 +55,6 @@ use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; #[cfg(feature = "runtime-benchmarks")] use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; -#[cfg(feature = "runtime-benchmarks")] -use testnet_parachains_constants::westend::currency::DOLLARS; /// The Fellowship members' ranks. pub mod ranks { @@ -152,6 +150,7 @@ impl pallet_ranked_collective::Config for Runtime type MinRankOfClass = tracks::MinRankOfClass; type MemberSwappedHandler = (crate::FellowshipCore, crate::FellowshipSalary); type VoteWeight = pallet_ranked_collective::Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (crate::FellowshipCore, crate::FellowshipSalary); } @@ -209,6 +208,7 @@ impl pallet_core_fellowship::Config for Runtime { >, EnsureCanPromoteTo, >; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<65536>; type MaxRank = ConstU32<9>; } @@ -270,16 +270,6 @@ parameter_types! { pub SelfParaId: ParaId = ParachainInfo::parachain_id(); } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - // Benchmark bond. Needed to make `propose_spend` work. - pub const TenPercent: Permill = Permill::from_percent(10); - // Benchmark minimum. Needed to make `propose_spend` work. - pub const BenchmarkProposalBondMinimum: Balance = 1 * DOLLARS; - // Benchmark maximum. Needed to make `propose_spend` work. - pub const BenchmarkProposalBondMaximum: Balance = 10 * DOLLARS; -} - /// [`PayOverXcm`] setup to pay the Fellowship Treasury. pub type FellowshipTreasuryPaymaster = PayOverXcm< FellowshipTreasuryInteriorLocation, @@ -295,28 +285,6 @@ pub type FellowshipTreasuryPaymaster = PayOverXcm< pub type FellowshipTreasuryInstance = pallet_treasury::Instance1; impl pallet_treasury::Config for Runtime { - // The creation of proposals via the treasury pallet is deprecated and should not be utilized. - // Instead, public or fellowship referenda should be used to propose and command the treasury - // spend or spend_local dispatchables. The parameters below have been configured accordingly to - // discourage its use. - // TODO: replace with `NeverEnsure` once polkadot-sdk 1.5 is released. 
- type ApproveOrigin = NeverEnsureOrigin<()>; - type OnSlash = (); - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBond = HundredPercent; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMinimum = MaxBalance; - #[cfg(not(feature = "runtime-benchmarks"))] - type ProposalBondMaximum = MaxBalance; - - #[cfg(feature = "runtime-benchmarks")] - type ProposalBond = TenPercent; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMinimum = BenchmarkProposalBondMinimum; - #[cfg(feature = "runtime-benchmarks")] - type ProposalBondMaximum = BenchmarkProposalBondMaximum; - // end. - type WeightInfo = weights::pallet_treasury::WeightInfo; type PalletId = FellowshipTreasuryPalletId; type Currency = Balances; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs index e5b176fc77873..ed5d4870e4a6d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -14,6 +14,8 @@ // limitations under the License. use crate::OriginCaller; +use alloc::boxed::Box; +use core::{cmp::Ordering, marker::PhantomData}; use frame_support::{ dispatch::DispatchResultWithPostInfo, traits::{Currency, PrivilegeCmp}, @@ -21,7 +23,6 @@ use frame_support::{ }; use pallet_alliance::{ProposalIndex, ProposalProvider}; use sp_runtime::DispatchError; -use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; type AccountIdOf = ::AccountId; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5fce8e5095410..d843d6f6f776e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -42,8 +42,12 @@ mod weights; pub mod xcm_config; // Fellowship configurations. 
pub mod fellowship; + +extern crate alloc; + pub use ambassador::pallet_ambassador_origins; +use alloc::{vec, vec::Vec}; use ambassador::AmbassadorCoreInstance; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use fellowship::{pallet_fellowship_origins, Fellows, FellowshipCoreInstance}; @@ -57,7 +61,6 @@ use sp_runtime::{ ApplyExtrinsicResult, Perbill, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -70,8 +73,8 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, - EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, + fungible::HoldConsideration, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, + InstanceFilter, LinearStoragePrice, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, @@ -104,7 +107,7 @@ use polkadot_runtime_common::{ impls::VersionedLocatableAsset, BlockHashCount, SlowAdjustingFeeUpdate, }; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -122,7 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -163,6 +166,7 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. @@ -180,7 +184,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = ConstU16<0>; + type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -836,7 +840,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -940,7 +944,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::WndLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -953,11 +957,11 @@ impl_runtime_apis! 
{ Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -972,7 +976,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -982,6 +986,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -1034,7 +1050,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -1083,7 +1099,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(Assets, u32, Location, Box)> { + ) -> Option<(Assets, u32, Location, alloc::boxed::Box)> { // Collectives only supports teleports to system parachain. // Relay/native token can be teleported between Collectives and Relay. let native_location = Parent.into(); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs index 0b7a2fc21cde4..92c8c88b51547 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -47,7 +47,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. 
pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs index f40940a8b25fa..6bedfcc7e0123 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -58,6 +58,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. + Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `AmbassadorCore::Member` (r:1 w:1) /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `AmbassadorCollective::Members` (r:1 w:1) @@ -160,6 +171,20 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(6)) } + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ±0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `AmbassadorCollective::Members` (r:1 w:0) /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `AmbassadorCore::Member` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs index 471ee82ead729..05014e273f000 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -57,6 +57,17 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `FellowshipCore::Member` (r:1 w:1) /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `FellowshipCollective::Members` (r:1 w:1) @@ -159,6 +170,20 @@ impl pallet_core_fellowship::WeightInfo for WeightInfo< .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(6)) } + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ±0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `FellowshipCollective::Members` (r:1 w:0) /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `FellowshipCore::Member` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs index 4bd71c4e7d497..0bb6d3d0f1c45 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs index 58540e646d8c3..5c513c3754ce8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_treasury.rs @@ -62,43 +62,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `FellowshipTreasury::ProposalCount` (r:1 w:1) - /// Proof: `FellowshipTreasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `FellowshipTreasury::Proposals` (r:0 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 264_000_000 picoseconds. 
- Weight::from_parts(277_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipTreasury::Proposals` (r:1 w:1) - /// Proof: `FellowshipTreasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 289_000_000 picoseconds. - Weight::from_parts(312_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `FellowshipTreasury::Approvals` (r:1 w:1) /// Proof: `FellowshipTreasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index 561e8276b5f05..d54f1e7db6c16 100644 --- a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -13,20 +13,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -smallvec = "1.11.0" +smallvec = { workspace = true, default-features = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../../polkadot/core-primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../polkadot/runtime/rococo/constants", default-features = false, optional = true } -westend-runtime-constants = { path = "../../../../polkadot/runtime/westend/constants", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +polkadot-core-primitives = { workspace = true } +rococo-runtime-constants = { optional = true, workspace = true } +westend-runtime-constants = { optional = true, workspace = true } +xcm = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index e43a69482c79f..1fcebb3f16a96 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -13,74 +13,73 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true 
} [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = 
"../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-insecure-randomness-collective-flip = { workspace = true } +pallet-balances = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +pallet-sudo = { workspace = true } +pallet-contracts = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = 
"../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } [features] default = ["std"] @@ -133,7 +132,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -141,7 +139,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -171,7 +169,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index fcd786711bbe9..e8cc9d02fb0e4 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -65,6 +65,7 @@ impl Config for Runtime { type AddressGenerator = DefaultAddressGenerator; type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; + type MaxTransientStorageSize = ConstU32<{ 1 * 1024 * 1024 }>; type UnsafeUnstableInterface = ConstBool; type UploadOrigin = EnsureSigned; type InstantiateOrigin = EnsureSigned; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 2d346e66c6c3b..47ce6f3628ecd 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -29,6 +29,9 @@ mod contracts; mod weights; mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::AggregateMessageOrigin; use 
sp_api::impl_runtime_apis; @@ -40,7 +43,6 @@ use sp_runtime::{ ApplyExtrinsicResult, Perbill, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -50,7 +52,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; @@ -64,7 +66,7 @@ pub use parachains_common::{AuraId, Balance}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm::prelude::*; use xcm_config::CollatorSelectionUpdateOrigin; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -142,7 +144,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, @@ -177,6 +179,7 @@ parameter_types! { }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) .build_or_panic(); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. @@ -193,7 +196,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = frame_system::weights::SubstrateWeight; - type SS58Prefix = ConstU16<42>; + type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; } @@ -486,7 +489,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -590,7 +593,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -603,11 +606,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -622,7 +625,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -632,6 +635,18 @@ impl_runtime_apis! 
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -752,7 +767,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } @@ -802,7 +817,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(Assets, u32, Location, Box)> { + ) -> Option<(Assets, u32, Location, alloc::boxed::Box)> { // Contracts-System-Para only supports teleports to system parachain. // Relay/native token can be teleported between Contracts-System-Para and Relay. let native_location = Parent.into(); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index dc99fe331f786..2920bc428d90b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -10,74 +10,74 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } 
-pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-broker = { path = "../../../../../substrate/frame/broker", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } 
+sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } 
[features] default = ["std"] @@ -94,6 +94,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -131,7 +132,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -139,7 +139,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -169,7 +169,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ @@ -199,4 +199,14 @@ try-runtime = [ "sp-runtime/try-runtime", ] -fast-runtime = [] +fast-runtime = [ + "rococo-runtime-constants/fast-runtime", +] + +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs index 28dacd20cf305..368a1e427aaaf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/build.rs @@ -13,20 +13,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .build(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) .build(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() + .enable_metadata_hash("ROC", 12) .build(); } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index ec3a4f31202fd..fa0c2644421e7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -21,22 +21,65 @@ use cumulus_primitives_core::relay_chain; use frame_support::{ parameter_types, traits::{ - fungible::{Balanced, Credit}, - OnUnbalanced, + fungible::{Balanced, Credit, Inspect}, + tokens::{Fortitude, Preservation}, + DefensiveResult, OnUnbalanced, }, }; +use frame_system::Pallet as System; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf}; -use parachains_common::{AccountId, Balance, BlockNumber}; +use parachains_common::{AccountId, Balance}; +use rococo_runtime_constants::system_parachain::coretime; +use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; +use xcm_executor::traits::TransactAsset; -pub struct CreditToCollatorPot; -impl OnUnbalanced<Credit<AccountId, Balances>> for CreditToCollatorPot { - fn on_nonzero_unbalanced(credit: Credit<AccountId, Balances>) { - let staking_pot = CollatorSelection::account_id(); - let _ = <Balances as Balanced<_>>::resolve(&staking_pot, credit); +pub struct BurnCoretimeRevenue; +impl OnUnbalanced<Credit<AccountId, Balances>> for BurnCoretimeRevenue { + fn on_nonzero_unbalanced(amount: Credit<AccountId, Balances>) { + let acc = RevenueAccumulationAccount::get(); + if !System::<Runtime>::account_exists(&acc) { + System::<Runtime>::inc_providers(&acc); + } + Balances::resolve(&acc, amount).defensive_ok(); + } } +type AssetTransactor = <xcm_config::XcmConfig as xcm_executor::Config>::AssetTransactor; + +fn burn_at_relay(stash: &AccountId, value: Balance) -> Result<(), XcmError> { + let dest = Location::parent(); + let stash_location = + Junction::AccountId32 { network: None, id: stash.clone().into() }.into_location(); + let asset = Asset { id: AssetId(Location::parent()), fun: Fungible(value) }; + let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; + + let withdrawn = AssetTransactor::withdraw_asset(&asset, &stash_location, None)?; + + AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?; + + let parent_assets = Into::<Assets>::into(withdrawn) + .reanchored(&dest, &Here.into()) + .defensive_map_err(|_| XcmError::ReanchorFailed)?; + + PolkadotXcm::send_xcm( + Here, + Location::parent(), + Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + ReceiveTeleportedAsset(parent_assets.clone()), + BurnAsset(parent_assets), + ]), + )?; + + AssetTransactor::check_out(&dest, &asset, &dummy_xcm_context); + + Ok(()) +} + /// A type containing the
encoding of the coretime pallet in the Relay chain runtime. Used to /// construct any remote calls. The codec index must correspond to the index of `Coretime` in the /// `construct_runtime` of the Relay chain. @@ -66,11 +109,7 @@ enum CoretimeProviderCalls { parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); -} - -parameter_types! { - pub storage CoreCount: Option<CoreIndex> = None; - pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; + pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash"); } /// Type that implements the `CoretimeInterface` for the allocation of Coretime. Meant to operate @@ -205,26 +244,30 @@ impl CoretimeInterface for CoretimeAllocator { } } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf<Self>, Self::Balance)> { - let revenue = CoretimeRevenue::get(); - CoretimeRevenue::set(&None); - revenue - } + fn on_new_timeslice(_t: pallet_broker::Timeslice) { + let stash = RevenueAccumulationAccount::get(); + let value = + Balances::reducible_balance(&stash, Preservation::Expendable, Fortitude::Polite); - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf<Self>, revenue: Self::Balance) { - CoretimeRevenue::set(&Some((when, revenue))); + if value > 0 { + log::debug!(target: "runtime::coretime", "Going to burn {value} stashed tokens at RC"); + match burn_at_relay(&stash, value) { + Ok(()) => { + log::debug!(target: "runtime::coretime", "Successfully burnt {value} tokens"); + }, + Err(err) => { + log::error!(target: "runtime::coretime", "burn_at_relay failed: {err:?}"); + }, + } + } } } impl pallet_broker::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnRevenue = CreditToCollatorPot; - #[cfg(feature = "fast-runtime")] - type TimeslicePeriod = ConstU32<10>; - #[cfg(not(feature = "fast-runtime"))] - type TimeslicePeriod = ConstU32<80>; + type OnRevenue = BurnCoretimeRevenue; + type TimeslicePeriod = ConstU32<{ coretime::TIMESLICE_PERIOD }>; type MaxLeasedCores = ConstU32<50>; type MaxReservedCores = ConstU32<10>; type Coretime = CoretimeAllocator; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index b3eaf3d127a2f..9fd0093840d3f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -33,6 +33,9 @@ mod coretime; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -66,7 +69,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -76,7 +78,7 @@ use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, RocRelayLocation, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -104,6 +106,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, +
frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. @@ -117,6 +120,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, + pallet_broker::migration::MigrateV2ToV3, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -142,10 +146,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, state_version: 1, }; @@ -550,7 +554,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -660,7 +664,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RocRelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -673,11 +677,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -692,7 +696,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -702,6 +706,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -760,7 +776,7 @@ impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 5c9175a18d98a..83e80e2e91e73 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_092_000, 0) + // Minimum execution time: 2_024_000 picoseconds. + Weight::from_parts(2_121_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_943_000 picoseconds. - Weight::from_parts(22_570_000, 0) + // Minimum execution time: 21_654_000 picoseconds. + Weight::from_parts(22_591_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_923_000 picoseconds. - Weight::from_parts(21_354_000, 0) + // Minimum execution time: 20_769_000 picoseconds. + Weight::from_parts(21_328_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,24 +93,34 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 10_687_000 picoseconds. - Weight::from_parts(11_409_000, 0) + // Minimum execution time: 10_404_000 picoseconds. 
+ Weight::from_parts(10_941_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::LastRelayChainBlockNumber` (r:1 w:0) /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -120,33 +130,34 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `n` is `[0, 1000]`. fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12567` - // Estimated: `14052` - // Minimum execution time: 111_288_000 picoseconds. - Weight::from_parts(117_804_282, 0) - .saturating_add(Weight::from_parts(0, 14052)) - // Standard Error: 391 - .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(66)) + // Measured: `12599` + // Estimated: `15065 + n * (1 ยฑ0)` + // Minimum execution time: 44_085_000 picoseconds. 
+ Weight::from_parts(127_668_002, 0) + .saturating_add(Weight::from_parts(0, 15065)) + // Standard Error: 2_231 + .saturating_add(Weight::from_parts(20_604, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(59)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `316` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 33_006_000 picoseconds. - Weight::from_parts(34_256_000, 0) + // Minimum execution time: 45_100_000 picoseconds. + Weight::from_parts(46_263_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -156,53 +167,53 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `553` // Estimated: `4698` - // Minimum execution time: 61_473_000 picoseconds. - Weight::from_parts(66_476_000, 0) + // Minimum execution time: 65_944_000 picoseconds. + Weight::from_parts(68_666_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 13_771_000 picoseconds. - Weight::from_parts(14_374_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 13_794_000 picoseconds. 
+ Weight::from_parts(14_450_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 15_162_000 picoseconds. - Weight::from_parts(15_742_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 15_316_000 picoseconds. + Weight::from_parts(15_787_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 16_196_000 picoseconds. - Weight::from_parts(16_796_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 16_375_000 picoseconds. + Weight::from_parts(17_113_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,15 +222,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `937` // Estimated: `4681` - // Minimum execution time: 25_653_000 picoseconds. - Weight::from_parts(27_006_000, 0) + // Minimum execution time: 25_952_000 picoseconds. 
+ Weight::from_parts(27_198_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -227,7 +238,7 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -236,10 +247,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `1002` + // Measured: `1003` // Estimated: `5996` - // Minimum execution time: 31_114_000 picoseconds. - Weight::from_parts(32_235_000, 0) + // Minimum execution time: 31_790_000 picoseconds. + Weight::from_parts(32_920_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -255,11 +266,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 57_280_000 picoseconds. - Weight::from_parts(58_127_480, 0) + // Minimum execution time: 56_286_000 picoseconds. + Weight::from_parts(56_946_240, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 41_670 - .saturating_add(Weight::from_parts(1_203_066, 0).saturating_mul(m.into())) + // Standard Error: 44_472 + .saturating_add(Weight::from_parts(1_684_838, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -279,25 +290,25 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3680` - // Minimum execution time: 59_968_000 picoseconds. - Weight::from_parts(62_315_000, 0) - .saturating_add(Weight::from_parts(0, 3680)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 64_967_000 picoseconds. + Weight::from_parts(66_504_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `3550` - // Minimum execution time: 50_887_000 picoseconds. 
- Weight::from_parts(57_366_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `466` + // Estimated: `3551` + // Minimum execution time: 37_552_000 picoseconds. + Weight::from_parts(46_263_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -311,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 84_472_000 picoseconds. - Weight::from_parts(96_536_000, 0) + // Minimum execution time: 79_625_000 picoseconds. + Weight::from_parts(86_227_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -329,8 +340,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 96_371_000 picoseconds. - Weight::from_parts(104_659_000, 0) + // Minimum execution time: 88_005_000 picoseconds. + Weight::from_parts(92_984_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -343,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 51_741_000 picoseconds. - Weight::from_parts(54_461_000, 0) + // Minimum execution time: 38_877_000 picoseconds. + Weight::from_parts(40_408_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -360,13 +371,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(21_028_116, 0) + // Minimum execution time: 20_581_000 picoseconds. + Weight::from_parts(21_610_297, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 119 + .saturating_add(Weight::from_parts(144, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -377,29 +390,29 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_987_000 picoseconds. - Weight::from_parts(6_412_478, 0) + // Minimum execution time: 6_079_000 picoseconds. 
+ Weight::from_parts(6_540_110, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 16 - .saturating_add(Weight::from_parts(47, 0).saturating_mul(n.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(10, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 38_623_000 picoseconds. - Weight::from_parts(39_773_000, 0) + // Minimum execution time: 42_947_000 picoseconds. + Weight::from_parts(43_767_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -412,13 +425,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:60) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { + fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12514` // Estimated: `13506` - // Minimum execution time: 97_074_000 picoseconds. - Weight::from_parts(101_247_740, 0) + // Minimum execution time: 93_426_000 picoseconds. + Weight::from_parts(96_185_447, 0) .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 116 + .saturating_add(Weight::from_parts(4, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(65)) } @@ -430,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_317_000 picoseconds. - Weight::from_parts(6_521_000, 0) + // Minimum execution time: 5_842_000 picoseconds. + Weight::from_parts(6_077_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -454,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 32_575_000 picoseconds. - Weight::from_parts(33_299_000, 0) + // Minimum execution time: 33_278_000 picoseconds. 
+ Weight::from_parts(34_076_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -474,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_256_000 picoseconds. - Weight::from_parts(15_927_000, 0) + // Minimum execution time: 15_779_000 picoseconds. + Weight::from_parts(16_213_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -486,8 +501,19 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_783_000 picoseconds. - Weight::from_parts(1_904_000, 0) + // Minimum execution time: 1_774_000 picoseconds. + Weight::from_parts(1_873_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_858_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -497,19 +523,19 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3863` - // Minimum execution time: 12_307_000 picoseconds. - Weight::from_parts(12_967_000, 0) - .saturating_add(Weight::from_parts(0, 3863)) + // Measured: `408` + // Estimated: `1893` + // Minimum execution time: 10_874_000 picoseconds. + Weight::from_parts(11_265_000, 0) + .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) @@ -517,10 +543,32 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `470` // Estimated: `1886` - // Minimum execution time: 6_597_000 picoseconds. - Weight::from_parts(6_969_000, 0) + // Minimum execution time: 6_525_000 picoseconds. 
+ Weight::from_parts(6_769_000, 0) .saturating_add(Weight::from_parts(0, 1886)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 45_561_000 picoseconds. + Weight::from_parts(47_306_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs index 9f79cea831aed..b8db473f10662 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs @@ -18,10 +18,10 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 7ff1cce2e0723..73a7198053070 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. 
pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 16412eb49a526..676048f92ad93 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 78018537f5d3c..07a4332800d7f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -10,74 +10,74 @@ license = "Apache-2.0" workspace = true [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = "0.4.1" +codec = { features = ["derive"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-broker = { path = "../../../../../substrate/frame/broker", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = 
"../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-broker = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = 
"staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [features] default = ["std"] @@ -94,6 +94,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -129,7 +130,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -138,7 +138,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -167,7 +167,7 @@ runtime-benchmarks = [ 
"sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ @@ -196,4 +196,14 @@ try-runtime = [ "sp-runtime/try-runtime", ] -fast-runtime = [] +fast-runtime = [ + "westend-runtime-constants/fast-runtime", +] + +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs index 28dacd20cf305..2f10a39d1b2e2 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/build.rs @@ -13,20 +13,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .build(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) .build(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() + .enable_metadata_hash("WND", 12) .build(); } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index a5e219b9897e0..4f06e3e3669c8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -21,22 +21,67 @@ use cumulus_primitives_core::relay_chain; use frame_support::{ parameter_types, traits::{ - fungible::{Balanced, Credit}, - OnUnbalanced, + fungible::{Balanced, Credit, Inspect}, + tokens::{Fortitude, Preservation}, + DefensiveResult, OnUnbalanced, }, }; -use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf}; -use parachains_common::{AccountId, Balance, BlockNumber}; +use frame_system::Pallet as System; +use pallet_broker::{ + CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600, RCBlockNumberOf, Timeslice, +}; +use parachains_common::{AccountId, Balance}; +use sp_runtime::traits::AccountIdConversion; +use westend_runtime_constants::system_parachain::coretime; use xcm::latest::prelude::*; +use xcm_executor::traits::TransactAsset; -pub struct CreditToCollatorPot; -impl OnUnbalanced> for CreditToCollatorPot { - fn on_nonzero_unbalanced(credit: Credit) { - let staking_pot = CollatorSelection::account_id(); - let _ = >::resolve(&staking_pot, credit); +pub struct BurnCoretimeRevenue; +impl OnUnbalanced> for 
BurnCoretimeRevenue {
+	fn on_nonzero_unbalanced(amount: Credit<AccountId, Balances>) {
+		let acc = RevenueAccumulationAccount::get();
+		if !System::<Runtime>::account_exists(&acc) {
+			System::<Runtime>::inc_providers(&acc);
+		}
+		Balances::resolve(&acc, amount).defensive_ok();
 	}
 }
 
+type AssetTransactor = <xcm_config::XcmConfig as xcm_executor::Config>::AssetTransactor;
+
+fn burn_at_relay(stash: &AccountId, value: Balance) -> Result<(), XcmError> {
+	let dest = Location::parent();
+	let stash_location =
+		Junction::AccountId32 { network: None, id: stash.clone().into() }.into_location();
+	let asset = Asset { id: AssetId(Location::parent()), fun: Fungible(value) };
+	let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None };
+
+	let withdrawn = AssetTransactor::withdraw_asset(&asset, &stash_location, None)?;
+
+	AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?;
+
+	let parent_assets = Into::<Assets>::into(withdrawn)
+		.reanchored(&dest, &Here.into())
+		.defensive_map_err(|_| XcmError::ReanchorFailed)?;
+
+	PolkadotXcm::send_xcm(
+		Here,
+		Location::parent(),
+		Xcm(vec![
+			Instruction::UnpaidExecution {
+				weight_limit: WeightLimit::Unlimited,
+				check_origin: None,
+			},
+			ReceiveTeleportedAsset(parent_assets.clone()),
+			BurnAsset(parent_assets),
+		]),
+	)?;
+
+	AssetTransactor::check_out(&dest, &asset, &dummy_xcm_context);
+
+	Ok(())
+}
+
 /// A type containing the encoding of the coretime pallet in the Relay chain runtime. Used to
 /// construct any remote calls. The codec index must correspond to the index of `Coretime` in the
 /// `construct_runtime` of the Relay chain.
@@ -66,11 +111,7 @@ enum CoretimeProviderCalls {
 
 parameter_types! {
 	pub const BrokerPalletId: PalletId = PalletId(*b"py/broke");
-}
-
-parameter_types! {
-	pub storage CoreCount: Option<CoreIndex> = None;
-	pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None;
+	pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash");
 }
 
 /// Type that implements the `CoretimeInterface` for the allocation of Coretime. Meant to operate
@@ -217,26 +258,30 @@ impl CoretimeInterface for CoretimeAllocator {
 		}
 	}
 
-	fn check_notify_revenue_info() -> Option<(RCBlockNumberOf<Self>, Self::Balance)> {
-		let revenue = CoretimeRevenue::get();
-		CoretimeRevenue::set(&None);
-		revenue
-	}
+	fn on_new_timeslice(_timeslice: Timeslice) {
+		let stash = RevenueAccumulationAccount::get();
+		let value =
+			Balances::reducible_balance(&stash, Preservation::Expendable, Fortitude::Polite);
 
-	#[cfg(feature = "runtime-benchmarks")]
-	fn ensure_notify_revenue_info(when: RCBlockNumberOf<Self>, revenue: Self::Balance) {
-		CoretimeRevenue::set(&Some((when, revenue)));
+		if value > 0 {
+			log::debug!(target: "runtime::coretime", "Going to burn {value} stashed tokens at RC");
+			match burn_at_relay(&stash, value) {
+				Ok(()) => {
+					log::debug!(target: "runtime::coretime", "Successfully burnt {value} tokens");
+				},
+				Err(err) => {
+					log::error!(target: "runtime::coretime", "burn_at_relay failed: {err:?}");
+				},
+			}
+		}
 	}
 }
 
 impl pallet_broker::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type Currency = Balances;
-	type OnRevenue = CreditToCollatorPot;
-	#[cfg(feature = "fast-runtime")]
-	type TimeslicePeriod = ConstU32<10>;
-	#[cfg(not(feature = "fast-runtime"))]
-	type TimeslicePeriod = ConstU32<80>;
+	type OnRevenue = BurnCoretimeRevenue;
+	type TimeslicePeriod = ConstU32<{ coretime::TIMESLICE_PERIOD }>;
 	// We don't actually need any leases at launch but set to 10 in case we want to sudo some in.
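For readers skimming the coretime-westend hunk above: revenue handling is now two-phased. `BurnCoretimeRevenue` stashes each incoming credit in a sub-account derived from the broker pallet id, and `on_new_timeslice` later teleports whatever has accumulated to the Relay Chain and burns it there via `burn_at_relay`. The sketch below is a deliberately simplified, self-contained model of that flow (plain integers instead of `Credit` and XCM, made-up names such as `RevenueStash` and `RelayChain`); it is not code from this PR.

```rust
// Illustrative model of the stash-then-burn flow: revenue is accumulated locally,
// and burnt "at the relay" only when a new timeslice begins.

#[derive(Default)]
struct RelayChain {
    burnt: u128,
}

impl RelayChain {
    // Stand-in for the teleport-and-burn XCM program sent by `burn_at_relay`.
    fn burn(&mut self, value: u128) {
        self.burnt += value;
    }
}

#[derive(Default)]
struct RevenueStash {
    balance: u128,
}

impl RevenueStash {
    // Stand-in for `BurnCoretimeRevenue::on_nonzero_unbalanced`: just accumulate.
    fn on_revenue(&mut self, amount: u128) {
        self.balance += amount;
    }

    // Stand-in for `CoretimeInterface::on_new_timeslice`: burn everything stashed so far.
    fn on_new_timeslice(&mut self, relay: &mut RelayChain) {
        let value = core::mem::take(&mut self.balance);
        if value > 0 {
            relay.burn(value);
        }
    }
}

fn main() {
    let mut stash = RevenueStash::default();
    let mut relay = RelayChain::default();
    stash.on_revenue(10);
    stash.on_revenue(32);
    stash.on_new_timeslice(&mut relay);
    assert_eq!(relay.burnt, 42);
    println!("burnt at relay: {}", relay.burnt);
}
```

The split keeps `OnRevenue` cheap however often it fires, while the XCM message to the Relay Chain is sent at most once per timeslice, and only when a non-zero balance has accumulated.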
type MaxLeasedCores = ConstU32<10>; type MaxReservedCores = ConstU32<10>; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 6c22702ce872f..7907f252cf8e4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -33,6 +33,9 @@ mod coretime; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -66,7 +69,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -76,7 +78,7 @@ use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, TokenRelayLocation, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -104,6 +106,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. @@ -116,6 +119,7 @@ pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, + pallet_broker::migration::MigrateV2ToV3, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -141,10 +145,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, state_version: 1, }; @@ -541,7 +545,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -651,7 +655,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenRelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -664,11 +668,11 @@ impl_runtime_apis! 
{ Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -683,7 +687,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -693,6 +697,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -751,7 +767,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 7e1c832a90924..d130b306f7a52 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_897_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 1_899_000 picoseconds. 
+ Weight::from_parts(2_051_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 22_550_000 picoseconds. - Weight::from_parts(22_871_000, 0) + // Minimum execution time: 21_965_000 picoseconds. + Weight::from_parts(22_774_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 21_170_000 picoseconds. - Weight::from_parts(21_645_000, 0) + // Minimum execution time: 20_748_000 picoseconds. + Weight::from_parts(21_464_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,24 +93,34 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_494_000 picoseconds. - Weight::from_parts(10_942_000, 0) + // Minimum execution time: 10_269_000 picoseconds. + Weight::from_parts(10_508_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::LastRelayChainBlockNumber` (r:1 w:0) /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) - /// Storage: `Broker::Leases` (r:1 w:1) - /// Proof: 
`Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -118,15 +128,18 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(_n: u32, ) -> Weight { + fn start_sales(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12247` - // Estimated: `13732` - // Minimum execution time: 61_014_000 picoseconds. - Weight::from_parts(63_267_651, 0) - .saturating_add(Weight::from_parts(0, 13732)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `12279` + // Estimated: `14805 + n * (1 ยฑ0)` + // Minimum execution time: 41_900_000 picoseconds. + Weight::from_parts(80_392_728, 0) + .saturating_add(Weight::from_parts(0, 14805)) + // Standard Error: 870 + .saturating_add(Weight::from_parts(4_361, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(26)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -135,13 +148,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `316` + // Measured: `332` // Estimated: `3593` - // Minimum execution time: 30_931_000 picoseconds. - Weight::from_parts(31_941_000, 0) + // Minimum execution time: 40_911_000 picoseconds. + Weight::from_parts(43_102_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -160,47 +173,47 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `450` // Estimated: `4698` - // Minimum execution time: 57_466_000 picoseconds. - Weight::from_parts(65_042_000, 0) + // Minimum execution time: 70_257_000 picoseconds. + Weight::from_parts(73_889_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 12_799_000 picoseconds. 
- Weight::from_parts(13_401_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 13_302_000 picoseconds. + Weight::from_parts(13_852_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 14_107_000 picoseconds. - Weight::from_parts(14_630_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 14_927_000 picoseconds. + Weight::from_parts(15_553_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `357` - // Estimated: `3550` - // Minimum execution time: 15_254_000 picoseconds. - Weight::from_parts(16_062_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `358` + // Estimated: `3551` + // Minimum execution time: 16_237_000 picoseconds. + Weight::from_parts(16_995_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -209,15 +222,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `735` + // Measured: `736` // Estimated: `4681` - // Minimum execution time: 23_557_000 picoseconds. - Weight::from_parts(24_382_000, 0) + // Minimum execution time: 24_621_000 picoseconds. 
+ Weight::from_parts(25_165_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -225,7 +238,7 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -234,10 +247,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `801` + // Measured: `802` // Estimated: `5996` - // Minimum execution time: 29_371_000 picoseconds. - Weight::from_parts(30_200_000, 0) + // Minimum execution time: 29_832_000 picoseconds. + Weight::from_parts(30_894_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -253,11 +266,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 54_331_000 picoseconds. - Weight::from_parts(55_322_165, 0) + // Minimum execution time: 55_390_000 picoseconds. + Weight::from_parts(56_124_789, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_225 - .saturating_add(Weight::from_parts(1_099_614, 0).saturating_mul(m.into())) + // Standard Error: 41_724 + .saturating_add(Weight::from_parts(1_551_266, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -277,25 +290,25 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3680` - // Minimum execution time: 53_789_000 picoseconds. - Weight::from_parts(55_439_000, 0) - .saturating_add(Weight::from_parts(0, 3680)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 59_759_000 picoseconds. + Weight::from_parts(61_310_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `465` - // Estimated: `3550` - // Minimum execution time: 43_941_000 picoseconds. 
- Weight::from_parts(49_776_000, 0) - .saturating_add(Weight::from_parts(0, 3550)) + // Measured: `466` + // Estimated: `3551` + // Minimum execution time: 37_007_000 picoseconds. + Weight::from_parts(51_927_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -309,8 +322,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 64_917_000 picoseconds. - Weight::from_parts(70_403_000, 0) + // Minimum execution time: 86_563_000 picoseconds. + Weight::from_parts(91_274_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -327,8 +340,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 72_633_000 picoseconds. - Weight::from_parts(79_305_000, 0) + // Minimum execution time: 93_655_000 picoseconds. + Weight::from_parts(98_160_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -341,8 +354,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 36_643_000 picoseconds. - Weight::from_parts(48_218_000, 0) + // Minimum execution time: 33_985_000 picoseconds. + Weight::from_parts(43_618_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -358,13 +371,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_617_000 picoseconds. - Weight::from_parts(18_904_788, 0) + // Minimum execution time: 18_778_000 picoseconds. + Weight::from_parts(19_543_425, 0) .saturating_add(Weight::from_parts(0, 3539)) + // Standard Error: 41 + .saturating_add(Weight::from_parts(33, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -375,26 +390,26 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_575_000 picoseconds. - Weight::from_parts(5_887_598, 0) + // Minimum execution time: 5_505_000 picoseconds. 
+ Weight::from_parts(5_982_015, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 16 - .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into())) + // Standard Error: 13 + .saturating_add(Weight::from_parts(44, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `447` + // Measured: `442` // Estimated: `6196` - // Minimum execution time: 36_415_000 picoseconds. - Weight::from_parts(37_588_000, 0) + // Minimum execution time: 38_128_000 picoseconds. + Weight::from_parts(40_979_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,11 +429,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12194` // Estimated: `13506` - // Minimum execution time: 48_362_000 picoseconds. - Weight::from_parts(49_616_106, 0) + // Minimum execution time: 49_041_000 picoseconds. + Weight::from_parts(50_522_788, 0) .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 61 - .saturating_add(Weight::from_parts(59, 0).saturating_mul(n.into())) + // Standard Error: 72 + .saturating_add(Weight::from_parts(78, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(25)) } @@ -430,8 +445,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_148_000 picoseconds. - Weight::from_parts(6_374_000, 0) + // Minimum execution time: 5_903_000 picoseconds. + Weight::from_parts(6_202_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -454,8 +469,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 30_267_000 picoseconds. - Weight::from_parts(30_825_000, 0) + // Minimum execution time: 31_412_000 picoseconds. + Weight::from_parts(31_964_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -474,8 +489,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 13_491_000 picoseconds. - Weight::from_parts(13_949_000, 0) + // Minimum execution time: 14_098_000 picoseconds. 
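The autogenerated weight functions in these files all follow the same recipe: a measured base weight, an optional per-item slope for each benchmarked linear component (`n`, `m`), plus flat database read and write costs from `T::DbWeight`. The stand-alone sketch below mirrors that arithmetic with a stripped-down `Weight` type and made-up DB costs; it is illustrative only and does not use the real `frame_support::weights::Weight`.

```rust
// Simplified model of how functions like `notify_core_count(n)` above compose a weight:
// base + slope * n + reads + writes, all with saturating arithmetic.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, other: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(other.ref_time),
            proof_size: self.proof_size.saturating_add(other.proof_size),
        }
    }
    fn saturating_mul(self, n: u64) -> Self {
        Self {
            ref_time: self.ref_time.saturating_mul(n),
            proof_size: self.proof_size.saturating_mul(n),
        }
    }
}

// Hypothetical per-operation DB costs; the real values come from `T::DbWeight::get()`.
const READ: Weight = Weight::from_parts(25_000_000, 0);
const WRITE: Weight = Weight::from_parts(100_000_000, 0);

// Mirrors the shape of the generated function: base + per-item slope + 1 read + 1 write.
fn example_weight(n: u64) -> Weight {
    Weight::from_parts(5_982_015, 1_487)
        .saturating_add(Weight::from_parts(44, 0).saturating_mul(n))
        .saturating_add(READ)
        .saturating_add(WRITE)
}

fn main() {
    println!("{:?}", example_weight(1_000));
}
```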
+ Weight::from_parts(14_554_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -486,8 +501,19 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_711_000 picoseconds. - Weight::from_parts(1_913_000, 0) + // Minimum execution time: 1_723_000 picoseconds. + Weight::from_parts(1_822_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_865_000 picoseconds. + Weight::from_parts(1_983_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -497,19 +523,19 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3863` - // Minimum execution time: 12_035_000 picoseconds. - Weight::from_parts(12_383_000, 0) - .saturating_add(Weight::from_parts(0, 3863)) + // Measured: `408` + // Estimated: `1893` + // Minimum execution time: 10_387_000 picoseconds. + Weight::from_parts(10_819_000, 0) + .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) @@ -517,10 +543,21 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `150` // Estimated: `1566` - // Minimum execution time: 6_142_000 picoseconds. - Weight::from_parts(6_538_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_278_000, 0) .saturating_add(Weight::from_parts(0, 1566)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 2_187_000 picoseconds. 
+ Weight::from_parts(2_372_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index 99af88812da2b..f35f7bfc188dc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -17,10 +17,10 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; trait WeighAssets { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 8e1461c4a99e2..ddfc599fa579d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9657fa55c1f2f..7390f35e39740 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -43,7 +43,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 92a5bbbd13760..d20b62a557b95 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -10,54 +10,53 @@ description = "Glutton parachain runtime." 
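Several hunks in this diff repeat the same mechanical migration: `sp_std` imports become `core`/`alloc` imports, with an explicit `extern crate alloc` added to the runtime's `lib.rs`. A minimal sketch of the resulting import pattern follows; the helper name `supported_metadata_versions` is illustrative and not from the PR, and in the real runtime crates this code sits behind `#![cfg_attr(not(feature = "std"), no_std)]`.

```rust
// `core` replaces `sp_std` for items like `PhantomData`, and `extern crate alloc`
// provides `Vec`/`vec!` where `sp_std::prelude` used to.
extern crate alloc;

use alloc::{vec, vec::Vec};
use core::marker::PhantomData;

// Same shape as the generated weight structs touched in this diff.
pub struct WeightInfo<T>(PhantomData<T>);

// Hypothetical helper, only here to show `Vec` coming from `alloc`.
pub fn supported_metadata_versions() -> Vec<u32> {
    vec![14, 15]
}

fn main() {
    let _weights: WeightInfo<()> = WeightInfo(PhantomData);
    println!("{:?}", supported_metadata_versions());
}
```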
workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-glutton = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } 
+sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +pallet-message-queue = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-timestamp = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -109,7 +108,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 4092fb78594d2..1b505ad3acbf7 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -47,6 +47,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -57,7 +60,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -100,7 +102,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: 
create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -296,6 +298,7 @@ pub type SignedExtra = ( frame_system::CheckGenesis, frame_system::CheckEra, frame_system::CheckNonce, + frame_system::CheckWeight, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = @@ -344,7 +347,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -454,7 +457,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index d4e65da3cd642..a732bec2352d2 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -7,74 +7,73 @@ description = "Rococo's People parachain runtime" license = "Apache-2.0" [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.7.7" } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +enumflags2 = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-identity = { path = "../../../../../substrate/frame/identity", default-features = false } -pallet-message-queue = { path = 
"../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true 
} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } [features] default = ["std"] @@ -128,7 +127,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -136,7 +134,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", - 
"xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -165,7 +163,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index c80f6879fb344..4f007c3fc39db 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -22,6 +22,9 @@ pub mod people; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -59,7 +62,6 @@ use sp_runtime::{ ApplyExtrinsicResult, }; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -70,7 +72,7 @@ use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -132,7 +134,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -521,7 +523,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -625,7 +627,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -638,11 +640,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -657,7 +659,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -667,6 +669,18 @@ impl_runtime_apis! 
{ } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -725,7 +739,7 @@ impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/people.rs b/cumulus/parachains/runtimes/people/people-rococo/src/people.rs index 88a89711019d5..8211447d68c8a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/people.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/people.rs @@ -28,7 +28,6 @@ use sp_runtime::{ traits::{AccountIdConversion, Verify}, RuntimeDebug, }; -use sp_std::prelude::*; parameter_types! { // 27 | Min encoded size of `Registration` @@ -94,8 +93,8 @@ pub enum IdentityField { )] #[codec(mel_bound())] pub struct IdentityInfo { - /// A reasonable display name for the controller of the account. This should be whatever the - /// account is typically known as and should not be confusable with other entities, given + /// A reasonable display name for the controller of the account. This should be whatever the + /// account is typically known as and should not be confusable with other entities, given /// reasonable context. /// /// Stored as UTF-8. @@ -151,7 +150,7 @@ impl IdentityInformationProvider for IdentityInfo { #[cfg(feature = "runtime-benchmarks")] fn create_identity_info() -> Self { - let data = Data::Raw(vec![0; 32].try_into().unwrap()); + let data = Data::Raw(alloc::vec![0; 32].try_into().unwrap()); IdentityInfo { display: data.clone(), diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs index fcea5fd1bf679..5715d56c21868 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_parachain_system.rs @@ -20,7 +20,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. 
 pub struct WeightInfo<T>(PhantomData<T>);
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs
index fe1911b77a72d..47c6790140736 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs
@@ -20,7 +20,7 @@
 #![allow(unused_imports)]
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 /// Weight functions for `pallet_message_queue`.
 pub struct WeightInfo<T>(PhantomData<T>);
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
index 4afd65bdcfea1..11c1bad9aa178 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
@@ -17,10 +17,10 @@
 mod pallet_xcm_benchmarks_fungible;
 mod pallet_xcm_benchmarks_generic;
 use crate::{xcm_config::MaxAssetsIntoHolding, Runtime};
+use alloc::vec::Vec;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
-use sp_std::prelude::*;
 use xcm::{latest::prelude::*, DoubleEncoded};
 trait WeighAssets {
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index b279399e7a96b..2364798596d50 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -42,7 +42,7 @@
 #![allow(unused_imports)]
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 /// Weights for `pallet_xcm_benchmarks::fungible`.
 pub struct WeightInfo<T>(PhantomData<T>);
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index e2be324ee2d48..a50c8860c48f8 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -42,7 +42,7 @@
 #![allow(unused_imports)]
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 /// Weights for `pallet_xcm_benchmarks::generic`.
pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index b040613d19e75..20c7e691ebc88 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -7,74 +7,73 @@ description = "Westend's People parachain runtime" license = "Apache-2.0" [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.7.7" } -hex-literal = { version = "0.4.1" } +codec = { features = ["derive"], workspace = true } +enumflags2 = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-identity = { path = "../../../../../substrate/frame/identity", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = 
"../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-multisig = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-utility = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } 
+xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } [features] default = ["std"] @@ -127,7 +126,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", @@ -136,7 +134,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -165,7 +163,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 06c938b8a40c3..1378324ce7b02 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -22,6 +22,9 @@ pub mod people; mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ @@ -59,7 +62,6 @@ use sp_runtime::{ ApplyExtrinsicResult, }; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use sp_std::prelude::*; #[cfg(feature = "std")] use 
sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -70,7 +72,7 @@ use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -132,7 +134,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -521,7 +523,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -625,7 +627,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -638,11 +640,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -657,7 +659,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { PolkadotXcm::dry_run_call::(origin, call) } @@ -667,6 +669,18 @@ impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) @@ -725,7 +739,7 @@ impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/people.rs b/cumulus/parachains/runtimes/people/people-westend/src/people.rs index a5c0e66a3f882..0255fd074b111 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/people.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/people.rs @@ -28,7 +28,6 @@ use sp_runtime::{ traits::{AccountIdConversion, Verify}, RuntimeDebug, }; -use sp_std::prelude::*; parameter_types! { // 27 | Min encoded size of `Registration` @@ -151,7 +150,7 @@ impl IdentityInformationProvider for IdentityInfo { #[cfg(feature = "runtime-benchmarks")] fn create_identity_info() -> Self { - let data = Data::Raw(vec![0; 32].try_into().unwrap()); + let data = Data::Raw(alloc::vec![0; 32].try_into().unwrap()); IdentityInfo { display: data.clone(), diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs index fcea5fd1bf679..5715d56c21868 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -20,7 +20,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `cumulus_pallet_parachain_system`. pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs index fe1911b77a72d..47c6790140736 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs @@ -20,7 +20,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
 pub struct WeightInfo<T>(PhantomData<T>);
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
index b2579230c9ed7..b1fc7ad8ed832 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
@@ -17,10 +17,10 @@
 mod pallet_xcm_benchmarks_fungible;
 mod pallet_xcm_benchmarks_generic;
 use crate::{xcm_config::MaxAssetsIntoHolding, Runtime};
+use alloc::vec::Vec;
 use frame_support::weights::Weight;
 use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
 use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
-use sp_std::prelude::*;
 use xcm::{latest::prelude::*, DoubleEncoded};
 trait WeighAssets {
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index efffd31881710..92d08a2461807 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -42,7 +42,7 @@
 #![allow(unused_imports)]
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 /// Weights for `pallet_xcm_benchmarks::fungible`.
 pub struct WeightInfo<T>(PhantomData<T>);
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index d7b10f95c792a..861f038199596 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -42,7 +42,7 @@
 #![allow(unused_imports)]
 use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
+use core::marker::PhantomData;
 /// Weights for `pallet_xcm_benchmarks::generic`.
pub struct WeightInfo(PhantomData); diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 910944f54a5ff..c76c09a31234e 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -10,41 +10,40 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-solo-to-para = { path = 
"../../../../pallets/solo-to-para", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-solo-to-para = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-timestamp = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -74,7 +73,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 461133f6cfc06..1fe72604d3731 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -27,6 +27,9 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -37,7 +40,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -310,7 +312,7 @@ impl_runtime_apis! 
{ Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 7a7fad537ac30..8f3b2204cfe34 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -10,45 +10,44 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-timestamp = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } +pallet-message-queue = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", 
default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-core = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -77,7 +76,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 7422b580cc3e0..1dfbe2b6c41c8 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -31,6 +31,9 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::AggregateMessageOrigin; @@ -45,7 +48,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -280,7 +282,7 @@ impl sp_runtime::traits::SignedExtension for DisallowSigned { type Pre = (); fn additional_signed( &self, - ) -> sp_std::result::Result<(), sp_runtime::transaction_validity::TransactionValidityError> { + ) -> core::result::Result<(), sp_runtime::transaction_validity::TransactionValidityError> { Ok(()) } fn pre_dispatch( @@ -368,7 +370,7 @@ impl_runtime_apis! 
{ Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index c081bac4babe8..01d7fcc2b5c8b 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -10,41 +10,40 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../substrate/frame/session", default-features = false } -pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false } -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } -pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } -cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } -cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } -cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features 
= false } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" +hex-literal = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -68,7 +67,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-executor/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 3c84243306fbe..3fc3822a63eb9 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::marker::PhantomData; +use core::marker::PhantomData; use codec::{Decode, DecodeLimit}; use cumulus_primitives_core::{ diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 3262233053e7e..1a2737f3aa224 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -15,70 +15,69 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1", optional = true } +codec = { features = ["derive"], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -smallvec = "1.11.0" +scale-info = { features = ["derive"], workspace = true } +smallvec = { workspace = true, default-features = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-sudo = { 
path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { 
path = "../../../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } +polkadot-primitives = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../../assets/common", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +assets-common = { workspace = true } [features] default = ["std"] @@ -128,14 +127,13 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] @@ -166,7 +164,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index e77416e6cd5b6..bf39c02a3f594 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -32,6 +32,9 @@ 
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod weights; pub mod xcm_config; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::Encode; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; @@ -69,7 +72,6 @@ use sp_runtime::{ ApplyExtrinsicResult, }; pub use sp_runtime::{traits::ConvertInto, MultiAddress, Perbill, Permill}; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -85,7 +87,7 @@ use xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -737,7 +739,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -847,7 +849,7 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())]; PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -860,11 +862,11 @@ impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -879,7 +881,7 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { use xcm_builder::InspectMessageQueues; use xcm_executor::RecordXcm; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index cf734345a976f..a0ad248bb7048 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -10,60 +10,59 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { optional = true, workspace = 
true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-assets = { workspace = true } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +polkadot-runtime-common = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-ping = { path = "../../../pallets/ping", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +pallet-message-queue = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-ping = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = 
true } +cumulus-primitives-utility = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["rococo"], workspace = true } +parachain-info = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -106,7 +105,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index fd4716ab972e8..dff7046f19726 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -22,6 +22,9 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +extern crate alloc; + +use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; @@ -32,7 +35,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -107,7 +109,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -711,7 +713,7 @@ impl_runtime_apis! 
{ Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 639b8b3d4dcf1..b20d2a28fa7f3 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -15,113 +15,114 @@ name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.79" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.28" -hex-literal = "0.4.1" +async-trait = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +color-print = { workspace = true } +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +docify = { workspace = true } # Local -rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } -shell-runtime = { path = "../parachains/runtimes/starters/shell" } -glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } -seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } -asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } -asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } -collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } -contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } -bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } -coretime-rococo-runtime = { path = "../parachains/runtimes/coretime/coretime-rococo" } -coretime-westend-runtime = { path = "../parachains/runtimes/coretime/coretime-westend" } -bridge-hub-westend-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-westend" } -penpal-runtime = { path = "../parachains/runtimes/testing/penpal" } -jsonrpsee = { version = "0.22", features = ["server"] } -people-rococo-runtime = { path = "../parachains/runtimes/people/people-rococo" } -people-westend-runtime = { path = "../parachains/runtimes/people/people-westend" } -parachains-common = { path = "../parachains/common" } -testnet-parachains-constants = { path = "../parachains/runtimes/constants", default-features = false, features = [ +rococo-parachain-runtime = { workspace = true } +shell-runtime = { workspace = true } +glutton-westend-runtime = { workspace = true } +seedling-runtime = { workspace = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } +asset-hub-westend-runtime = { workspace = true } +collectives-westend-runtime = { workspace = true } +contracts-rococo-runtime = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +coretime-rococo-runtime = { workspace = true } +coretime-westend-runtime = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +penpal-runtime = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } +people-rococo-runtime = { workspace = true } +people-westend-runtime = { workspace = true } +parachains-common = { workspace = 
true, default-features = true } +testnet-parachains-constants = { features = [ "rococo", "westend", -] } +], workspace = true } # Substrate -frame-benchmarking = { path = "../../substrate/frame/benchmarking" } -frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli" } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -sp-io = { path = "../../substrate/primitives/io" } -sp-core = { path = "../../substrate/primitives/core" } -sp-session = { path = "../../substrate/primitives/session" } -frame-try-runtime = { path = "../../substrate/frame/try-runtime", optional = true } -sc-consensus = { path = "../../substrate/client/consensus/common" } -sp-tracing = { path = "../../substrate/primitives/tracing" } -frame-support = { path = "../../substrate/frame/support" } -sc-cli = { path = "../../substrate/client/cli" } -sc-client-api = { path = "../../substrate/client/api" } -sc-executor = { path = "../../substrate/client/executor" } -sc-service = { path = "../../substrate/client/service" } -sc-telemetry = { path = "../../substrate/client/telemetry" } -sc-transaction-pool = { path = "../../substrate/client/transaction-pool" } -sp-transaction-pool = { path = "../../substrate/primitives/transaction-pool" } -sc-network = { path = "../../substrate/client/network" } -sc-network-sync = { path = "../../substrate/client/network/sync" } -sc-basic-authorship = { path = "../../substrate/client/basic-authorship" } -sp-timestamp = { path = "../../substrate/primitives/timestamp" } -sp-blockchain = { path = "../../substrate/primitives/blockchain" } -sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder", default-features = false } -sp-block-builder = { path = "../../substrate/primitives/block-builder" } -sp-keystore = { path = "../../substrate/primitives/keystore" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } -sc-rpc = { path = "../../substrate/client/rpc" } -sp-version = { path = "../../substrate/primitives/version" } -sc-tracing = { path = "../../substrate/client/tracing" } -sp-offchain = { path = "../../substrate/primitives/offchain" } -frame-system-rpc-runtime-api = { path = "../../substrate/frame/system/rpc/runtime-api" } -pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../substrate/frame/transaction-payment/rpc/runtime-api" } -sp-std = { path = "../../substrate/primitives/std" } -sp-inherents = { path = "../../substrate/primitives/inherents" } -sp-api = { path = "../../substrate/primitives/api" } -sp-consensus-aura = { path = "../../substrate/primitives/consensus/aura" } -sc-sysinfo = { path = "../../substrate/client/sysinfo" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../substrate/utils/prometheus" } -sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } -substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } -substrate-state-trie-migration-rpc = { path = "../../substrate/utils/frame/rpc/state-trie-migration-rpc" } +frame-benchmarking = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-session = { workspace = true, 
default-features = true } +frame-try-runtime = { optional = true, workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true } +sp-block-builder = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } # Polkadot # Use rococo-native as this is currently the default "local" relay chain -polkadot-cli = { path = "../../polkadot/cli", features = ["rococo-native"] } -polkadot-primitives = { path = "../../polkadot/primitives" } -polkadot-service = { path = "../../polkadot/node/service" } -xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } +polkadot-cli = { features = ["rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../client/cli" } -cumulus-client-collator = { path = "../client/collator" } -cumulus-client-consensus-aura = { path = "../client/consensus/aura" } -cumulus-client-consensus-relay-chain = { path = "../client/consensus/relay-chain" } -cumulus-client-consensus-common = { path = "../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../client/consensus/proposer" } 
-cumulus-client-parachain-inherent = { path = "../client/parachain-inherent" } -cumulus-client-service = { path = "../client/service" } -cumulus-primitives-aura = { path = "../primitives/aura" } -cumulus-primitives-core = { path = "../primitives/core" } -cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } -color-print = "0.3.4" +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-aura = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [dev-dependencies] -assert_cmd = "2.0" -nix = { version = "0.28.0", features = ["signal"] } -tempfile = "3.8.0" +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +tempfile = { workspace = true } tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } -wait-timeout = "0.2" +wait-timeout = { workspace = true } [features] default = [] @@ -171,4 +172,8 @@ try-runtime = [ "shell-runtime/try-runtime", "sp-runtime/try-runtime", ] -fast-runtime = ["bridge-hub-rococo-runtime/fast-runtime"] +fast-runtime = [ + "bridge-hub-rococo-runtime/fast-runtime", + "coretime-rococo-runtime/fast-runtime", + "coretime-westend-runtime/fast-runtime", +] diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs index f7d2fd0f0be3c..d06354dda2205 100644 --- a/cumulus/polkadot-parachain/src/cli.rs +++ b/cumulus/polkadot-parachain/src/cli.rs @@ -14,6 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::common::NodeExtraArgs; +use clap::{Command, CommandFactory, FromArgMatches}; +use sc_cli::SubstrateCli; use std::path::PathBuf; /// Sub-commands supported by the collator. @@ -57,22 +60,13 @@ pub enum Subcommand { Benchmark(frame_benchmarking_cli::BenchmarkCmd), } -const AFTER_HELP_EXAMPLE: &str = color_print::cstr!( - r#"Examples: - polkadot-parachain --chain asset-hub-polkadot --sync warp -- --chain polkadot --sync warp - Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. - polkadot-parachain --chain asset-hub-polkadot --sync warp --relay-chain-rpc-url ws://rpc.example.com -- --chain polkadot - Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. - Uses ws://rpc.example.com as remote relay chain node. 
- "# -); #[derive(Debug, clap::Parser)] #[command( propagate_version = true, args_conflicts_with_subcommands = true, - subcommand_negates_reqs = true + subcommand_negates_reqs = true, + after_help = crate::examples(Self::executable_name()) )] -#[clap(after_help = AFTER_HELP_EXAMPLE)] pub struct Cli { #[command(subcommand)] pub subcommand: Option, @@ -80,6 +74,12 @@ pub struct Cli { #[command(flatten)] pub run: cumulus_client_cli::RunCmd, + /// EXPERIMENTAL: Use slot-based collator which can handle elastic scaling. + /// + /// Use with care, this flag is unstable and subject to change. + #[arg(long)] + pub experimental_use_slot_based: bool, + /// Disable automatic hardware benchmarks. /// /// By default these benchmarks are automatically ran at startup and measure @@ -92,7 +92,13 @@ pub struct Cli { /// Relay chain arguments #[arg(raw = true)] - pub relaychain_args: Vec, + pub relay_chain_args: Vec, +} + +impl Cli { + pub(crate) fn node_extra_args(&self) -> NodeExtraArgs { + NodeExtraArgs { use_slot_based_consensus: self.experimental_use_slot_based } + } } #[derive(Debug)] @@ -108,18 +114,32 @@ pub struct RelayChainCli { } impl RelayChainCli { - /// Parse the relay chain CLI parameters using the para chain `Configuration`. + fn polkadot_cmd() -> Command { + let help_template = color_print::cformat!( + "The arguments that are passed to the relay chain node. \n\ + \n\ + RELAY_CHAIN_ARGS: \n\ + {{options}}", + ); + + polkadot_cli::RunCmd::command() + .no_binary_name(true) + .help_template(help_template) + } + + /// Parse the relay chain CLI parameters using the parachain `Configuration`. pub fn new<'a>( para_config: &sc_service::Configuration, relay_chain_args: impl Iterator, ) -> Self { + let polkadot_cmd = Self::polkadot_cmd(); + let matches = polkadot_cmd.get_matches_from(relay_chain_args); + let base = FromArgMatches::from_arg_matches(&matches).unwrap_or_else(|e| e.exit()); + let extension = crate::chain_spec::Extensions::try_get(&*para_config.chain_spec); let chain_id = extension.map(|e| e.relay_chain.clone()); + let base_path = para_config.base_path.path().join("polkadot"); - Self { - base_path: Some(base_path), - chain_id, - base: clap::Parser::parse_from(relay_chain_args), - } + Self { base, chain_id, base_path: Some(base_path) } } } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 653ea3281f0f7..fcf6c06f42227 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -14,15 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
+#[cfg(feature = "runtime-benchmarks")] +use crate::service::Block; use crate::{ chain_spec, chain_spec::GenericChainSpec, cli::{Cli, RelayChainCli, Subcommand}, + common::NodeExtraArgs, fake_runtime_api::{ - asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi, + asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, + aura::RuntimeApi as AuraRuntimeApi, }, - service::{new_partial, Block, Hash}, + service::{new_aura_node_spec, DynNodeSpec, ShellNode}, }; +#[cfg(feature = "runtime-benchmarks")] use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; @@ -34,27 +39,39 @@ use sc_cli::{ }; use sc_service::config::{BasePath, PrometheusConfig}; use sp_runtime::traits::AccountIdConversion; +#[cfg(feature = "runtime-benchmarks")] +use sp_runtime::traits::HashingFor; use std::{net::SocketAddr, path::PathBuf}; +/// The choice of consensus for the parachain omni-node. +#[derive(PartialEq, Eq, Debug, Default)] +pub enum Consensus { + /// Aura consensus. + #[default] + Aura, + /// Use the relay chain consensus. + // TODO: atm this is just a demonstration, not really reach-able. We can add it to the CLI, + // env, or the chain spec. Or, just don't, and when we properly refactor this mess we will + // re-introduce it. + #[allow(unused)] + Relay, +} + /// Helper enum that is used for better distinction of different parachain/runtime configuration /// (it is based/calculated on ChainSpec's ID attribute) -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq)] enum Runtime { - /// This is the default runtime (actually based on rococo) - #[default] - Default, + /// None of the system-chain runtimes, rather the node will act agnostic to the runtime ie. be + /// an omni-node, and simply run a node with the given consensus algorithm. 
+ Omni(Consensus), Shell, Seedling, AssetHubPolkadot, - AssetHubKusama, - AssetHubRococo, - AssetHubWestend, + AssetHub, Penpal(ParaId), ContractsRococo, - CollectivesPolkadot, - CollectivesWestend, + Collectives, Glutton, - GluttonWestend, BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType), Coretime(chain_spec::coretime::CoretimeRuntimeType), People(chain_spec::people::PeopleRuntimeType), @@ -97,20 +114,20 @@ fn runtime(id: &str) -> Runtime { Runtime::Seedling } else if id.starts_with("asset-hub-polkadot") | id.starts_with("statemint") { Runtime::AssetHubPolkadot - } else if id.starts_with("asset-hub-kusama") | id.starts_with("statemine") { - Runtime::AssetHubKusama - } else if id.starts_with("asset-hub-rococo") { - Runtime::AssetHubRococo - } else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") { - Runtime::AssetHubWestend + } else if id.starts_with("asset-hub-kusama") | + id.starts_with("statemine") | + id.starts_with("asset-hub-rococo") | + id.starts_with("rockmine") | + id.starts_with("asset-hub-westend") | + id.starts_with("westmint") + { + Runtime::AssetHub } else if id.starts_with("penpal") { Runtime::Penpal(para_id.unwrap_or(ParaId::new(0))) } else if id.starts_with("contracts-rococo") { Runtime::ContractsRococo - } else if id.starts_with("collectives-polkadot") { - Runtime::CollectivesPolkadot - } else if id.starts_with("collectives-westend") { - Runtime::CollectivesWestend + } else if id.starts_with("collectives-polkadot") || id.starts_with("collectives-westend") { + Runtime::Collectives } else if id.starts_with(chain_spec::bridge_hubs::BridgeHubRuntimeType::ID_PREFIX) { Runtime::BridgeHub( id.parse::() @@ -120,15 +137,17 @@ fn runtime(id: &str) -> Runtime { Runtime::Coretime( id.parse::().expect("Invalid value"), ) - } else if id.starts_with("glutton-westend") { - Runtime::GluttonWestend } else if id.starts_with("glutton") { Runtime::Glutton } else if id.starts_with(chain_spec::people::PeopleRuntimeType::ID_PREFIX) { Runtime::People(id.parse::().expect("Invalid value")) } else { - log::warn!("No specific runtime was recognized for ChainSpec's id: '{}', so Runtime::default() will be used", id); - Runtime::default() + log::warn!( + "No specific runtime was recognized for ChainSpec's id: '{}', \ + so Runtime::Omni(Consensus::Aura) will be used", + id + ); + Runtime::Omni(Consensus::Aura) } } @@ -274,55 +293,34 @@ fn load_spec(id: &str) -> std::result::Result, String> { /// (H/T to Phala for the idea) /// E.g. 
"penpal-kusama-2004" yields ("penpal-kusama", Some(2004)) fn extract_parachain_id(id: &str) -> (&str, &str, Option) { - const ROCOCO_TEST_PARA_PREFIX: &str = "penpal-rococo-"; - const KUSAMA_TEST_PARA_PREFIX: &str = "penpal-kusama-"; - const POLKADOT_TEST_PARA_PREFIX: &str = "penpal-polkadot-"; - - const GLUTTON_PARA_DEV_PREFIX: &str = "glutton-kusama-dev-"; - const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; - const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; - - const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; - const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; - const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; - - let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(ROCOCO_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..ROCOCO_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(POLKADOT_TEST_PARA_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..POLKADOT_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_DEV_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_LOCAL_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) - } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { - let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); - (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) - } else { - (id, id, None) - }; + let para_prefixes = [ + // Penpal + "penpal-rococo-", + "penpal-kusama-", + "penpal-polkadot-", + // Glutton Kusama + "glutton-kusama-dev-", + "glutton-kusama-local-", + "glutton-kusama-genesis-", + // Glutton Westend + "glutton-westend-dev-", + "glutton-westend-local-", + "glutton-westend-genesis-", + ]; + + for para_prefix in para_prefixes { + if let Some(suffix) = id.strip_prefix(para_prefix) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + return (&id[..para_prefix.len() - 1], id, Some(para_id.into())) + } + } - (norm_id, orig_id, para.map(Into::into)) + (id, id, None) } impl SubstrateCli for Cli { fn impl_name() -> String { - "Polkadot parachain".into() + Self::executable_name() } fn impl_version() -> String { @@ -331,10 
+329,12 @@ impl SubstrateCli for Cli { fn description() -> String { format!( - "Polkadot parachain\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relaychain node.\n\n\ - {} [parachain-args] -- [relaychain-args]", + "The command-line arguments provided first will be passed to the parachain node, \n\ + and the arguments provided after -- will be passed to the relay chain node. \n\ + \n\ + Example: \n\ + \n\ + {} [parachain-args] -- [relay-chain-args]", Self::executable_name() ) } @@ -358,33 +358,27 @@ impl SubstrateCli for Cli { impl SubstrateCli for RelayChainCli { fn impl_name() -> String { - "Polkadot parachain".into() + Cli::impl_name() } fn impl_version() -> String { - env!("SUBSTRATE_CLI_IMPL_VERSION").into() + Cli::impl_version() } fn description() -> String { - format!( - "Polkadot parachain\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relay chain node.\n\n\ - {} [parachain-args] -- [relay_chain-args]", - Self::executable_name() - ) + Cli::description() } fn author() -> String { - env!("CARGO_PKG_AUTHORS").into() + Cli::author() } fn support_url() -> String { - "https://github.com/paritytech/polkadot-sdk/issues/new".into() + Cli::support_url() } fn copyright_start_year() -> i32 { - 2017 + Cli::copyright_start_year() } fn load_spec(&self, id: &str) -> std::result::Result, String> { @@ -392,108 +386,27 @@ impl SubstrateCli for RelayChainCli { } } -/// Creates partial components for the runtimes that are supported by the benchmarks. -macro_rules! construct_partials { - ($config:expr, |$partials:ident| $code:expr) => { - match $config.chain_spec.runtime()? { - Runtime::AssetHubPolkadot => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - $code - }, - Runtime::AssetHubKusama | - Runtime::AssetHubRococo | - Runtime::AssetHubWestend | - Runtime::BridgeHub(_) | - Runtime::CollectivesPolkadot | - Runtime::CollectivesWestend | - Runtime::Coretime(_) | - Runtime::People(_) => { - let $partials = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { - let $partials = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - $code - }, - Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( - &$config, - crate::service::build_aura_import_queue, - )?; - $code - }, - } - }; -} - -macro_rules! construct_async_run { - (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ - let runner = $cli.create_runner($cmd)?; - match runner.config().chain_spec.runtime()? 
{ - Runtime::AssetHubPolkadot => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubKusama | - Runtime::AssetHubRococo | - Runtime::AssetHubWestend | - Runtime::BridgeHub(_) | - Runtime::CollectivesPolkadot | - Runtime::CollectivesWestend | - Runtime::Coretime(_) | - Runtime::People(_) => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_relay_to_aura_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Shell | - Runtime::Seedling | - Runtime::GluttonWestend | - Runtime::Glutton => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::build_shell_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } - Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - _, - >( - &$config, - crate::service::build_aura_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - } - }} +fn new_node_spec( + config: &sc_service::Configuration, + extra_args: NodeExtraArgs, +) -> std::result::Result, sc_cli::Error> { + Ok(match config.chain_spec.runtime()? { + Runtime::AssetHubPolkadot => + new_aura_node_spec::(extra_args), + Runtime::AssetHub | + Runtime::BridgeHub(_) | + Runtime::Collectives | + Runtime::Coretime(_) | + Runtime::People(_) | + Runtime::ContractsRococo | + Runtime::Glutton | + Runtime::Penpal(_) => new_aura_node_spec::(extra_args), + Runtime::Shell | Runtime::Seedling => Box::new(ShellNode), + Runtime::Omni(consensus) => match consensus { + Consensus::Aura => new_aura_node_spec::(extra_args), + Consensus::Relay => Box::new(ShellNode), + }, + }) } /// Parse command line arguments into service configuration. 
@@ -506,37 +419,45 @@ pub fn run() -> Result<()> { runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, Some(Subcommand::CheckBlock(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_check_block_cmd(config, cmd) }) }, Some(Subcommand::ExportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.database)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_blocks_cmd(config, cmd) }) }, Some(Subcommand::ExportState(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, config.chain_spec)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_export_state_cmd(config, cmd) }) }, Some(Subcommand::ImportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_import_blocks_cmd(config, cmd) + }) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.prepare_revert_cmd(config, cmd) }) }, - Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.backend, None)) - }), Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter()); runner.sync_run(|config| { - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let polkadot_config = SubstrateCli::create_configuration( &polkadot_cli, &polkadot_cli, @@ -549,8 +470,10 @@ pub fn run() -> Result<()> { }, Some(Subcommand::ExportGenesisHead(cmd)) => { let runner = cli.create_runner(cmd)?; - runner - .sync_run(|config| construct_partials!(config, |partials| cmd.run(partials.client))) + runner.sync_run(|config| { + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_export_genesis_head_cmd(config, cmd) + }) }, Some(Subcommand::ExportGenesisWasm(cmd)) => { let runner = cli.create_runner(cmd)?; @@ -564,45 +487,34 @@ pub fn run() -> Result<()> { // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => - if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run_with_spec::, ReclaimHostFunctions>(Some(config.chain_spec))) - } else { - Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." 
- .into()) - }, + #[cfg(feature = "runtime-benchmarks")] + BenchmarkCmd::Pallet(cmd) => runner.sync_run(|config| { + cmd.run_with_spec::, ReclaimHostFunctions>(Some( + config.chain_spec, + )) + }), BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| cmd.run(partials.client)) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_block_cmd(config, cmd) }), - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => - return Err(sc_cli::Error::Input( - "Compile with --features=runtime-benchmarks \ - to enable storage benchmarks." - .into(), - ) - .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - construct_partials!(config, |partials| { - let db = partials.backend.expose_db(); - let storage = partials.backend.expose_storage(); - - cmd.run(config, partials.client.clone(), db, storage) - }) + let node = new_node_spec(&config, cli.node_extra_args())?; + node.run_benchmark_storage_cmd(config, cmd) }), BenchmarkCmd::Machine(cmd) => runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), - // NOTE: this allows the Client to leniently implement - // new benchmark commands without requiring a companion MR. #[allow(unreachable_patterns)] - _ => Err("Benchmarking sub-command unsupported".into()), + _ => Err("Benchmarking sub-command unsupported or compilation feature missing. \ + Make sure to compile with --features=runtime-benchmarks \ + to enable all supported benchmarks." + .into()), } }, Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), None => { let runner = cli.create_runner(&cli.run.normalize())?; + let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter()); let collator_options = cli.run.collator_options(); runner.run_node_until_exit(|config| async move { @@ -624,230 +536,82 @@ pub fn run() -> Result<()> { if old_path.exists() && new_path.exists() { return Err(format!( - "Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.", - old_name, old_path.display(), new_path.display() - ).into()) + "Found legacy {} path {} and new Asset Hub path {}. \ + Delete one path such that only one exists.", + old_name, + old_path.display(), + new_path.display() + ) + .into()) } if old_path.exists() { std::fs::rename(old_path.clone(), new_path.clone())?; info!( - "Statemint renamed to Asset Hub. The filepath with associated data on disk has been renamed from {} to {}.", - old_path.display(), new_path.display() + "{} was renamed to Asset Hub. 
The filepath with associated data on disk \ + has been renamed from {} to {}.", + old_name, + old_path.display(), + new_path.display() ); } } - let hwbench = (!cli.no_hardware_benchmarks).then_some( - config.database.path().map(|database_path| { + let hwbench = (!cli.no_hardware_benchmarks) + .then_some(config.database.path().map(|database_path| { let _ = std::fs::create_dir_all(database_path); sc_sysinfo::gather_hwbench(Some(database_path)) - })).flatten(); + })) + .flatten(); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) .map(|e| e.para_id) .ok_or("Could not find parachain extension in chain-spec.")?; - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - let id = ParaId::from(para_id); let parachain_account = - AccountIdConversion::::into_account_truncating(&id); + AccountIdConversion::::into_account_truncating( + &id, + ); let tokio_handle = config.tokio_handle.clone(); let polkadot_config = SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) .map_err(|err| format!("Relay chain argument error: {}", err))?; - info!("Parachain id: {:?}", id); - info!("Parachain Account: {}", parachain_account); - info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - - match polkadot_config.network.network_backend { - sc_network::config::NetworkBackendType::Libp2p => - start_node::>( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await, - sc_network::config::NetworkBackendType::Litep2p => - start_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await, - } + info!("๐Ÿชช Parachain id: {:?}", id); + info!("๐Ÿงพ Parachain Account: {}", parachain_account); + info!("โœ๏ธ Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); + + start_node( + config, + polkadot_config, + collator_options, + id, + cli.node_extra_args(), + hwbench, + ) + .await }) }, } } -async fn start_node>( +#[sc_tracing::logging::prefix_logs_with("Parachain")] +async fn start_node( config: sc_service::Configuration, polkadot_config: sc_service::Configuration, collator_options: cumulus_client_cli::CollatorOptions, id: ParaId, + extra_args: NodeExtraArgs, hwbench: Option, ) -> Result { - match config.chain_spec.runtime()? 
{ - Runtime::AssetHubPolkadot => crate::service::start_asset_hub_lookahead_node::< - AssetHubPolkadotRuntimeApi, - AssetHubPolkadotAuraId, - Network, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::AssetHubRococo | Runtime::AssetHubWestend | Runtime::AssetHubKusama => - crate::service::start_asset_hub_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::CollectivesWestend | Runtime::CollectivesPolkadot => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Seedling | Runtime::Shell => crate::service::start_shell_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::ContractsRococo => crate::service::start_contracts_rococo_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) + let node_spec = new_node_spec(&config, extra_args)?; + node_spec + .start_node(config, polkadot_config, collator_options, id, hwbench) .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment | - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type { - chain_spec::coretime::CoretimeRuntimeType::Kusama | - chain_spec::coretime::CoretimeRuntimeType::KusamaLocal | - chain_spec::coretime::CoretimeRuntimeType::Polkadot | - chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal | - chain_spec::coretime::CoretimeRuntimeType::Rococo | - chain_spec::coretime::CoretimeRuntimeType::RococoLocal | - chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment | - chain_spec::coretime::CoretimeRuntimeType::Westend | - chain_spec::coretime::CoretimeRuntimeType::WestendLocal | - chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - - Runtime::Penpal(_) | Runtime::Default => - crate::service::start_rococo_parachain_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::Glutton | Runtime::GluttonWestend => - crate::service::start_basic_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0) - .map_err(Into::into), - - Runtime::People(people_runtime_type) => match people_runtime_type { - 
chain_spec::people::PeopleRuntimeType::Kusama | - chain_spec::people::PeopleRuntimeType::KusamaLocal | - chain_spec::people::PeopleRuntimeType::Polkadot | - chain_spec::people::PeopleRuntimeType::PolkadotLocal | - chain_spec::people::PeopleRuntimeType::Rococo | - chain_spec::people::PeopleRuntimeType::RococoLocal | - chain_spec::people::PeopleRuntimeType::RococoDevelopment | - chain_spec::people::PeopleRuntimeType::Westend | - chain_spec::people::PeopleRuntimeType::WestendLocal | - chain_spec::people::PeopleRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::( - config, - polkadot_config, - collator_options, - id, - hwbench, - ) - .await - .map(|r| r.0), - } - .map_err(Into::into), - } + .map_err(Into::into) } impl DefaultConfigurationValues for RelayChainCli { @@ -979,7 +743,7 @@ impl CliConfiguration for RelayChainCli { mod tests { use crate::{ chain_spec::{get_account_id_from_seed, get_from_seed}, - command::{Runtime, RuntimeResolver}, + command::{Consensus, Runtime, RuntimeResolver}, }; use sc_chain_spec::{ChainSpec, ChainSpecExtension, ChainSpecGroup, ChainType, Extension}; use serde::{Deserialize, Serialize}; @@ -1006,9 +770,9 @@ mod tests { pub attribute_z: u32, } - fn store_configuration(dir: &TempDir, spec: Box) -> PathBuf { + fn store_configuration(dir: &TempDir, spec: &dyn ChainSpec) -> PathBuf { let raw_output = true; - let json = sc_service::chain_ops::build_spec(&*spec, raw_output) + let json = sc_service::chain_ops::build_spec(spec, raw_output) .expect("Failed to build json string"); let mut cfg_file_path = dir.path().to_path_buf(); cfg_file_path.push(spec.id()); @@ -1049,32 +813,44 @@ mod tests { let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("shell-1", Extensions1::default())), + &create_default_with_extensions("shell-1", Extensions1::default()), ); assert_eq!(Runtime::Shell, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("shell-2", Extensions2::default())), + &create_default_with_extensions("shell-2", Extensions2::default()), ); assert_eq!(Runtime::Shell, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(create_default_with_extensions("seedling", Extensions2::default())), + &create_default_with_extensions("seedling", Extensions2::default()), ); assert_eq!(Runtime::Seedling, path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(crate::chain_spec::rococo_parachain::rococo_parachain_local_config()), + &create_default_with_extensions("penpal-rococo-1000", Extensions2::default()), ); - assert_eq!(Runtime::Default, path.runtime().unwrap()); + assert_eq!(Runtime::Penpal(1000.into()), path.runtime().unwrap()); let path = store_configuration( &temp_dir, - Box::new(crate::chain_spec::contracts::contracts_rococo_local_config()), + &create_default_with_extensions("penpal-polkadot-2000", Extensions2::default()), + ); + assert_eq!(Runtime::Penpal(2000.into()), path.runtime().unwrap()); + + let path = store_configuration( + &temp_dir, + &crate::chain_spec::contracts::contracts_rococo_local_config(), ); assert_eq!(Runtime::ContractsRococo, path.runtime().unwrap()); + + let path = store_configuration( + &temp_dir, + &crate::chain_spec::rococo_parachain::rococo_parachain_local_config(), + ); + assert_eq!(Runtime::Omni(Consensus::Aura), path.runtime().unwrap()); } } diff --git a/cumulus/polkadot-parachain/src/common/aura.rs b/cumulus/polkadot-parachain/src/common/aura.rs new file mode 100644 index 
0000000000000..9f72d847926f3 --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/aura.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Aura-related primitives for cumulus parachain collators. + +use codec::Codec; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::BlockT; +use sp_consensus_aura::AuraApi; +use sp_runtime::app_crypto::{AppCrypto, AppPair, AppSignature, Pair}; + +/// Convenience trait for defining the basic bounds of an `AuraId`. +pub trait AuraIdT: AppCrypto + Codec + Send { + /// Extra bounds for the `Pair`. + type BoundedPair: AppPair + AppCrypto; + + /// Extra bounds for the `Signature`. + type BoundedSignature: AppSignature + + TryFrom> + + std::hash::Hash + + sp_runtime::traits::Member + + Codec; +} + +impl AuraIdT for T +where + T: AppCrypto + Codec + Send + Sync, + <::Pair as AppCrypto>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + type BoundedPair = ::Pair; + type BoundedSignature = <::Pair as AppCrypto>::Signature; +} + +/// Convenience trait for defining the basic bounds of a parachain runtime that supports +/// the Aura consensus. +pub trait AuraRuntimeApi: + sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi + + Sized +{ + /// Check if the runtime has the Aura API. + fn has_aura_api(&self, at: Block::Hash) -> bool { + self.has_api::::Public>>(at) + .unwrap_or(false) + } +} + +impl AuraRuntimeApi for T where + T: sp_api::ApiExt + + AuraApi::Public> + + AuraUnincludedSegmentApi +{ +} diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs new file mode 100644 index 0000000000000..9f5febafe3042 --- /dev/null +++ b/cumulus/polkadot-parachain/src/common/mod.rs @@ -0,0 +1,72 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Cumulus parachain collator primitives. 
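// Editor's aside (not part of the patch): the `AuraIdT`, `AuraRuntimeApi` and
// `NodeRuntimeApi` traits introduced in these `common` modules follow the
// "convenience trait + blanket impl" pattern: a single named bundle of bounds that
// any type satisfying the individual bounds picks up automatically. A minimal,
// self-contained sketch of that pattern follows; all names in it are illustrative
// only and do not appear in the diff.

use std::fmt::Debug;

/// A single name for a bundle of bounds the node code would otherwise repeat.
trait NodeBounds: Clone + Debug + Send + Sync + 'static {}

/// Blanket impl: anything meeting the individual bounds implements the
/// convenience trait for free, so callers only need to write `T: NodeBounds`.
impl<T> NodeBounds for T where T: Clone + Debug + Send + Sync + 'static {}

fn assert_bounds<T: NodeBounds>(_value: &T) {}

fn main() {
    // `u32` satisfies all the listed bounds, so it is a `NodeBounds` automatically.
    assert_bounds(&42u32);
}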
+ +#![warn(missing_docs)] + +pub mod aura; + +use cumulus_primitives_core::CollectCollationInfo; +use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, Metadata}; +use sp_block_builder::BlockBuilder; +use sp_runtime::traits::Block as BlockT; +use sp_session::SessionKeys; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; + +/// Convenience trait that defines the basic bounds for the `RuntimeApi` of a parachain node. +pub trait NodeRuntimeApi: + ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo + + Sized +{ +} + +impl NodeRuntimeApi for T where + T: ApiExt + + Metadata + + SessionKeys + + BlockBuilder + + TaggedTransactionQueue + + CollectCollationInfo +{ +} + +/// Convenience trait that defines the basic bounds for the `ConstructRuntimeApi` of a parachain +/// node. +pub trait ConstructNodeRuntimeApi>: + ConstructRuntimeApi + Send + Sync + 'static +{ + /// Basic bounds for the `RuntimeApi` of a parachain node. + type BoundedRuntimeApi: NodeRuntimeApi; +} + +impl> ConstructNodeRuntimeApi for T +where + T: ConstructRuntimeApi + Send + Sync + 'static, + T::RuntimeApi: NodeRuntimeApi, +{ + type BoundedRuntimeApi = T::RuntimeApi; +} + +/// Extra args that are passed when creating a new node spec. +pub struct NodeExtraArgs { + pub use_slot_based_consensus: bool, +} diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs index 82c02943c5fc9..7d54e9b4be043 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -53,7 +53,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { unimplemented!() } } @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs index 6b718e9121642..ca5fc8bdf119b 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -53,7 +53,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { unimplemented!() } } @@ -105,12 +105,6 @@ sp_api::impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(_: &::Header) { - unimplemented!() - } - } - impl sp_session::SessionKeys for Runtime { fn generate_session_keys(_: Option>) -> Vec { unimplemented!() diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index 0757bea84aae8..cbb76fa214cbe 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -14,14 +14,38 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Cumulus test parachain collator +//! Polkadot parachain node. 
#![warn(missing_docs)] #![warn(unused_extern_crates)] +pub(crate) fn examples(executable_name: String) -> String { + color_print::cformat!( + r#"Examples: + + {0} --chain para.json --sync warp -- --chain relay.json --sync warp + Launch a warp-syncing full node of a given para's chain-spec, and a given relay's chain-spec. + + The above approach is the most flexible, and the most forward-compatible way to spawn an omni-node. + + You can find the chain-spec of some networks in: + https://paritytech.github.io/chainspecs + + {0} --chain asset-hub-polkadot --sync warp -- --chain polkadot --sync warp + Launch a warp-syncing full node of the Asset Hub parachain on the Polkadot Relay Chain. + + {0} --chain asset-hub-kusama --sync warp --relay-chain-rpc-url ws://rpc.example.com -- --chain kusama + Launch a warp-syncing full node of the Asset Hub parachain on the Kusama Relay Chain. + Uses ws://rpc.example.com as remote relay chain node. + "#, + executable_name, + ) +} + mod chain_spec; mod cli; mod command; +mod common; mod fake_runtime_api; mod rpc; mod service; diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs index 7437bb1f4b937..283a73d931d76 100644 --- a/cumulus/polkadot-parachain/src/rpc.rs +++ b/cumulus/polkadot-parachain/src/rpc.rs @@ -18,91 +18,82 @@ #![warn(missing_docs)] -use std::sync::Arc; - +use crate::{ + common::ConstructNodeRuntimeApi, + service::{ParachainBackend, ParachainClient}, +}; +use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use parachains_common::{AccountId, Balance, Block, Nonce}; -use sc_client_api::AuxStore; -pub use sc_rpc::DenyUnsafe; -use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sc_rpc::{ + dev::{Dev, DevApiServer}, + DenyUnsafe, +}; +use std::{marker::PhantomData, sync::Arc}; +use substrate_frame_rpc_system::{System, SystemApiServer}; +use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; /// A type representing all RPC extensions. pub type RpcExtension = jsonrpsee::RpcModule<()>; -/// Full client dependencies -pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc
, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, +pub(crate) trait BuildRpcExtensions { + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc, + backend: Arc, + pool: Arc, + ) -> sc_service::error::Result; } -/// Instantiate all RPC extensions. -pub fn create_full( - deps: FullDeps, - backend: Arc, -) -> Result> +pub(crate) struct BuildEmptyRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildEmptyRpcExtensions where - C: ProvideRuntimeApi - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, - B: sc_client_api::Backend + Send + Sync + 'static, - B::State: sc_client_api::backend::StateBackend>, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + _deny_unsafe: DenyUnsafe, + _client: Arc>, + _backend: Arc, + _pool: Arc>>, + ) -> sc_service::error::Result { + Ok(RpcExtension::new(())) + } } -/// Instantiate all RPCs we want at the contracts-rococo chain. 
-pub fn create_contracts_rococo( - deps: FullDeps, -) -> Result> +pub(crate) struct BuildParachainRpcExtensions(PhantomData); + +impl + BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > for BuildParachainRpcExtensions where - C: ProvideRuntimeApi - + sc_client_api::BlockBackend - + HeaderBackend - + AuxStore - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - P: TransactionPool + Sync + Send + 'static, + RuntimeApi: ConstructNodeRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use sc_rpc::dev::{Dev, DevApiServer}; - use substrate_frame_rpc_system::{System, SystemApiServer}; - - let mut module = RpcExtension::new(()); - let FullDeps { client, pool, deny_unsafe } = deps; - - module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; - module.merge(TransactionPayment::new(client.clone()).into_rpc())?; - module.merge(Dev::new(client, deny_unsafe).into_rpc())?; - - Ok(module) + fn build_rpc_extensions( + deny_unsafe: DenyUnsafe, + client: Arc>, + backend: Arc, + pool: Arc>>, + ) -> sc_service::error::Result { + let build = || -> Result> { + let mut module = RpcExtension::new(()); + + module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client.clone()).into_rpc())?; + module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; + module.merge(Dev::new(client, deny_unsafe).into_rpc())?; + + Ok(module) + }; + build().map_err(Into::into) + } } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 19ad75e384cec..6a6cf15635e01 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -14,56 +14,61 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use codec::{Codec, Decode}; -use cumulus_client_cli::CollatorOptions; -use cumulus_client_collator::service::CollatorService; +use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand}; +use cumulus_client_collator::service::{ + CollatorService, ServiceInterface as CollatorServiceInterface, +}; use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; -use cumulus_client_consensus_common::{ - ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, +#[docify::export(slot_based_colator_import)] +use cumulus_client_consensus_aura::collators::slot_based::{ + self as slot_based, Params as SlotBasedParams, }; -use cumulus_client_consensus_proposer::Proposer; +use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; +use cumulus_client_consensus_proposer::{Proposer, ProposerInterface}; +use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] use cumulus_client_service::old_consensus; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, }; -use cumulus_primitives_core::{ - relay_chain::{Hash as PHash, PersistedValidationData, ValidationCode}, - ParaId, -}; +use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; -use sc_rpc::DenyUnsafe; -use sp_core::Pair; - -use jsonrpsee::RpcModule; -use crate::{fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, rpc}; -pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Nonce}; +use crate::{ + common::{ + aura::{AuraIdT, AuraRuntimeApi}, + ConstructNodeRuntimeApi, NodeExtraArgs, + }, + fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, + rpc::BuildRpcExtensions, +}; +pub use parachains_common::{AccountId, Balance, Block, Hash, Nonce}; -use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; -use futures::{lock::Mutex, prelude::*}; +use crate::rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions}; +use frame_benchmarking_cli::BlockCmd; +#[cfg(any(feature = "runtime-benchmarks"))] +use frame_benchmarking_cli::StorageCmd; +use futures::prelude::*; +use polkadot_primitives::CollatorPair; use prometheus_endpoint::Registry; +use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd}; +use sc_client_api::BlockchainEvents; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImportParams, ImportQueue, + BlockImportParams, DefaultImportQueue, ImportQueue, }; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock}; -use sc_network_sync::SyncingService; -use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_service::{Configuration, Error, PartialComponents, TFullBackend, TFullClient, TaskManager}; +use sc_sysinfo::HwBench; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi}; -use sp_consensus_aura::AuraApi; -use sp_core::traits::SpawnEssentialNamed; +use sc_transaction_pool::FullPool; +use sp_api::ProvideRuntimeApi; +use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::{ - 
app_crypto::AppCrypto, - traits::{Block as BlockT, Header as HeaderT}, -}; -use std::{marker::PhantomData, sync::Arc, time::Duration}; - -use polkadot_primitives::CollatorPair; +use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT}; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration}; #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = cumulus_client_service::ParachainHostFunctions; @@ -74,9 +79,9 @@ type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -type ParachainClient = TFullClient>; +pub type ParachainClient = TFullClient>; -type ParachainBackend = TFullBackend; +pub type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport>, ParachainBackend>; @@ -91,502 +96,317 @@ pub type Service = PartialComponents< (ParachainBlockImport, Option, Option), >; -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - config: &Configuration, - build_import_queue: BIQ, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, -{ - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let heap_pages = config - .default_heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - - let executor = sc_executor::WasmExecutor::::builder() - .with_execution_method(config.wasm_method) - .with_max_runtime_instances(config.max_runtime_instances) - .with_runtime_cache_size(config.runtime_cache_size) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts_record_import::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - true, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - let import_queue = build_import_queue( - client.clone(), - block_import.clone(), - config, - telemetry.as_ref().map(|telemetry| telemetry.handle()), - &task_manager, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), - }) +pub(crate) trait 
BuildImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result>; } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, - para_id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +pub(crate) trait StartConsensus where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, - RB: Fn( - DenyUnsafe, - Arc>, - Arc, - Arc>>, - ) -> Result, sc_service::Error> - + 'static, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, - Net: NetworkBackend, + RuntimeApi: ConstructNodeRuntimeApi>, { - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - let backend_for_rpc = backend.clone(); - - Box::new(move |deny_unsafe, _| { - rpc_ext_builder( - deny_unsafe, - client.clone(), - backend_for_rpc.clone(), - transaction_pool.clone(), - ) - }) - }; - - 
sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), sc_service::Error>; +} - let relay_chain_slot_duration = Duration::from_secs(6); +pub(crate) trait NodeSpec { + type RuntimeApi: ConstructNodeRuntimeApi>; + + type BuildImportQueue: BuildImportQueue + 'static; + + type BuildRpcExtensions: BuildRpcExtensions< + ParachainClient, + ParachainBackend, + sc_transaction_pool::FullPool>, + > + 'static; + + type StartConsensus: StartConsensus + 'static; + + const SYBIL_RESISTANCE: CollatorSybilResistance; + + /// Starts a `ServiceBuilder` for a full service. + /// + /// Use this macro if you don't actually need the full service, but just the builder in order to + /// be able to perform chain operations. 
+ fn new_partial(config: &Configuration) -> sc_service::error::Result> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| { + HeapAllocStrategy::Static { extra_pages: h as _ } + }); + + let executor = sc_executor::WasmExecutor::::builder() + .with_execution_method(config.wasm_method) + .with_max_runtime_instances(config.max_runtime_instances) + .with_runtime_cache_size(config.runtime_cache_size) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts_record_import::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + true, + )?; + let client = Arc::new(client); + + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( + let import_queue = Self::BuildImportQueue::build_import_queue( client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), + block_import.clone(), + config, + telemetry.as_ref().map(|telemetry| telemetry.handle()), &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Build the import queue for Aura-based runtimes. 
-pub fn build_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} - -/// Start a rococo parachain node. -pub async fn start_rococo_parachain_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_aura_import_queue, - start_lookahead_aura_consensus, - hwbench, - ) - .await -} - -/// Build the import queue for the shell runtime. -pub fn build_shell_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - _: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) -} -fn build_parachain_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_block_builder::BlockBuilder - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi, -{ - let deps = rpc::FullDeps { client, pool, deny_unsafe }; + Ok(PartialComponents { + backend, + client, + import_queue, + keystore_container, + task_manager, + transaction_pool, + select_chain: (), + other: (block_import, telemetry, telemetry_worker_handle), + }) + } - rpc::create_full(deps, backend).map_err(Into::into) -} + /// Start a node with the given parachain spec. + /// + /// This is the actual implementation that is abstract over the executor and the runtime api. 
+ fn start_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> + where + Net: NetworkBackend, + { + Box::pin(async move { + let parachain_config = prepare_node_config(parachain_config); + + let params = Self::new_partial(¶chain_config)?; + let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + + let client = params.client.clone(); + let backend = params.backend.clone(); + + let mut task_manager = params.task_manager; + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle, + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let import_queue_service = params.import_queue.service(); + let net_config = FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + build_network(BuildNetworkParams { + parachain_config: ¶chain_config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + para_id, + spawn_handle: task_manager.spawn_handle(), + relay_chain_interface: relay_chain_interface.clone(), + import_queue: params.import_queue, + sybil_resistance_level: Self::SYBIL_RESISTANCE, + }) + .await?; + + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |deny_unsafe, _| { + Self::BuildRpcExtensions::build_rpc_extensions( + deny_unsafe, + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: params.keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + sync_service: sync_service.clone(), + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + if validator { + warn_if_slow_hardware(&hwbench); + } -fn build_contracts_rpc_extensions( - deny_unsafe: sc_rpc::DenyUnsafe, - client: Arc>, - _backend: Arc, - pool: Arc>>, -) -> Result, sc_service::Error> { - let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } - crate::rpc::create_contracts_rococo(deps).map_err(Into::into) -} + let announce_block = { + let sync_service = sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let relay_chain_slot_duration = Duration::from_secs(6); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + start_relay_chain_tasks(StartRelayChainTasksParams { + client: client.clone(), + announce_block: 
announce_block.clone(), + para_id, + relay_chain_interface: relay_chain_interface.clone(), + task_manager: &mut task_manager, + da_recovery_profile: if validator { + DARecoveryProfile::Collator + } else { + DARecoveryProfile::FullNode + }, + import_queue: import_queue_service, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service, + })?; + + if validator { + Self::StartConsensus::start_consensus( + client.clone(), + block_import, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + relay_chain_interface.clone(), + transaction_pool, + params.keystore_container.keystore(), + relay_chain_slot_duration, + para_id, + collator_key.expect("Command line arguments do not allow this. qed"), + overseer_handle, + announce_block, + backend.clone(), + )?; + } -/// Start a polkadot-shell parachain node. -pub async fn start_shell_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Unresistant, // free-for-all consensus - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_shell_import_queue, - start_relay_chain_consensus, - hwbench, - ) - .await -} + start_network.start_network(); -enum BuildOnAccess { - Uninitialized(Option R + Send + Sync>>), - Initialized(R), + Ok(task_manager) + }) + } } -impl BuildOnAccess { - fn get_mut(&mut self) -> &mut R { - loop { - match self { - Self::Uninitialized(f) => { - *self = Self::Initialized((f.take().unwrap())()); - }, - Self::Initialized(ref mut r) => return r, - } - } +/// Build the import queue for the shell runtime. +pub(crate) struct BuildShellImportQueue(PhantomData); + +impl BuildImportQueue for BuildShellImportQueue { + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + _telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + cumulus_client_consensus_relay_chain::import_queue( + client, + block_import, + |_, _| async { Ok(()) }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ) + .map_err(Into::into) } } -/// Special [`ParachainConsensus`] implementation that waits for the upgrade from -/// shell to a parachain runtime that implements Aura. 
-struct WaitForAuraConsensus { - client: Arc, - aura_consensus: Arc>>>>, - relay_chain_consensus: Arc>>>, - _phantom: PhantomData, -} +pub(crate) struct ShellNode; -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - _phantom: PhantomData, - } - } -} +impl NodeSpec for ShellNode { + type RuntimeApi = FakeRuntimeApi; + type BuildImportQueue = BuildShellImportQueue; + type BuildRpcExtensions = BuildEmptyRpcExtensions; + type StartConsensus = StartRelayChainConsensus; -#[async_trait::async_trait] -impl ParachainConsensus for WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Codec + Sync, -{ - async fn produce_candidate( - &mut self, - parent: &Header, - relay_parent: PHash, - validation_data: &PersistedValidationData, - ) -> Option> { - if self - .client - .runtime_api() - .has_api::>(parent.hash()) - .unwrap_or(false) - { - self.aura_consensus - .lock() - .await - .get_mut() - .produce_candidate(parent, relay_parent, validation_data) - .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant; } struct Verifier { client: Arc, - aura_verifier: BuildOnAccess>>, + aura_verifier: Box>, relay_chain_verifier: Box>, _phantom: PhantomData, } @@ -594,21 +414,16 @@ struct Verifier { #[async_trait::async_trait] impl VerifierT for Verifier where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Sync + Codec, + Client: ProvideRuntimeApi + Send + Sync, + Client::Api: AuraRuntimeApi, + AuraId: AuraIdT + Sync, { async fn verify( - &mut self, + &self, block_import: BlockImportParams, ) -> Result, String> { - if self - .client - .runtime_api() - .has_api::>(*block_import.header.parent_hash()) - .unwrap_or(false) - { - self.aura_verifier.get_mut().verify(block_import).await + if self.client.runtime_api().has_aura_api(*block_import.header.parent_hash()) { + self.aura_verifier.verify(block_import).await } else { self.relay_chain_verifier.verify(block_import).await } @@ -617,422 +432,407 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. 
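// Editor's aside (not part of the patch): the reworked `Verifier` above routes each
// block to the Aura verifier once the parent block's runtime reports the Aura API,
// and otherwise falls back to the relay-chain verifier. A tiny, self-contained
// stand-in for that dispatch is sketched below; every name in it is illustrative
// only, and the string-based predicate merely mimics `has_aura_api(parent_hash)`.

trait Verify {
    fn verify(&self, block: &str) -> Result<(), String>;
}

struct AuraVerifier;
impl Verify for AuraVerifier {
    fn verify(&self, _block: &str) -> Result<(), String> {
        Ok(())
    }
}

struct RelayChainVerifier;
impl Verify for RelayChainVerifier {
    fn verify(&self, _block: &str) -> Result<(), String> {
        Ok(())
    }
}

/// Dispatching verifier, mirroring the shape of the `Verifier` struct above.
struct DispatchingVerifier {
    parent_has_aura_api: Box<dyn Fn(&str) -> bool>,
    aura: AuraVerifier,
    relay: RelayChainVerifier,
}

impl Verify for DispatchingVerifier {
    fn verify(&self, block: &str) -> Result<(), String> {
        if (self.parent_has_aura_api)(block) {
            self.aura.verify(block)
        } else {
            self.relay.verify(block)
        }
    }
}

fn main() {
    let verifier = DispatchingVerifier {
        parent_has_aura_api: Box::new(|block| block.starts_with("aura")),
        aura: AuraVerifier,
        relay: RelayChainVerifier,
    };
    assert!(verifier.verify("aura-block").is_ok());
    assert!(verifier.verify("shell-block").is_ok());
}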
-pub fn build_relay_to_aura_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl BuildImportQueue + for BuildRelayToAuraImportQueue where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, { - let verifier_client = client.clone(); - - let aura_verifier = move || { - Box::new(cumulus_client_consensus_aura::build_verifier::< - ::Pair, - _, - _, - _, - >(cumulus_client_consensus_aura::BuildVerifierParams { - client: verifier_client.clone(), - create_inherent_data_providers: move |parent_hash, _| { - let cidp_client = verifier_client.clone(); - async move { - let slot_duration = cumulus_client_consensus_aura::slot_duration_at( - &*cidp_client, - parent_hash, - )?; - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - } - }, - telemetry: telemetry_handle, - })) as Box<_> - }; + fn build_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + let verifier_client = client.clone(); + + let aura_verifier = + cumulus_client_consensus_aura::build_verifier::<::Pair, _, _, _>( + cumulus_client_consensus_aura::BuildVerifierParams { + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + } + }, + telemetry: telemetry_handle, + }, + ); - let relay_chain_verifier = - Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; + let relay_chain_verifier = + Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })); - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), - _phantom: PhantomData, - }; + let verifier = Verifier { + client, + relay_chain_verifier, + aura_verifier: Box::new(aura_verifier), + _phantom: PhantomData, + }; - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); + let registry = config.prometheus_registry(); + let spawner = task_manager.spawn_essential_handle(); - Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) + } } /// Uses the lookahead 
collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. -pub async fn start_generic_aura_lookahead_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - start_lookahead_aura_consensus, - hwbench, - ) - .await +pub(crate) struct AuraNode( + pub PhantomData<(RuntimeApi, AuraId, StartConsensus)>, +); + +impl Default for AuraNode { + fn default() -> Self { + Self(Default::default()) + } } -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -/// -/// Uses the lookahead collator to support async backing. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_lookahead_node< - RuntimeApi, - AuraId: AppCrypto + Send + Codec + Sync, - Net, ->( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +impl NodeSpec for AuraNode where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + substrate_frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, - Net: NetworkBackend, + + substrate_frame_rpc_system::AccountNonceApi, + AuraId: AuraIdT + Sync, + StartConsensus: self::StartConsensus + 'static, { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_parachain_rpc_extensions::, - build_relay_to_aura_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. 
- let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; - - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } - } - - // Move to Aura consensus. - let proposer = Proposer::new(proposer_factory); + type RuntimeApi = RuntimeApi; + type BuildImportQueue = BuildRelayToAuraImportQueue; + type BuildRpcExtensions = BuildParachainRpcExtensions; + type StartConsensus = StartConsensus; + const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant; +} - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface2, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: true, /* we need to always re-initialize for asset-hub moving - * to aura */ - }; - - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) - .await - }); - - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); - - Ok(()) - }, - hwbench, - ) - .await +pub fn new_aura_node_spec(extra_args: NodeExtraArgs) -> Box +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, + AuraId: AuraIdT + Sync, +{ + if extra_args.use_slot_based_consensus { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartSlotBasedAuraConsensus, + >::default()) + } else { + Box::new(AuraNode::< + RuntimeApi, + AuraId, + StartLookaheadAuraConsensus, + >::default()) + } } /// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain /// decides what is backed and included. 
-fn start_relay_chain_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - _sync_oracle: Arc>, - _keystore: KeystorePtr, - _relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - _backend: Arc, -) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = +pub(crate) struct StartRelayChainConsensus; + +impl StartConsensus for StartRelayChainConsensus { + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + _keystore: KeystorePtr, + _relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + _backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); + + let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( + cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { + para_id, + proposer_factory, + block_import, + relay_chain_interface: relay_chain_interface.clone(), + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let relay_chain_interface = relay_chain_interface.clone(); + async move { + let parachain_inherent = cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( relay_parent, &relay_chain_interface, &validation_data, para_id, ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok(parachain_inherent) + } + }, }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) + ); + + let spawner = task_manager.spawn_handle(); + + // Required for free-for-all consensus + #[allow(deprecated)] + old_consensus::start_collator_sync(old_consensus::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block, + overseer_handle, + spawner, + key: collator_key, + parachain_consensus: 
free_for_all, + runtime_api: client.clone(), + }); + + Ok(()) + } } /// Start consensus using the lookahead aura collator. -fn start_lookahead_aura_consensus( - client: Arc>, - block_import: ParachainBlockImport, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc>>, - sync_oracle: Arc>, - keystore: KeystorePtr, - relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - backend: Arc, -) -> Result<(), sc_service::Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer: Proposer::new(proposer_factory), - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; +pub(crate) struct StartSlotBasedAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl StartSlotBasedAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + #[docify::export_content] + fn launch_slot_based_collator( + params: SlotBasedParams< + ParachainBlockImport, + CIDP, + ParachainClient, + ParachainBackend, + Arc, + CHP, + Proposer, + CS, + >, + task_manager: &TaskManager, + ) where + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, + Proposer: ProposerInterface + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + { + let (collation_future, block_builder_future) = + slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); + + task_manager.spawn_essential_handle().spawn( + "collation-task", + Some("parachain-block-authoring"), + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + Some("parachain-block-authoring"), + block_builder_future, + ); + } +} + +impl StartConsensus + for StartSlotBasedAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + _overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); - let 
fut = aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + let proposer = Proposer::new(proposer_factory); + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); - Ok(()) + let client_for_aura = client.clone(); + let params = SlotBasedParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + slot_drift: Duration::from_secs(1), + }; + + // We have a separate function only to be able to use `docify::export` on this piece of + // code. + Self::launch_slot_based_collator(params, task_manager); + + Ok(()) + } } -/// Start an aura powered parachain node which uses the lookahead collator to support async backing. -/// This node is basic in the sense that its runtime api doesn't include common contents such as -/// transaction payment. Used for aura glutton. -pub async fn start_basic_lookahead_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_, _, _, _| Ok(RpcModule::new(())), - build_relay_to_aura_import_queue::<_, AuraId>, - start_lookahead_aura_consensus, - hwbench, - ) - .await +/// Wait for the Aura runtime API to appear on chain. +/// This is useful for chains that started out without Aura. Components that +/// are depending on Aura functionality will wait until Aura appears in the runtime. +async fn wait_for_aura(client: Arc>) +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + let finalized_hash = client.chain_info().finalized_hash; + if client.runtime_api().has_aura_api(finalized_hash) { + return; + }; + + let mut stream = client.finality_notification_stream(); + while let Some(notification) = stream.next().await { + if client.runtime_api().has_aura_api(notification.hash) { + return; + } + } } -/// Start a parachain node for Rococo Contracts. -pub async fn start_contracts_rococo_node>( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - build_contracts_rpc_extensions, - build_aura_import_queue, - start_lookahead_aura_consensus, - hwbench, - ) - .await +/// Start consensus using the lookahead aura collator. 
+pub(crate) struct StartLookaheadAuraConsensus( + PhantomData<(RuntimeApi, AuraId)>, +); + +impl StartConsensus + for StartLookaheadAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + fn start_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, + ) -> Result<(), Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface, + code_hash_provider: { + let client = client.clone(); + move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + } + }, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer: Proposer::new(proposer_factory), + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: false, + }; + + let fut = async move { + wait_for_aura(client).await; + aura::run::::Pair, _, _, _, _, _, _, _, _>(params).await; + }; + task_manager.spawn_essential_handle().spawn("aura", None, fut); + + Ok(()) + } } /// Checks that the hardware meets the requirements and print a warning otherwise. 
@@ -1047,3 +847,177 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { ); } } + +type SyncCmdResult = sc_cli::Result<()>; + +type AsyncCmdResult<'a> = + sc_cli::Result<(Pin + 'a>>, TaskManager)>; + +pub(crate) trait DynNodeSpec { + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_>; + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_>; + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult; + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult; + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult; + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>>; +} + +impl DynNodeSpec for T +where + T: NodeSpec, +{ + fn prepare_check_block_cmd( + self: Box, + config: Configuration, + cmd: &CheckBlockCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_export_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ExportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.database)), partial.task_manager)) + } + + fn prepare_export_state_cmd( + self: Box, + config: Configuration, + cmd: &ExportStateCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, config.chain_spec)), partial.task_manager)) + } + + fn prepare_import_blocks_cmd( + self: Box, + config: Configuration, + cmd: &ImportBlocksCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager)) + } + + fn prepare_revert_cmd( + self: Box, + config: Configuration, + cmd: &RevertCmd, + ) -> AsyncCmdResult<'_> { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + Ok((Box::pin(cmd.run(partial.client, partial.backend, None)), partial.task_manager)) + } + + fn run_export_genesis_head_cmd( + self: Box, + config: Configuration, + cmd: &ExportGenesisHeadCommand, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + fn run_benchmark_block_cmd( + self: Box, + config: Configuration, + cmd: &BlockCmd, + ) -> SyncCmdResult { + let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + cmd.run(partial.client) + } + + #[cfg(any(feature = "runtime-benchmarks"))] + fn run_benchmark_storage_cmd( + self: Box, + config: Configuration, + cmd: &StorageCmd, + ) -> SyncCmdResult { + let partial = 
Self::new_partial(&config).map_err(sc_cli::Error::Service)?; + let db = partial.backend.expose_db(); + let storage = partial.backend.expose_storage(); + + cmd.run(config, partial.client, db, storage) + } + + fn start_node( + self: Box, + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, + ) -> Pin>>> { + match parachain_config.network.network_backend { + sc_network::config::NetworkBackendType::Libp2p => + ::start_node::>( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + sc_network::config::NetworkBackendType::Litep2p => + ::start_node::( + parachain_config, + polkadot_config, + collator_options, + para_id, + hwbench, + ), + } + } +} diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index ef96f334d6375..062b9ce736e7f 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -10,17 +10,16 @@ description = "Core primitives for Aura in Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-api = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-core-primitives = { workspace = true } +polkadot-primitives = { workspace = true } [features] default = ["std"] @@ -31,5 +30,4 @@ std = [ "sp-api/std", "sp-consensus-aura/std", "sp-runtime/std", - "sp-std/std", ] diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 595aa5f72bf24..533d368d3b00e 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -10,20 +10,19 @@ description = "Cumulus related core primitive types and traits" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-trie = { workspace = true } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } -xcm 
= { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +polkadot-core-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +xcm = { workspace = true } [features] default = ["std"] @@ -35,7 +34,6 @@ std = [ "scale-info/std", "sp-api/std", "sp-runtime/std", - "sp-std/std", "sp-trie/std", "xcm/std", ] diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 29216d5134651..6eafecfc3ff57 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -18,11 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; pub use polkadot_core_primitives::InboundDownwardMessage; pub use polkadot_parachain_primitives::primitives::{ @@ -202,7 +204,7 @@ pub struct ParachainBlockData { /// The header of the parachain block. header: B::Header, /// The extrinsics of the parachain block. - extrinsics: sp_std::vec::Vec, + extrinsics: alloc::vec::Vec, /// The data that is required to emulate the storage accesses executed by all extrinsics. storage_proof: sp_trie::CompactProof, } @@ -211,7 +213,7 @@ impl ParachainBlockData { /// Creates a new instance of `Self`. pub fn new( header: ::Header, - extrinsics: sp_std::vec::Vec<::Extrinsic>, + extrinsics: alloc::vec::Vec<::Extrinsic>, storage_proof: sp_trie::CompactProof, ) -> Self { Self { header, extrinsics, storage_proof } @@ -243,7 +245,7 @@ impl ParachainBlockData { } /// Deconstruct into the inner parts. - pub fn deconstruct(self) -> (B::Header, sp_std::vec::Vec, sp_trie::CompactProof) { + pub fn deconstruct(self) -> (B::Header, alloc::vec::Vec, sp_trie::CompactProof) { (self.header, self.extrinsics, self.storage_proof) } } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 0156eb02e2b4a..172af4b9ec63e 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -10,20 +10,19 @@ license = "Apache-2.0" workspace = true [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true, default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true, default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { optional = true, workspace = true } +sp-state-machine = { optional = true, workspace = true } +sp-trie = { workspace = true } # Cumulus 
-cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -36,6 +35,5 @@ std = [ "sp-inherents/std", "sp-runtime?/std", "sp-state-machine?/std", - "sp-std/std", "sp-trie/std", ] diff --git a/cumulus/primitives/parachain-inherent/src/lib.rs b/cumulus/primitives/parachain-inherent/src/lib.rs index 75a56693958e6..ad4b39b547c5f 100644 --- a/cumulus/primitives/parachain-inherent/src/lib.rs +++ b/cumulus/primitives/parachain-inherent/src/lib.rs @@ -27,14 +27,16 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use cumulus_primitives_core::{ relay_chain::{BlakeTwo256, Hash as RelayHash, HashT as _}, InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData, }; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use scale_info::TypeInfo; use sp_inherents::InherentIdentifier; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; /// The identifier for the parachain inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sysi1337"; diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index dd584ce86b2e3..e61c865d05fb0 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -10,14 +10,14 @@ license = "Apache-2.0" workspace = true [dependencies] -sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } +sp-trie = { workspace = true } [dev-dependencies] -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } +sp-state-machine = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index bdfb83ad72a96..3a98fdd017aef 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -10,24 +10,23 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } -cumulus-primitives-core = { path = "../core", default-features = false } -cumulus-primitives-proof-size-hostfunction 
= { path = "../proof-size-hostfunction", default-features = false } -docify = "0.2.8" +cumulus-primitives-core = { workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true } +docify = { workspace = true } [dev-dependencies] -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -cumulus-test-runtime = { path = "../../test/runtime" } +sp-trie = { workspace = true } +sp-io = { workspace = true } +cumulus-test-runtime = { workspace = true } [features] default = ["std"] @@ -41,6 +40,5 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-trie/std", ] diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index c09c12d7a0abf..f48dd927ee962 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; +use core::marker::PhantomData; use cumulus_primitives_core::Weight; use cumulus_primitives_proof_size_hostfunction::{ storage_proof_size::storage_proof_size, PROOF_RECORDING_DISABLED, @@ -33,7 +34,6 @@ use sp_runtime::{ transaction_validity::TransactionValidityError, DispatchResult, }; -use sp_std::marker::PhantomData; const LOG_TARGET: &'static str = "runtime::storage_reclaim"; @@ -199,14 +199,14 @@ where #[cfg(test)] mod tests { use super::*; + use core::marker::PhantomData; use frame_support::{ assert_ok, - dispatch::DispatchClass, + dispatch::{DispatchClass, PerDispatchClass}, weights::{Weight, WeightMeter}, }; use frame_system::{BlockWeight, CheckWeight}; use sp_runtime::{AccountId32, BuildStorage}; - use sp_std::marker::PhantomData; use sp_trie::proof_size_extension::ProofSizeExt; type Test = cumulus_test_runtime::Runtime; @@ -215,7 +215,7 @@ mod tests { pages: 0u64, }); const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 0; + const LEN: usize = 150; pub fn new_test_ext() -> sp_io::TestExternalities { let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() @@ -256,6 +256,10 @@ mod tests { }); } + fn get_storage_weight() -> PerDispatchClass { + BlockWeight::::get() + } + #[test] fn basic_refund() { // The real cost will be 100 bytes of storage size @@ -268,6 +272,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Should add 500 + 150 (len) to weight. 
+ assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -283,7 +290,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 600); + assert_eq!(get_storage_weight().total().proof_size(), 1250); }) } @@ -299,6 +306,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -313,7 +323,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + assert_eq!(get_storage_weight().total().proof_size(), 1650); }) } @@ -327,6 +337,9 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Weight added should be 100 + 150 (len) + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -342,7 +355,10 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1100); + assert_eq!( + get_storage_weight().total().proof_size(), + 1100 + LEN as u64 + info.weight.proof_size() + ); }) } @@ -354,6 +370,8 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -368,7 +386,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 0); + // Proof size should be exactly equal to extrinsic length + assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); }); } @@ -382,12 +401,17 @@ mod tests { let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; let post_info = PostDispatchInfo::default(); + // Adds 500 + 150 (len) weight, total weight is 1950 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(300)); + // Refund 500 unspent weight according to `post_info`, total weight is now 1650 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Recorded proof size is negative -200, total weight is now 1450 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -396,7 +420,7 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 800); + assert_eq!(get_storage_weight().total().proof_size(), 1450); }); } @@ -416,6 +440,9 @@ mod tests { pays_fee: Default::default(), }; + // Should add 300 + 150 (len) of weight + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -432,7 +459,8 @@ mod tests { &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Reclaimed 100 + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -451,6 +479,9 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = 
StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); @@ -458,7 +489,56 @@ // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. + + // Refunds unspent 25 weight according to `post_info`, 1175 assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) + } + + #[test] + fn test_nothing_reclaimed() { + let mut test_ext = setup_test_externalities(&[100, 200]); + + test_ext.execute_with(|| { + set_current_storage_weight(0); + // Benchmarked storage weight: 100 + let info = DispatchInfo { weight: Weight::from_parts(100, 100), ..Default::default() }; + + // Actual proof size is 100 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 100)), + pays_fee: Default::default(), + }; + + // Adds benchmarked weight 100 + 150 (len), total weight is now 250 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + // Weight should go up by 150 len + 100 proof size weight, total weight 250 + assert_eq!(get_storage_weight().total().proof_size(), 250); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + // Should return `setup_test_externalities` proof recorder value: 100. + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + // Nothing to refund, unspent is 0, total weight 250 + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, LEN, &Ok(()))); + // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic + // actually used 100 proof size. + // Nothing to refund or add, weight matches proof recorder assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -467,7 +547,9 @@ &Ok(()) )); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + // Check block len weight was not reclaimed: + // 100 weight + 150 extrinsic len == 250 proof size + assert_eq!(get_storage_weight().total().proof_size(), 250); + }) + } @@ -487,11 +569,15 @@ pays_fee: Default::default(), }; + // Adds 300 + 150 (len) weight, total weight 1450 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // This refunds 100 - 50(unspent), total weight is now 1400 assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -504,7 +590,8 @@ // we always need to call `post_dispatch` to verify that they interoperate correctly. 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 900); + // Above call refunds 50 (unspent), total weight is 1350 now + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -523,11 +610,15 @@ mod tests { pays_fee: Default::default(), }; + // Adds 50 + 150 (len) weight, total weight is 1200 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + let pre = StorageWeightReclaim::(PhantomData) .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); assert_eq!(pre, Some(100)); + // Adds additional 150 weight recorded assert_ok!(StorageWeightReclaim::::post_dispatch( Some(pre), &info, @@ -540,7 +631,7 @@ mod tests { // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + assert_eq!(get_storage_weight().total().proof_size(), 1350); }) } @@ -644,7 +735,7 @@ mod tests { // We reclaimed 3 bytes of storage size! assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(get_storage_weight().total().proof_size(), 10); assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); } } diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 7a6f4787ba312..a50011bc3f0e9 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -10,16 +10,15 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.28" +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } # Substrate -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false } +sp-inherents = { workspace = true } +sp-timestamp = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -27,6 +26,5 @@ std = [ "codec/std", "cumulus-primitives-core/std", "sp-inherents/std", - "sp-std/std", "sp-timestamp/std", ] diff --git a/cumulus/primitives/timestamp/src/lib.rs b/cumulus/primitives/timestamp/src/lib.rs index e6aba6d0bb740..5365f83efdf11 100644 --- a/cumulus/primitives/timestamp/src/lib.rs +++ b/cumulus/primitives/timestamp/src/lib.rs @@ -27,9 +27,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::time::Duration; use cumulus_primitives_core::relay_chain::Slot; use sp_inherents::{Error, InherentData}; -use sp_std::time::Duration; pub use sp_timestamp::{InherentType, INHERENT_IDENTIFIER}; diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 85e3ac2f7606c..82d18c8c0aac6 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -10,25 +10,24 @@ description = "Helper datatypes for Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } # Substrate -frame-support = { path 
= "../../../substrate/frame/support", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -pallet-asset-conversion = { path = "../../../substrate/frame/asset-conversion", default-features = false } +frame-support = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true } # Polkadot -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -42,7 +41,6 @@ std = [ "polkadot-runtime-parachains/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index 64784eb36f846..9d5bf4e231eb0 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -19,7 +19,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::Encode; +use core::marker::PhantomData; use cumulus_primitives_core::{MessageSendError, UpwardMessageSender}; use frame_support::{ defensive, @@ -33,7 +37,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, SaturatedConversion, }; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm, WrapVersion}; use xcm_builder::{InspectMessageQueues, TakeRevenue}; use xcm_executor::{ @@ -803,7 +806,7 @@ mod test_trader { /// needed. #[cfg(feature = "runtime-benchmarks")] pub struct ToParentDeliveryHelper( - sp_std::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, + core::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, ); #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/templates/xcm-bench-template.hbs b/cumulus/templates/xcm-bench-template.hbs index 5d0ded403f634..119924bca2ee1 100644 --- a/cumulus/templates/xcm-bench-template.hbs +++ b/cumulus/templates/xcm-bench-template.hbs @@ -17,7 +17,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weights for `{{pallet}}`. 
pub struct WeightInfo(PhantomData); diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 120983eb9390e..fbbaab73ce769 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -9,43 +9,43 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sc-service = { path = "../../../substrate/client/service" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-executor-common = { path = "../../../substrate/client/executor/common" } -substrate-test-client = { path = "../../../substrate/test-utils/client" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -frame-system = { path = "../../../substrate/frame/system" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -pallet-balances = { path = "../../../substrate/frame/balances" } +sc-service = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } # Cumulus 
-cumulus-test-runtime = { path = "../runtime" } -cumulus-test-service = { path = "../service" } -cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } +cumulus-test-runtime = { workspace = true } +cumulus-test-service = { workspace = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index d233ad2691768..f26413e441e72 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -79,6 +79,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { cumulus_test_service::chain_spec::get_chain_spec_with_extra_endowed( None, self.endowed_accounts.clone(), + cumulus_test_runtime::WASM_BINARY.expect("WASM binary not compiled!"), ) .build_storage() .expect("Builds test runtime genesis storage") diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index d775c61f7801e..e266b5807081a 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -10,19 +10,18 @@ description = "Mocked relay state proof builder for testing Cumulus." workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Substrate -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-trie = { workspace = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +polkadot-primitives = { workspace = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] @@ -32,6 +31,5 @@ std = [ "polkadot-primitives/std", "sp-runtime/std", "sp-state-machine/std", - "sp-std/std", "sp-trie/std", ] diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index fbd2692a36b46..d1016085c8073 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -14,12 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
+extern crate alloc; + +use alloc::collections::btree_map::BTreeMap; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; use polkadot_primitives::UpgradeGoAhead; use sp_runtime::traits::HashingFor; -use sp_std::collections::btree_map::BTreeMap; use sp_trie::PrefixedMemoryDB; /// Builds a sproof (portmanteau of 'spoof' and 'proof') of the relay chain state. diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index b14e3b7f040e7..54b83e2dfedae 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -9,48 +9,47 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = 
true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-sudo = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-glutton = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-session = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } -cumulus-primitives-aura = { path = "../../primitives/aura", default-features = false } -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } +cumulus-pallet-parachain-system = { workspace = true } +parachain-info = { workspace = true } +cumulus-primitives-aura = { workspace = true } +pallet-collator-selection = { workspace = true } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -87,9 +86,9 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", ] increment-spec-version = [] +elastic-scaling = [] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index ebd5c178cba07..bf579f4121e5f 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -24,6 +24,13 @@ fn main() { .enable_feature("increment-spec-version") .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); + + WasmBuilder::new() + .with_current_project() + .enable_feature("elastic-scaling") + .import_memory() + .set_file_name("wasm_binary_elastic_scaling.rs") + .build(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 452b3241d0bfa..274f16ab630d6 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -27,7 +27,16 @@ pub mod wasm_spec_version_incremented { include!(concat!(env!("OUT_DIR"), "/wasm_binary_spec_version_incremented.rs")); } +pub mod elastic_scaling { + #[cfg(feature = "std")] + include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); +} + mod test_pallet; + +extern crate alloc; + +use alloc::{vec, vec::Vec}; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; 
use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -39,7 +48,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -66,7 +74,7 @@ use frame_system::{ pub use pallet_balances::Call as BalancesCall; pub use pallet_glutton::Call as GluttonCall; pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_timestamp::{Call as TimestampCall, Now}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; @@ -83,8 +91,23 @@ impl_opaque_keys! { /// The para-id used in this runtime. pub const PARACHAIN_ID: u32 = 100; -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +#[cfg(not(feature = "elastic-scaling"))] +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 4; +#[cfg(not(feature = "elastic-scaling"))] const BLOCK_PROCESSING_VELOCITY: u32 = 1; + +#[cfg(feature = "elastic-scaling")] +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 7; +#[cfg(feature = "elastic-scaling")] +const BLOCK_PROCESSING_VELOCITY: u32 = 4; + +#[cfg(not(feature = "elastic-scaling"))] +pub const MILLISECS_PER_BLOCK: u64 = 6000; +#[cfg(feature = "elastic-scaling")] +pub const MILLISECS_PER_BLOCK: u64 = 2000; + +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; // The only difference between the two declarations below is the `spec_version`. With the @@ -126,10 +149,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -pub const MILLISECS_PER_BLOCK: u64 = 6000; - -pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; // These time units are defined in number of blocks. @@ -438,7 +457,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } @@ -499,7 +518,7 @@ impl_runtime_apis! 
{ impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 7f43f713fadc4..61195386ae79d 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -42,7 +42,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn set_custom_validation_head_data( _: OriginFor, - custom_header: sp_std::vec::Vec, + custom_header: alloc::vec::Vec, ) -> DispatchResult { cumulus_pallet_parachain_system::Pallet::::set_custom_validation_head_data( custom_header, @@ -79,7 +79,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 732d884528f89..f766d12363209 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,96 +13,94 @@ name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.79" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -criterion = { version = "0.5.1", features = ["async_tokio"] } -jsonrpsee = { version = "0.22", features = ["server"] } -rand = "0.8.5" +async-trait = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +criterion = { features = ["async_tokio"], workspace = true, default-features = true } +jsonrpsee = { features = ["server"], workspace = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.32.0", features = ["macros"] } -tracing = "0.1.37" -url = "2.4.0" -tempfile = "3.8.0" +tokio = { features = ["macros"], workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +url = { workspace = true } +tempfile = { workspace = true } # Substrate -frame-system = { path = "../../../substrate/frame/system" } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-api = { path = 
"../../../substrate/primitives/api" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -substrate-test-client = { path = "../../../substrate/test-utils/client" } -sc-cli = { path = "../../../substrate/client/cli" } -sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-executor-wasmtime = { path = "../../../substrate/client/executor/wasmtime" } -sc-executor-common = { path = "../../../substrate/client/executor/common" } +frame-system = { workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-service = { path = "../../../polkadot/node/service" } -polkadot-test-service = { path = "../../../polkadot/node/test/service" } -polkadot-cli = { path = "../../../polkadot/cli" } -polkadot-node-subsystem = { path = "../../../polkadot/node/subsystem" } -polkadot-overseer = { path = "../../../polkadot/node/overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-cli = { workspace = true, default-features 
= true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } # Cumulus -cumulus-client-cli = { path = "../../client/cli" } -parachains-common = { path = "../../parachains/common" } -cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } -cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" } -cumulus-client-parachain-inherent = { path = "../../client/parachain-inherent" } -cumulus-client-service = { path = "../../client/service" } -cumulus-client-collator = { path = "../../client/collator" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-relay-chain-inprocess-interface = { path = "../../client/relay-chain-inprocess-interface" } -cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } -cumulus-test-runtime = { path = "../runtime" } -cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } -cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } -cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } -pallet-timestamp = { path = "../../../substrate/frame/timestamp" } +cumulus-client-cli = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } +cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.28" -portpicker = "0.1.1" -rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -cumulus-test-client = { path = "../client" } +futures = { workspace = true } +portpicker = { workspace = true } +sp-authority-discovery = { workspace = true, default-features = true } +cumulus-test-client 
= { workspace = true } # Polkadot dependencies -polkadot-test-service = { path = "../../../polkadot/node/test/service" } +polkadot-test-service = { workspace = true } # Substrate dependencies -sc-cli = { path = "../../../substrate/client/cli" } -substrate-test-utils = { path = "../../../substrate/test-utils" } +sc-cli = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } [features] runtime-benchmarks = [ @@ -116,7 +114,6 @@ runtime-benchmarks = [ "polkadot-primitives/runtime-benchmarks", "polkadot-service/runtime-benchmarks", "polkadot-test-service/runtime-benchmarks", - "rococo-parachain-runtime/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 174d478f2575c..ae71028ad486a 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -66,9 +66,10 @@ where pub fn get_chain_spec_with_extra_endowed( id: Option, extra_endowed_accounts: Vec, + code: &[u8], ) -> ChainSpec { ChainSpec::builder( - cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + code, Extensions { para_id: id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()).into() }, ) .with_name("Local Testnet") @@ -83,7 +84,21 @@ pub fn get_chain_spec_with_extra_endowed( /// Get the chain spec for a specific parachain ID. pub fn get_chain_spec(id: Option) -> ChainSpec { - get_chain_spec_with_extra_endowed(id, Default::default()) + get_chain_spec_with_extra_endowed( + id, + Default::default(), + cumulus_test_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + ) +} + +/// Get the chain spec for a specific parachain ID. +pub fn get_elastic_scaling_chain_spec(id: Option) -> ChainSpec { + get_chain_spec_with_extra_endowed( + id, + Default::default(), + cumulus_test_runtime::elastic_scaling::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) } /// Local testnet genesis for testing. diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index 87d1d4af8a95e..37ca27542cbfe 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -50,6 +50,12 @@ pub struct TestCollatorCli { #[arg(long)] pub fail_pov_recovery: bool, + + /// EXPERIMENTAL: Use slot-based collator which can handle elastic scaling. + /// + /// Use with care, this flag is unstable and subject to change. 
+ #[arg(long)] + pub experimental_use_slot_based: bool, } #[derive(Debug, clap::Subcommand)] @@ -253,8 +259,16 @@ impl SubstrateCli for TestCollatorCli { fn load_spec(&self, id: &str) -> std::result::Result, String> { Ok(match id { - "" => - Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_>, + "" => { + tracing::info!("Using default test service chain spec."); + Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_> + }, + "elastic-scaling" => { + tracing::info!("Using elastic-scaling chain spec."); + Box::new(cumulus_test_service::get_elastic_scaling_chain_spec(Some(ParaId::from( + 2100, + )))) as Box<_> + }, path => { let chain_spec = cumulus_test_service::chain_spec::ChainSpec::from_json_file(path.into())?; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 6f8b9d19bb29b..51cdebbaf54e0 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -25,7 +25,10 @@ pub mod chain_spec; use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ - collators::lookahead::{self as aura, Params as AuraParams}, + collators::{ + lookahead::{self as aura, Params as AuraParams}, + slot_based::{self as slot_based, Params as SlotBasedParams}, + }, ImportQueueParams, }; use cumulus_client_consensus_proposer::Proposer; @@ -45,7 +48,7 @@ use cumulus_client_cli::{CollatorOptions, RelayChainMode}; use cumulus_client_consensus_common::{ ParachainBlockImport as TParachainBlockImport, ParachainCandidate, ParachainConsensus, }; -use cumulus_client_pov_recovery::RecoveryHandle; +use cumulus_client_pov_recovery::{RecoveryDelayRange, RecoveryHandle}; #[allow(deprecated)] use cumulus_client_service::old_consensus; use cumulus_client_service::{ @@ -304,7 +307,7 @@ async fn build_relay_chain_interface( /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with(parachain_config.network.node_name.as_str())] +#[sc_tracing::logging::prefix_logs_with("Parachain")] pub async fn start_node_impl>( parachain_config: Configuration, collator_key: Option, @@ -316,6 +319,7 @@ pub async fn start_node_impl>( consensus: Consensus, collator_options: CollatorOptions, proof_recording_during_import: bool, + use_slot_based_collator: bool, ) -> sc_service::error::Result<( TaskManager, Arc, @@ -409,7 +413,6 @@ where } else { Box::new(overseer_handle.clone()) }; - let is_collator = collator_key.is_some(); let relay_chain_slot_duration = Duration::from_secs(6); start_relay_chain_tasks(StartRelayChainTasksParams { @@ -418,11 +421,11 @@ where para_id, relay_chain_interface: relay_chain_interface.clone(), task_manager: &mut task_manager, - da_recovery_profile: if is_collator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, + // Increase speed of recovery for testing purposes. 
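+ // Instead of the default collator/full-node profiles, use an explicit 1-5 second recovery delay range.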
+ da_recovery_profile: DARecoveryProfile::Other(RecoveryDelayRange { + min: Duration::from_secs(1), + max: Duration::from_secs(5), + }), import_queue: import_queue_service, relay_chain_slot_duration, recovery_handle, @@ -461,29 +464,72 @@ where ); let client_for_aura = client.clone(); - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle: sync_service, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(2000), - reinitialize: false, - }; - let fut = aura::run::(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + if use_slot_based_collator { + tracing::info!(target: LOG_TARGET, "Starting block authoring with slot based authoring."); + let params = SlotBasedParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura + .code_at(block_hash) + .ok() + .map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + slot_drift: Duration::from_secs(1), + }; + + let (collation_future, block_builer_future) = + slot_based::run::(params); + task_manager.spawn_essential_handle().spawn( + "collation-task", + None, + collation_future, + ); + task_manager.spawn_essential_handle().spawn( + "block-builder-task", + None, + block_builer_future, + ); + } else { + tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend.clone(), + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client_for_aura + .code_at(block_hash) + .ok() + .map(|c| ValidationCode::from(c).hash()) + }, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, + }; + + let fut = aura::run::(params); + task_manager.spawn_essential_handle().spawn("aura", None, fut); + } } } @@ -720,6 +766,7 @@ impl TestNodeBuilder { self.consensus, collator_options, self.record_proof_during_import, + false, ) .await .expect("could not create Cumulus test service"), @@ -735,6 +782,7 @@ impl TestNodeBuilder { self.consensus, collator_options, self.record_proof_during_import, + false, ) .await .expect("could not create Cumulus test service"), @@ -766,8 +814,11 @@ pub fn node_config( let root = base_path.path().join(format!("cumulus_test_service_{}", key)); let role = if is_collator { Role::Authority } else { Role::Full }; let key_seed = key.to_seed(); - let mut spec = - Box::new(chain_spec::get_chain_spec_with_extra_endowed(Some(para_id), endowed_accounts)); + let mut spec = Box::new(chain_spec::get_chain_spec_with_extra_endowed( + Some(para_id), + endowed_accounts, + cumulus_test_runtime::WASM_BINARY.expect("WASM 
binary was not built, please build it!"), + )); let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage"); diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 90d37173dd590..9357978b769a4 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -118,6 +118,7 @@ fn main() -> Result<(), sc_cli::Error> { consensus, collator_options, true, + cli.experimental_use_slot_based, ) .await, sc_network::config::NetworkBackendType::Litep2p => @@ -135,6 +136,7 @@ fn main() -> Result<(), sc_cli::Error> { consensus, collator_options, true, + cli.experimental_use_slot_based, ) .await, } diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 0ed77bf5b7073..ba1097fba0756 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -10,36 +10,36 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -paste = "1.0.14" +codec = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } log = { workspace = true } -lazy_static = "1.4.0" -impl-trait-for-tuples = "0.2.2" +lazy_static = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Substrate -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../substrate/frame/message-queue" } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } -parachains-common = { path = "../../parachains/common" } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, 
default-features = true } +parachains-common = { workspace = true, default-features = true } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm" } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 1a3f3930cb347..8de3660c22362 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +extern crate alloc; + pub use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; pub use lazy_static::lazy_static; pub use log; @@ -24,6 +26,8 @@ pub use std::{ }; // Substrate +pub use alloc::collections::vec_deque::VecDeque; +pub use core::{cell::RefCell, fmt::Debug}; pub use cumulus_primitives_core::AggregateMessageOrigin as CumulusAggregateMessageOrigin; pub use frame_support::{ assert_ok, @@ -44,7 +48,6 @@ pub use sp_core::{parameter_types, sr25519, storage::Storage, Pair}; pub use sp_crypto_hashing::blake2_256; pub use sp_io::TestExternalities; pub use sp_runtime::BoundedSlice; -pub use sp_std::{cell::RefCell, collections::vec_deque::VecDeque, fmt::Debug}; pub use sp_tracing; // Cumulus diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index b05285c87bff5..dc7095ced252d 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -2,7 +2,9 @@ Description: PoV recovery test Network: ./0002-pov_recovery.toml Creds: config -# wait 20 blocks and register parachain +# Wait 20 blocks and register parachain. This part is important for pov-recovery. +# We need to make sure that the recovering node is able to see all relay-chain +# notifications containing the candidates to recover. 
validator-3: reports block height is at least 20 within 250 seconds validator-0: js-script ./register-para.js with "2000" within 240 seconds validator-0: parachain 2000 is registered within 300 seconds diff --git a/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl b/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl index 49b6d9e94fd16..e1e8442f30509 100644 --- a/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl +++ b/cumulus/zombienet/tests/0003-full_node_catching_up.zndsl @@ -6,3 +6,6 @@ alice: parachain 2000 is registered within 225 seconds dave: reports block height is at least 7 within 250 seconds eve: reports block height is at least 7 within 250 seconds ferdie: reports block height is at least 7 within 250 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +charlie: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git a/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl b/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl index 7da8416d0161a..b14c15ed5e5b9 100644 --- a/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl +++ b/cumulus/zombienet/tests/0006-rpc_collator_builds_blocks.zndsl @@ -13,3 +13,7 @@ two: restart after 1 seconds three: restart after 20 seconds dave: is up dave: reports block height is at least 30 within 200 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +dave: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds +eve: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.toml b/cumulus/zombienet/tests/0008-elastic_authoring.toml new file mode 100644 index 0000000000000..f2e2010a9e458 --- /dev/null +++ b/cumulus/zombienet/tests/0008-elastic_authoring.toml @@ -0,0 +1,50 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 6 + allowed_ancestry_len = 3 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = ["" ] + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=trace" ] + count = 8 + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2100 +chain = "elastic-scaling" +add_to_genesis = true + + [[parachains.collators]] + name = "collator-elastic" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] + +# Slot based authoring with 1 core and 6s slot duration +[[parachains]] +id = 2000 +add_to_genesis = true + + [[parachains.collators]] + name = "collator-single-core" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = 
["-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.zndsl b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl new file mode 100644 index 0000000000000..a06ffd24fefd2 --- /dev/null +++ b/cumulus/zombienet/tests/0008-elastic_authoring.zndsl @@ -0,0 +1,19 @@ +Description: Slot based authoring for elastic scaling +Network: ./0008-elastic_authoring.toml +Creds: config + +alice: is up +collator-elastic: is up +collator-single-core: is up + + +# configure relay chain +alice: js-script ./assign-core.js with "2100,0" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "2100,1" return is 0 within 600 seconds + +collator-single-core: reports block height is at least 20 within 225 seconds +collator-elastic: reports block height is at least 40 within 225 seconds + +# We want to make sure that none of the consensus hook checks fail, even if the chain makes progress +collator-elastic: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds +collator-single-core: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml new file mode 100644 index 0000000000000..b695f8aa93765 --- /dev/null +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 6 + allowed_ancestry_len = 3 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = ["" ] + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=trace", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] + count = 8 + +# Slot based authoring with 3 cores and 2s slot duration +[[parachains]] +id = 2100 +chain = "elastic-scaling" +add_to_genesis = false + + # Slot based authoring with 3 cores and 2s slot duration + [[parachains.collators]] + name = "collator-elastic" + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["--disable-block-announcements", "-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] + + # run 'recovery-target' as a parachain full node + [[parachains.collators]] + name = "recovery-target" + validator = false # full node + image = "{{COL_IMAGE}}" + command = "test-parachain" + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'collator-elastic'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] diff --git 
a/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl new file mode 100644 index 0000000000000..5cca6120ff3a3 --- /dev/null +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.zndsl @@ -0,0 +1,24 @@ +Description: Elastic scaling PoV recovery test +Network: ./0009-elastic_pov_recovery.toml +Creds: config + +alice: is up +collator-elastic: is up + +# configure relay chain +alice: js-script ./assign-core.js with "2100,0" return is 0 within 200 seconds +alice: js-script ./assign-core.js with "2100,1" return is 0 within 200 seconds + +# Wait 20 blocks and register parachain. This part is important for pov-recovery. +# We need to make sure that the recovering node is able to see all relay-chain +# notifications containing the candidates to recover. +alice: reports block height is at least 20 within 250 seconds +alice: js-script ./register-para.js with "2100" within 240 seconds +alice: parachain 2100 is registered within 300 seconds + + +# check block production +collator-elastic: reports block height is at least 40 within 225 seconds +collator-elastic: count of log lines containing "set_validation_data inherent needs to be present in every block" is 0 within 10 seconds + +recovery-target: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 35 within 10 seconds diff --git a/cumulus/zombienet/tests/assign-core.js b/cumulus/zombienet/tests/assign-core.js new file mode 100644 index 0000000000000..4179b68b2e3cb --- /dev/null +++ b/cumulus/zombienet/tests/assign-core.js @@ -0,0 +1,46 @@ +// Assign a parachain to a core. +// +// First argument should be the parachain id. +// Second argument should be the core. +async function run(nodeName, networkInfo, args) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + let para = Number(args[0]); + let core = Number(args[1]); + console.log(`Assigning para ${para} to core ${core}`); + + await zombie.util.cryptoWaitReady(); + + // Submit transaction with Alice accoung + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + // Wait for this transaction to be finalized in a block. 
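+ // assignCore(core, begin, assignment, end_hint): assigning 57600 of 57600 parts gives the parachain task the whole core, starting at relay block 0 with no end hint.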
+ await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, 57600]], null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 196ba861f503c..60698de1d6add 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0 +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.6 # metadata ARG VCS_REF diff --git a/docs/contributor/CONTRIBUTING.md b/docs/contributor/CONTRIBUTING.md index 96dc86e978056..7d54b2681b413 100644 --- a/docs/contributor/CONTRIBUTING.md +++ b/docs/contributor/CONTRIBUTING.md @@ -4,18 +4,16 @@ The `Polkadot SDK` project is an **OPENISH Open Source Project** ## What? -Individuals making significant and valuable contributions are given commit-access to the project. -Contributions are done via pull-requests and need to be approved by the maintainers. +Individuals making significant and valuable contributions are given commit-access to the project. Contributions are done +via pull-requests and need to be approved by the maintainers. ## Rules There are a few basic ground-rules for contributors (including the maintainer(s) of the project): -1. **No `--force` pushes** or modifying the master branch history in any way. - If you need to rebase, ensure you do it in your own repo. No rewriting of the history - after the code has been shared (e.g. through a Pull-Request). -2. **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be - used for ongoing work. +1. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in + your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). +2. **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. 3. **All modifications** must be made in a **pull-request** to solicit feedback from other contributors. 4. A pull-request **must not be merged until CI** has finished successfully. 5. Contributors should adhere to the [house coding style](./STYLE_GUIDE.md). @@ -25,12 +23,10 @@ There are a few basic ground-rules for contributors (including the maintainer(s) ### In General -A Pull Request (PR) needs to be reviewed and approved by project maintainers. -If a change does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged -`A1-insubstantial` and merged faster. -If it is an urgent fix with no large change to logic, then it may be merged after a non-author -contributor has reviewed it well and approved the review once CI is complete. 
-No PR should be merged until all reviews' comments are addressed. +* A Pull Request (PR) needs to be reviewed and approved by project maintainers. +* If a change does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged `A1-insubstantial` and +merged faster. +* No PR should be merged until all reviews' comments are addressed. ### Labels @@ -38,39 +34,26 @@ The set of labels and their description can be found [here](https://paritytech.g ### Process -1. Please use our [Pull Request Template](./PULL_REQUEST_TEMPLATE.md) and make sure all relevant - information is reflected in your PR. -2. Please tag each PR with minimum one `T*` label. The respective `T*` labels should signal the - component that was changed, they are also used by downstream users to track changes and to - include these changes properly into their own releases. -3. If youโ€™re still working on your PR, please submit as โ€œDraftโ€. Once a PR is ready for review change - the status to โ€œOpenโ€, so that the maintainers get to review your PR. Generally PRs should sit for - 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -4. If youโ€™re introducing a major change, that might impact the documentation please add the label - `T13-documentation`. The docs team will get in touch. -5. If your PR changes files in these paths: - - `polkadot` : `^runtime/polkadot` - `polkadot` : `^runtime/kusama` - `polkadot` : `^primitives/src/` - `polkadot` : `^runtime/common` - `substrate` : `^frame/` - `substrate` : `^primitives/` - - It should be added to the [security audit board](https://github.com/orgs/paritytech/projects/103) - and will need to undergo an audit before merge. -6. PRs will be able to be merged once all reviewers' comments are addressed and CI is successful. - -**Noting breaking changes:** -When breaking APIs, the PR description should mention what was changed alongside some examples on how -to change the code to make it work/compile. -It should also mention potential storage migrations and if they require some special setup aside adding -it to the list of migrations in the runtime. +1. Please use our [Pull Request Template](./PULL_REQUEST_TEMPLATE.md) and make sure all relevant information is + reflected in your PR. +2. Please tag each PR with minimum one `T*` label. The respective `T*` labels should signal the component that was + changed, they are also used by downstream users to track changes and to include these changes properly into their own + releases. +3. If youโ€™re still working on your PR, please submit as โ€œDraftโ€. Once a PR is ready for review change the status to + โ€œOpenโ€, so that the maintainers get to review your PR. Generally PRs should sit for 48 hours in order to garner + feedback. It may be merged before if all relevant parties had a look at it. +4. With respect to auditing, please see [AUDIT.md](../AUDIT.md). In general, merging to master can happen independent of + audit. +5. PRs will be able to be merged once all reviewers' comments are addressed and CI is successful. + +**Noting breaking changes:** When breaking APIs, the PR description should mention what was changed alongside some +examples on how to change the code to make it work/compile. It should also mention potential storage migrations and if +they require some special setup aside adding it to the list of migrations in the runtime. ## Reviewing pull requests -When reviewing a pull request, the end-goal is to suggest useful changes to the author. 
-Reviews should finish with approval unless there are issues that would result in: +When reviewing a pull request, the end-goal is to suggest useful changes to the author. Reviews should finish with +approval unless there are issues that would result in: 1. Buggy behavior. 2. Undue maintenance burden. 3. Breaking with house coding style. @@ -80,18 +63,17 @@ Reviews should finish with approval unless there are issues that would result in The reviewers are also responsible to check: -1. if a changelog is necessary and attached -1. the quality of information in the changelog file -1. the PR has an impact on docs -1. that the docs team was included in the review process of a docs update +* if the PR description is well written to facilitate integration, in case it contains breaking changes. +* the PR has an impact on docs. **Reviews may not be used as an effective veto for a PR because**: 1. There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix. 2. It does not fit well with some other contributors' longer-term vision for the project. -## Documentation +## `PRDoc` -All Pull Requests must contain proper title & description. +All Pull Requests must contain proper title & description, as described in [Pull Request +Template](./PULL_REQUEST_TEMPLATE.md). Moreover, all pull requests must have a proper `prdoc` file attached. Some Pull Requests can be exempt of `prdoc` documentation, those must be labelled with [`R0-silent`](https://github.com/paritytech/labels/blob/main/ruled_labels/specs_polkadot-sdk.yaml#L89-L91). @@ -102,46 +84,49 @@ See more about `prdoc` [here](./prdoc.md) ## Helping out -We use [labels](https://github.com/paritytech/polkadot-sdk/labels) to manage PRs and issues and communicate -state of a PR. Please familiarise yourself with them. Best way to get started is to a pick a ticket tagged -[easy](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD0-easy) -or [medium](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD1-medium) -and get going. Alternatively, look out for issues tagged [mentor](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AC1-mentor) -and get in contact with the mentor offering their support on that larger task. +We use [labels](https://github.com/paritytech/polkadot-sdk/labels) to manage PRs and issues and communicate state of a +PR. Please familiarise yourself with them. Best way to get started is to a pick a ticket tagged +[easy](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD0-easy) or +[medium](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AD1-medium) and get going. +Alternatively, look out for issues tagged +[mentor](https://github.com/paritytech/polkadot-sdk/issues?q=is%3Aopen+is%3Aissue+label%3AC1-mentor) and get in contact +with the mentor offering their support on that larger task. **** ### Issues If what you are looking for is an answer rather than proposing a new feature or fix, search -[https://substrate.stackexchange.com](https://substrate.stackexchange.com/) to see if an post already -exists, and ask if not. Please do not file support issues here. -Before opening a new issue search to see if a similar one already exists and leave a comment that you -also experienced this issue or add your specifics that are related to an existing issue. 
-Please label issues with the following labels: +[https://substrate.stackexchange.com](https://substrate.stackexchange.com/) to see if an post already exists, and ask if +not. Please do not file support issues here. + +Before opening a new issue search to see if a similar one already exists and leave a comment that you also experienced +this issue or add your specifics that are related to an existing issue. + +Please label issues with the following labels (only relevant for maintainer): 1. `I*` issue severity and type. EXACTLY ONE REQUIRED. 2. `D*` issue difficulty, suggesting the level of complexity this issue has. AT MOST ONE ALLOWED. 3. `T*` Issue topic. MULTIPLE ALLOWED. ## Releases -Declaring formal releases remains the prerogative of the project maintainer(s). +Declaring formal releases remains the prerogative of the project maintainer(s). See [RELEASE.md](../RELEASE.md). ## UI tests -UI tests are used for macros to ensure that the output of a macro doesnโ€™t change and is in the expected format. -These UI tests are sensible to any changes in the macro generated code or to switching the rust stable version. -The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is for example complaining -about failing UI tests and it is expected that they fail these tests need to be executed locally. -To simplify the updating of the UI test output there is a script -- `./scripts/update-ui-tests.sh` to update the tests for a current rust version locally -- `./scripts/update-ui-tests.sh 1.70` # to update the tests for a specific rust version locally +UI tests are used for macros to ensure that the output of a macro doesnโ€™t change and is in the expected format. These UI +tests are sensible to any changes in the macro generated code or to switching the rust stable version. The tests are +only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is for example complaining about failing +UI tests and it is expected that they fail these tests need to be executed locally. To simplify the updating of the UI +test output there is a script +* `./scripts/update-ui-tests.sh` to update the tests for a current rust version locally +* `./scripts/update-ui-tests.sh 1.70` # to update the tests for a specific rust version locally Or if you have opened PR and you're member of `paritytech` - you can use command-bot to run the tests for you in CI: -- `bot update-ui` - will run the tests for the current rust version -- `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version -- `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - -will run the tests for the specified rust version and specified image +* `bot update-ui` - will run the tests for the current rust version +* `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version +* `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - will run +the tests for the specified rust version and specified image ## Feature Propagation @@ -157,4 +142,5 @@ Start with comment in PR: `bot help` to see the list of available commands. ## Deprecating code When deprecating and removing code you need to be mindful of how this could impact downstream developers. In order to -mitigate this impact, it is recommended to adhere to the steps outlined in the [Deprecation Checklist](./DEPRECATION_CHECKLIST.md). 
+mitigate this impact, it is recommended to adhere to the steps outlined in the [Deprecation +Checklist](./DEPRECATION_CHECKLIST.md). diff --git a/docs/contributor/PULL_REQUEST_TEMPLATE.md b/docs/contributor/PULL_REQUEST_TEMPLATE.md index 79a036a235ad9..083b30b4a3567 100644 --- a/docs/contributor/PULL_REQUEST_TEMPLATE.md +++ b/docs/contributor/PULL_REQUEST_TEMPLATE.md @@ -2,35 +2,42 @@ โœ„ ----------------------------------------------------------------------------- -Thank you for your Pull Request! ๐Ÿ™ Please make sure it follows the contribution guidelines outlined in -[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill -out the sections below. Once you're ready to submit your PR for review, please -delete this section and leave only the text under the "Description" heading. +Thank you for your Pull Request! ๐Ÿ™ Please make sure it follows the contribution guidelines outlined in [this +document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill out the +sections below. Once you're ready to submit your PR for review, please delete this section and leave only the text under +the "Description" heading. # Description -*Please include a summary of the changes and the related issue. Please also include relevant motivation and context, -including:* +*A concise description of what your PR is doing, and what potential issue it is solving. Use [Github semantic +linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) +to link the PR to an issue that must be closed once this is merged.* -- What does this PR do? -- Why are these changes needed? -- How were these changes implemented and what do they affect? +## Integration -*Use [Github semantic -linking](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) -to address any open issues this PR relates to or closes.* +*In depth notes about how this PR should be integrated by downstream projects. This part is mandatory, and should be +reviewed by reviewers, if the PR does NOT have the `R0-Silent` label. In case of a `R0-Silent`, it can be ignored.* + +## Review Notes + +*In depth notes about the **implenentation** details of your PR. This should be the main guide for reviewers to +understand your approach and effectively review it. If too long, use +[`
<details>
`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/details)*. -Fixes # (issue number, *if applicable*) +*Imagine that someone who is depending on the old code wants to integrate your new code and the only information that +they get is this section. It helps to include example usage and default value here, with a `diff` code-block to show +possibly integration.* -Closes # (issue number, *if applicable*) +*Include your leftover TODOs, if any, here.* # Checklist -- [ ] My PR includes a detailed description as outlined in the "Description" section above -- [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` +* [ ] My PR includes a detailed description as outlined in the "Description" and its two subsections above. +* [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) -- [ ] I have made corresponding changes to the documentation (if applicable) -- [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) + * External contributors: ask maintainers to put the right label on your PR. +* [ ] I have made corresponding changes to the documentation (if applicable) +* [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) You can remove the "Checklist" section once all have been checked. Thank you for your contribution! diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index fe9a96bcafc00..37417497e1f84 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,6 +1,6 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] - polkadot[polkadot.network] --> devhub[polkadot_sdk_docs] + polkadot_network[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs @@ -9,5 +9,5 @@ flowchart polkadot_sdk --> substrate polkadot_sdk --> frame polkadot_sdk --> cumulus - polkadot_sdk --> polkadot + polkadot_sdk --> polkadot[polkadot node] polkadot_sdk --> xcm diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index b0671623f48d4..d3e48de5d1819 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -15,95 +15,104 @@ workspace = true [dependencies] # Needed for all FRAME-based code -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } -frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ +codec = { workspace = true } +scale-info = { workspace = true } +frame = { features = [ "experimental", "runtime", -] } -pallet-examples = { path = "../../substrate/frame/examples" } -pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } -pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offchain-worker" } +], workspace = true, default-features = true } +pallet-examples = { workspace = true } +pallet-default-config-example = { workspace = true, default-features = true } +pallet-example-offchain-worker = { workspace = true, default-features = true } # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.8" +docify = { workspace = true } # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
-polkadot-sdk = { path = "../../umbrella", features = ["runtime"] } -node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } -kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } -chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } -subkey = { path = "../../substrate/bin/utils/subkey" } -frame-system = { path = "../../substrate/frame/system", default-features = false } -frame-support = { path = "../../substrate/frame/support", default-features = false } -frame-executive = { path = "../../substrate/frame/executive", default-features = false } -pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } -frame-metadata-hash-extension = { path = "../../substrate/frame/metadata-hash-extension" } +polkadot-sdk = { features = ["runtime"], workspace = true, default-features = true } +node-cli = { workspace = true } +kitchensink-runtime = { workspace = true } +chain-spec-builder = { workspace = true, default-features = true } +subkey = { workspace = true, default-features = true } +frame-system = { workspace = true } +frame-support = { workspace = true } +frame-executive = { workspace = true } +pallet-example-single-block-migrations = { workspace = true, default-features = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } # Substrate Client -sc-network = { path = "../../substrate/client/network" } -sc-rpc-api = { path = "../../substrate/client/rpc-api" } -sc-rpc = { path = "../../substrate/client/rpc" } -sc-client-db = { path = "../../substrate/client/db" } -sc-cli = { path = "../../substrate/client/cli" } -sc-consensus-aura = { path = "../../substrate/client/consensus/aura" } -sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } -sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } -sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } -sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } -sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } -sc-executor = { path = "../../substrate/client/executor" } -sc-service = { path = "../../substrate/client/service" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } +sc-network = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } +sc-consensus-pow = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } -substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" } -cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system" 
} -parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } -cumulus-primitives-proof-size-hostfunction = { path = "../../cumulus/primitives/proof-size-hostfunction" } -cumulus-client-service = { path = "../../cumulus/client/service" } -cumulus-primitives-storage-weight-reclaim = { path = "../../cumulus/primitives/storage-weight-reclaim" } +cumulus-pallet-aura-ext = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +parachain-info = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } # Pallets and FRAME internals -pallet-aura = { path = "../../substrate/frame/aura" } -pallet-timestamp = { path = "../../substrate/frame/timestamp" } -pallet-balances = { path = "../../substrate/frame/balances" } -pallet-assets = { path = "../../substrate/frame/assets" } -pallet-preimage = { path = "../../substrate/frame/preimage" } -pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } -pallet-utility = { path = "../../substrate/frame/utility" } -pallet-multisig = { path = "../../substrate/frame/multisig" } -pallet-proxy = { path = "../../substrate/frame/proxy" } -pallet-authorship = { path = "../../substrate/frame/authorship" } -pallet-collective = { path = "../../substrate/frame/collective" } -pallet-democracy = { path = "../../substrate/frame/democracy" } -pallet-uniques = { path = "../../substrate/frame/uniques" } -pallet-nfts = { path = "../../substrate/frame/nfts" } -pallet-scheduler = { path = "../../substrate/frame/scheduler" } +pallet-aura = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-multisig = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-authorship = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-democracy = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, default-features = true } +pallet-nfts = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +pallet-referenda = { workspace = true, default-features = true } +pallet-broker = { workspace = true, default-features = true } +pallet-babe = { workspace = true, default-features = true } # Primitives -sp-io = { path = "../../substrate/primitives/io" } -sp-api = { path = "../../substrate/primitives/api" } -sp-core = { path = "../../substrate/primitives/core" } -sp-keyring = { path = "../../substrate/primitives/keyring" } -sp-runtime = { path = "../../substrate/primitives/runtime" } 
-sp-arithmetic = { path = "../../substrate/primitives/arithmetic" } -sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder" } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } -# Misc pallet dependencies -pallet-referenda = { path = "../../substrate/frame/referenda" } -pallet-broker = { path = "../../substrate/frame/broker" } -pallet-babe = { path = "../../substrate/frame/babe" } - -sp-offchain = { path = "../../substrate/primitives/offchain" } -sp-version = { path = "../../substrate/primitives/version" } # XCM -xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -xcm-docs = { path = "../../polkadot/xcm/docs" } +xcm = { workspace = true, default-features = true } +xcm-docs = { workspace = true } # runtime guides -chain-spec-guide-runtime = { path = "./src/reference_docs/chain_spec_runtime" } +chain-spec-guide-runtime = { workspace = true } + +# Templates +minimal-template-runtime = { workspace = true } +solochain-template-runtime = { workspace = true } +parachain-template-runtime = { workspace = true } diff --git a/docs/sdk/src/guides/async_backing_guide.rs b/docs/sdk/src/guides/async_backing_guide.rs new file mode 100644 index 0000000000000..f2f4dcabfd29b --- /dev/null +++ b/docs/sdk/src/guides/async_backing_guide.rs @@ -0,0 +1,254 @@ +//! # Upgrade Parachain for Asynchronous Backing Compatibility +//! +//! This guide is relevant for cumulus based parachain projects started in 2023 or before, whose +//! backing process is synchronous where parablocks can only be built on the latest Relay Chain +//! block. Async Backing allows collators to build parablocks on older Relay Chain blocks and create +//! pipelines of multiple pending parablocks. This parallel block generation increases efficiency +//! and throughput. For more information on Async backing and its terminology, refer to the document +//! on [the Polkadot Wiki.](https://wiki.polkadot.network/docs/maintain-guides-async-backing) +//! +//! > If starting a new parachain project, please use an async backing compatible template such as +//! > the +//! > [parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). +//! The rollout process for Async Backing has three phases. Phases 1 and 2 below put new +//! infrastructure in place. Then we can simply turn on async backing in phase 3. +//! +//! ## Prerequisite +//! +//! The relay chain needs to have async backing enabled so double-check that the relay-chain +//! configuration contains the following three parameters (especially when testing locally e.g. with +//! zombienet): +//! +//! ```json +//! "async_backing_params": { +//! "max_candidate_depth": 3, +//! "allowed_ancestry_len": 2 +//! }, +//! "scheduling_lookahead": 2 +//! ``` +//! +//!
<div class="warning">`scheduling_lookahead` must be set to 2, otherwise parachain block times +//! will degrade to worse than with sync backing!</div>
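+//!
+//! As a quick cross-check (a minimal sketch, assuming `AsyncBackingParams` is available from
+//! `polkadot-primitives` and that you can read the relay chain's active configuration, for
+//! example in an integration test), the values above translate to:
+//!
+//! ```ignore
+//! use polkadot_primitives::AsyncBackingParams;
+//!
+//! /// Sanity-check the relay chain values from the prerequisite above.
+//! fn assert_async_backing_prerequisites(params: &AsyncBackingParams, scheduling_lookahead: u32) {
+//!     assert!(params.max_candidate_depth >= 3, "max_candidate_depth should be at least 3");
+//!     assert!(params.allowed_ancestry_len >= 2, "allowed_ancestry_len should be at least 2");
+//!     assert!(scheduling_lookahead >= 2, "scheduling_lookahead must be at least 2");
+//! }
+//! ```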
+//! +//! ## Phase 1 - Update Parachain Runtime +//! +//! This phase involves configuring your parachainโ€™s runtime `/runtime/src/lib.rs` to make use of +//! async backing system. +//! +//! 1. Establish and ensure constants for `capacity` and `velocity` are both set to 1 in the +//! runtime. +//! 2. Establish and ensure the constant relay chain slot duration measured in milliseconds equal to +//! `6000` in the runtime. +//! ```rust +//! // Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the +//! // relay chain. +//! pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +//! // How many parachain blocks are processed by the relay chain per parent. Limits the number of +//! // blocks authored per slot. +//! pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; +//! // Relay chain slot duration, in milliseconds. +//! pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +//! ``` +//! +//! 3. Establish constants `MILLISECS_PER_BLOCK` and `SLOT_DURATION` if not already present in the +//! runtime. +//! ```ignore +//! // `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +//! // up by `pallet_aura` to implement `fn slot_duration()`. +//! // +//! // Change this to adjust the block time. +//! pub const MILLISECS_PER_BLOCK: u64 = 12000; +//! pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +//! ``` +//! +//! 4. Configure `cumulus_pallet_parachain_system` in the runtime. +//! +//! - Define a `FixedVelocityConsensusHook` using our capacity, velocity, and relay slot duration +//! constants. Use this to set the parachain system `ConsensusHook` property. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", ConsensusHook)] +//! ```ignore +//! impl cumulus_pallet_parachain_system::Config for Runtime { +//! .. +//! type ConsensusHook = ConsensusHook; +//! .. +//! } +//! ``` +//! - Set the parachain system property `CheckAssociatedRelayNumber` to +//! `RelayNumberMonotonicallyIncreases` +//! ```ignore +//! impl cumulus_pallet_parachain_system::Config for Runtime { +//! .. +//! type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; +//! .. +//! } +//! ``` +//! +//! 5. Configure `pallet_aura` in the runtime. +//! +//! - Set `AllowMultipleBlocksPerSlot` to `false` (don't worry, we will set it to `true` when we +//! activate async backing in phase 3). +//! +//! - Define `pallet_aura::SlotDuration` using our constant `SLOT_DURATION` +//! ```ignore +//! impl pallet_aura::Config for Runtime { +//! .. +//! type AllowMultipleBlocksPerSlot = ConstBool; +//! #[cfg(feature = "experimental")] +//! type SlotDuration = ConstU64; +//! .. +//! } +//! ``` +//! +//! 6. Update `sp_consensus_aura::AuraApi::slot_duration` in `sp_api::impl_runtime_apis` to match +//! the constant `SLOT_DURATION` +#![doc = docify::embed!("../../templates/parachain/runtime/src/apis.rs", impl_slot_duration)] +//! +//! 7. Implement the `AuraUnincludedSegmentApi`, which allows the collator client to query its +//! runtime to determine whether it should author a block. +//! +//! - Add the dependency `cumulus-primitives-aura` to the `runtime/Cargo.toml` file for your +//! runtime +//! ```ignore +//! .. +//! cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } +//! .. +//! ``` +//! +//! - In the same file, add `"cumulus-primitives-aura/std",` to the `std` feature. +//! +//! - Inside the `impl_runtime_apis!` block for your runtime, implement the +//! `cumulus_primitives_aura::AuraUnincludedSegmentApi` as shown below. 
+#![doc = docify::embed!("../../templates/parachain/runtime/src/apis.rs", impl_can_build_upon)] +//! +//! **Note:** With a capacity of 1 we have an effective velocity of ½ even when velocity is +//! configured to some larger value. This is because capacity will be filled after a single block is +//! produced and will only be freed up after that block is included on the relay chain, which takes +//! 2 relay blocks to accomplish. Thus with capacity 1 and velocity 1 we get the customary 12 second +//! parachain block time. +//! +//! 8. If your `runtime/src/lib.rs` provides a `CheckInherents` type to `register_validate_block`, +//! remove it. `FixedVelocityConsensusHook` makes it unnecessary. The following example shows how +//! `register_validate_block` should look after removing `CheckInherents`. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", register_validate_block)] +//! +//! +//! ## Phase 2 - Update Parachain Nodes +//! +//! This phase consists of plugging in the new lookahead collator node. +//! +//! 1. Import `cumulus_primitives_core::ValidationCode` to `node/src/service.rs`. +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", cumulus_primitives)] +//! +//! 2. In `node/src/service.rs`, modify `sc_service::spawn_tasks` to use a clone of `Backend` rather +//! than the original +//! ```ignore +//! sc_service::spawn_tasks(sc_service::SpawnTasksParams { +//! .. +//! backend: backend.clone(), +//! .. +//! })?; +//! ``` +//! +//! 3. Add `backend` as a parameter to `start_consensus()` in `node/src/service.rs` +//! ```text +//! fn start_consensus( +//! .. +//! backend: Arc, +//! .. +//! ``` +//! ```ignore +//! if validator { +//! start_consensus( +//! .. +//! backend.clone(), +//! .. +//! )?; +//! } +//! ``` +//! +//! 4. In `node/src/service.rs` import the lookahead collator rather than the basic collator +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", lookahead_collator)] +//! +//! 5. In `start_consensus()` replace the `BasicAuraParams` struct with `AuraParams` +//! - Change the struct type from `BasicAuraParams` to `AuraParams` +//! - In the `para_client` field, pass in a cloned para client rather than the original +//! - Add a `para_backend` parameter after `para_client`, passing in our para backend +//! - Provide a `code_hash_provider` closure like that shown below +//! - Increase `authoring_duration` from 500 milliseconds to 1500 +//! ```ignore +//! let params = AuraParams { +//! .. +//! para_client: client.clone(), +//! para_backend: backend.clone(), +//! .. +//! code_hash_provider: move |block_hash| { +//! client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) +//! }, +//! .. +//! authoring_duration: Duration::from_millis(1500), +//! .. +//! }; +//! ``` +//! +//! **Note:** Set `authoring_duration` to whatever you want, taking your own hardware into account. +//! But if the backer, who should be slower than you due to reading from disk, times out at two +//! seconds, your candidates will be rejected. +//! +//! 6. In `start_consensus()` replace `basic_aura::run` with `aura::run` +//! ```ignore +//! let fut = +//! aura::run::( +//! params, +//! ); +//! task_manager.spawn_essential_handle().spawn("aura", None, fut); +//! ``` +//! +//! ## Phase 3 - Activate Async Backing +//! +//! This phase consists of changes to your parachain's runtime that activate the async backing feature. +//! +//! 1. Configure `pallet_aura`, setting `AllowMultipleBlocksPerSlot` to true in +//! `runtime/src/lib.rs`.
+#![doc = docify::embed!("../../templates/parachain/runtime/src/configs/mod.rs", aura_config)] +//! +//! 2. Increase the maximum `UNINCLUDED_SEGMENT_CAPACITY` in `runtime/src/lib.rs`. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", async_backing_params)] +//! +//! 3. Decrease `MILLISECS_PER_BLOCK` to 6000. +//! +//! - Note: For a parachain which measures time in terms of its own block number rather than by +//! relay block number it may be preferable to increase velocity. Changing block time may cause +//! complications, requiring additional changes. See the section "Timing by Block Number". +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", block_times)] +//! +//! 4. Update `MAXIMUM_BLOCK_WEIGHT` to reflect the increased time available for block production. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", max_block_weight)] +//! +//! 5. Add a feature-flagged alternative for `MinimumPeriod` in `pallet_timestamp`. The type should +//! be `ConstU64<0>` with the `experimental` feature flag, and `ConstU64<{SLOT_DURATION / 2}>` +//! without. +//! ```ignore +//! impl pallet_timestamp::Config for Runtime { +//! .. +//! #[cfg(feature = "experimental")] +//! type MinimumPeriod = ConstU64<0>; +//! #[cfg(not(feature = "experimental"))] +//! type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; +//! .. +//! } +//! ``` +//! +//! ## Timing by Block Number +//! +//! With asynchronous backing it will be possible for parachains to opt for a block time of 6 +//! seconds rather than 12 seconds. But modifying block duration isn't so simple for a parachain +//! which was measuring time in terms of its own block number. It could result in expected and +//! actual time not matching up, stalling the parachain. +//! +//! One strategy to deal with this issue is to instead rely on relay chain block numbers for timing. +//! The relay block number is tracked by each parachain in `pallet-parachain-system` with the +//! storage value `LastRelayChainBlockNumber`. This value can be obtained and used wherever timing +//! based on block number is needed. + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs new file mode 100644 index 0000000000000..bc4f36c271fe3 --- /dev/null +++ b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs @@ -0,0 +1,142 @@ +//! # Enable elastic scaling MVP for a parachain +//! +//!
This guide assumes full familiarity with Asynchronous Backing and its +//! terminology, as defined in https://wiki.polkadot.network/docs/maintain-guides-async-backing. +//! Furthermore, the parachain should have already been upgraded according to the guide.
+//! +//! ## Quick introduction to elastic scaling +//! +//! [Elastic scaling](https://polkadot.network/blog/elastic-scaling-streamling-growth-on-polkadot) +//! is a feature that will enable parachains to seamlessly scale up/down the number of used cores. +//! This can be desirable in order to increase the compute or storage throughput of a parachain or +//! to lower the latency between a transaction being submitted and it getting built in a parachain +//! block. +//! +//! At present, with Asynchronous Backing enabled, a parachain can only include a block on the relay +//! chain every 6 seconds, regardless of how many cores the parachain acquires. Elastic scaling +//! builds further on the 10x throughput increase of Async Backing, enabling collators to submit up +//! to 3 parachain blocks per relay chain block, resulting in a further 3x throughput increase. +//! +//! ## Current limitations of the MVP +//! +//! The full implementation of elastic scaling spans across the entire relay/parachain stack and is +//! still [work in progress](https://github.com/paritytech/polkadot-sdk/issues/1829). +//! The MVP is still considered experimental software, so stability is not guaranteed. +//! If you encounter any problems, +//! [please open an issue](https://github.com/paritytech/polkadot-sdk/issues). +//! The current limitations of the MVP are described below: +//! +//! 1. **Limited core count**. Parachain block authoring is sequential, so the second block will +//! start being built only after the previous block is imported. The current block production is +//! capped at 2 seconds of execution. Therefore, assuming the full 2 seconds are used, a +//! parachain can only utilise at most 3 cores in a relay chain slot of 6 seconds. If the full +//! execution time is not being used, higher core counts can be achieved. +//! 2. **Single collator requirement for consistently scaling beyond a core at full authorship +//! duration of 2 seconds per block.** Using the current implementation with multiple collators +//! adds additional latency to the block production pipeline. Assuming block execution takes +//! about the same time as authorship, the additional overhead is equal to the duration of the authorship +//! plus the block announcement. Each collator must first import the previous block before +//! authoring a new one, so it is clear that the highest throughput can be achieved using a +//! single collator. Experiments show that the peak performance using more than one collator +//! (measured up to 10 collators) is utilising 2 cores with an authorship time of 1.3 seconds per +//! block, which leaves 400ms for networking overhead. This would allow for 2.6 seconds of +//! execution, compared to the 2 seconds that async backing enables. +//! [More experiments](https://github.com/paritytech/polkadot-sdk/issues/4696) are being +//! conducted in this space. +//! 3. **Trusted collator set.** The collator set needs to be trusted until there's a mitigation +//! that would prevent or deter multiple collators from submitting the same collation to multiple +//! backing groups. A solution is being discussed +//! [here](https://github.com/polkadot-fellows/RFCs/issues/92). +//! 4. **Fixed scaling.** For true elasticity, the parachain must be able to seamlessly acquire or +//! sell coretime as the user demand grows and shrinks over time, in an automated manner. This is +//! currently lacking - a parachain can only scale up or down by "manually" acquiring coretime. +//! This is not in the scope of the relay chain functionality.
Parachains can already start +//! implementing such autoscaling, but we aim to provide a framework/examples for developing +//! autoscaling strategies. +//! +//! Another hard limitation that is not envisioned to ever be lifted is that parachains which create +//! forks will generally not be able to utilise the full number of cores they acquire. +//! +//! ## Using elastic scaling MVP +//! +//! ### Prerequisites +//! +//! - Ensure Asynchronous Backing is enabled on the network and you have enabled it on the parachain +//! using [`crate::guides::async_backing_guide`]. +//! - Ensure the `AsyncBackingParams.max_candidate_depth` value is configured to a value that is at +//! least double the maximum targeted parachain velocity. For example, if the parachain will build +//! at most 3 candidates per relay chain block, the `max_candidate_depth` should be at least 6. +//! - Use a trusted single collator for maximum throughput. +//! - Ensure enough coretime is assigned to the parachain. For maximum throughput the upper bound is +//! 3 cores. +//! +//!
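+//!
+//! As a rough illustration of the arithmetic implied by the prerequisites above (the constant names
+//! below are made up for this sketch and do not come from any template):
+//! ```ignore
+//! // Target at most 3 parachain blocks per relay chain block.
+//! const TARGET_VELOCITY: u32 = 3;
+//! // `AsyncBackingParams.max_candidate_depth` should be at least double the targeted velocity.
+//! const MIN_MAX_CANDIDATE_DEPTH: u32 = 2 * TARGET_VELOCITY; // = 6
+//! // For maximum throughput there is no benefit in assigning more than 3 cores.
+//! const MAX_USEFUL_CORES: u32 = 3;
+//! ```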
Phase 1 is not needed if using the `polkadot-parachain` binary built +//! from the latest polkadot-sdk release! Simply pass the `--experimental-use-slot-based` parameter +//! to the command line and jump to Phase 2.
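+//!
+//! For illustration, such an invocation could look roughly like the following. Apart from
+//! `--experimental-use-slot-based`, the flags and chain spec paths are placeholders for whatever
+//! your existing collator setup already uses:
+//! ```text
+//! polkadot-parachain \
+//!     --collator \
+//!     --chain parachain-spec.json \
+//!     --experimental-use-slot-based \
+//!     -- \
+//!     --chain relay-chain-spec.json
+//! ```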
+//! +//! The following steps assume using the cumulus parachain template. +//! +//! ### Phase 1 - (For custom parachain node) Update Parachain Node +//! +//! This assumes you are using +//! [the latest parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). +//! +//! This phase consists of plugging in the new slot-based collator. +//! +//! 1. In `node/src/service.rs` import the slot-based collator instead of the lookahead collator. +#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", slot_based_colator_import)] +//! +//! 2. In `start_consensus()` +//! - Remove the `overseer_handle` param (also remove the +//! `OverseerHandle` type import if it's not used elsewhere). +//! - Rename `AuraParams` to `SlotBasedParams`, remove the `overseer_handle` field and add a +//! `slot_drift` field with a value of `Duration::from_secs(1)`. +//! - Replace the single future returned by `aura::run` with the two futures returned by it and +//! spawn them as separate tasks: +#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", launch_slot_based_collator)] +//! +//! 3. In `start_parachain_node()` remove the `overseer_handle` param passed to `start_consensus`. +//! +//! ### Phase 2 - Activate fixed factor scaling in the runtime +//! +//! This phase consists of a couple of changes that need to be made to the parachain's runtime in order +//! to utilise fixed factor scaling. +//! +//! First of all, you need to decide on the upper limit of how many parachain blocks you need to +//! produce per relay chain block (in direct correlation with the number of acquired cores). This +//! should be either 1 (no scaling), 2 or 3. This is called the parachain velocity. +//! +//! If you configure a velocity which is different from the number of assigned cores, the measured +//! velocity in practice will be the minimum of these two. +//! +//! The chosen velocity will also be used to compute: +//! - The slot duration, by dividing the 6000 ms relay chain slot duration by the +//! velocity. +//! - The unincluded segment capacity, by multiplying the velocity by 2 and adding 1 to +//! it. +//! +//! Let's assume a desired maximum velocity of 3 parachain blocks per relay chain block. The needed +//! changes would all be done in `runtime/src/lib.rs`: +//! +//! 1. Rename `BLOCK_PROCESSING_VELOCITY` to `MAX_BLOCK_PROCESSING_VELOCITY` and increase it to the +//! desired value. In this example, 3. +//! +//! ```ignore +//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3; +//! ``` +//! +//! 2. Set the `MILLISECS_PER_BLOCK` to the desired value. +//! +//! ```ignore +//! const MILLISECS_PER_BLOCK: u32 = +//! RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY; +//! ``` +//! Note: for a parachain which measures time in terms of its own block number, changing block +//! time may cause complications, requiring additional changes. See here for more information: +//! [`crate::guides::async_backing_guide#timing-by-block-number`]. +//! +//! 3. Increase the `UNINCLUDED_SEGMENT_CAPACITY` to the desired value. +//! +//! ```ignore +//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1; +//! ``` diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index f5f6d2b5e0c07..9384f4c82ab3e 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -1,7 +1,16 @@ //! # Polkadot SDK Docs Guides //! -//! This crate contains a collection of guides that are foundational to the developers of -//!
Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! This crate contains a collection of guides that are foundational to the developers of Polkadot +//! SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! +//! 1. [`crate::guides::your_first_pallet`] is your starting point with Polkadot SDK. It contains +//! the basics of +//! building a simple crypto currency with FRAME. +//! 2. [`crate::guides::your_first_runtime`] is the next step in your journey. It contains the +//! basics of building a runtime that contains this pallet, plus a few common pallets from FRAME. +//! +//! +//! Other guides are related to other miscellaneous topics and are listed as modules below. /// Write your first simple pallet, learning the most most basic features of FRAME along the way. pub mod your_first_pallet; @@ -11,21 +20,27 @@ pub mod your_first_pallet; pub mod your_first_runtime; /// Running the given runtime with a node. No specific consensus mechanism is used at this stage. -pub mod your_first_node; - -/// How to change the consensus engine of both the node and the runtime. -pub mod changing_consensus; +// TODO +// pub mod your_first_node; /// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain and connect /// it to a relay-chain. -pub mod cumulus_enabled_parachain; +// TODO +// pub mod cumulus_enabled_parachain; /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself /// and the relay chain to which it is connected. -pub mod xcm_enabled_parachain; +// TODO +// pub mod xcm_enabled_parachain; /// How to enable storage weight reclaiming in a parachain node and runtime. pub mod enable_pov_reclaim; +/// How to enable Async Backing on parachain projects that started in 2023 or before. +pub mod async_backing_guide; + /// How to enable metadata hash verification in the runtime. pub mod enable_metadata_hash; + +/// How to enable elastic scaling MVP on a parachain. +pub mod enable_elastic_scaling_mvp; diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index c6e0dd0edf892..da4624f5ac2b8 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -14,18 +14,14 @@ //! > FRAME-based runtimes use various techniques to re-use a currency pallet instead of writing //! > one. Further advanced FRAME related topics are discussed in [`crate::reference_docs`]. //! -//! ## Topics Covered +//! ## Writing Your First Pallet //! -//! The following FRAME topics are covered in this guide: +//! To get started, use one of the templates mentioned in [`crate::polkadot_sdk::templates`]. We +//! recommend using the `polkadot-sdk-minimal-template`. You might need to change small parts of +//! this guide, namely the crate/package names, based on which tutorial you use. //! -//! - [Storage](frame::pallet_macros::storage) -//! - [Call](frame::pallet_macros::call) -//! - [Event](frame::pallet_macros::event) -//! - [Error](frame::pallet_macros::error) -//! - Basics of testing a pallet -//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) -//! -//! ## Writing Your First Pallet +//! > Be aware that you can read the entire source code backing this tutorial by clicking on the +//! > [`source`](./mod.rs.html) button at the top right of the page. //! //! You should have studied the following modules as a prelude to this guide: //! @@ -33,16 +29,28 @@ //! 
- [`crate::reference_docs::trait_based_programming`] //! - [`crate::polkadot_sdk::frame_runtime`] //! +//! ## Topics Covered +//! +//! The following FRAME topics are covered in this guide: +//! +//! - [`pallet::storage`] +//! - [`pallet::call`] +//! - [`pallet::event`] +//! - [`pallet::error`] +//! - Basics of testing a pallet +//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) +//! //! ### Shell Pallet //! //! Consider the following as a "shell pallet". We continue building the rest of this pallet based //! on this template. //! -//! [`pallet::config`](frame::pallet_macros::config) and -//! [`pallet::pallet`](frame::pallet_macros::pallet) are both mandatory parts of any pallet. Refer -//! to the documentation of each to get an overview of what they do. +//! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any pallet. Refer to the +//! documentation of each to get an overview of what they do. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] //! +//! All of the code that follows in this guide should live inside of the `mod pallet`. +//! //! ### Storage //! //! First, we will need to create two onchain storage declarations. @@ -55,15 +63,14 @@ //! > generic bounded type in the `Config` trait, and then specify it in the implementation. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] //! -//! The definition of these two storage items, based on [`frame::pallet_macros::storage`] details, -//! is as follows: +//! The definition of these two storage items, based on [`pallet::storage`] details, is as follows: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] //! //! ### Dispatchables //! -//! Next, we will define the dispatchable functions. As per [`frame::pallet_macros::call`], these -//! will be defined as normal `fn`s attached to `struct Pallet`. +//! Next, we will define the dispatchable functions. As per [`pallet::call`], these will be defined +//! as normal `fn`s attached to `struct Pallet`. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] //! //! The logic of the functions is self-explanatory. Instead, we will focus on the FRAME-related @@ -79,7 +86,6 @@ //! was signed by `who`. #![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", ensure_signed)] //! -//! //! - Where does `mutate`, `get` and `insert` and other storage APIs come from? All of them are //! explained in the corresponding `type`, for example, for `Balances::::insert`, you can look //! into [`frame::prelude::StorageMap::insert`]. @@ -95,8 +101,7 @@ //! //! - Why are all `get` and `mutate` functions returning an `Option`? This is the default behavior //! of FRAME storage APIs. You can learn more about how to override this by looking into -//! [`frame::pallet_macros::storage`], and -//! [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] +//! [`pallet::storage`], and [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`] //! //! ### Improving Errors //! @@ -116,6 +121,25 @@ //! //! ### Your First (Test) Runtime //! +//! The typical testing code of a pallet lives in a module that imports some preludes useful for +//! testing, similar to: +//! +//! ``` +//! pub mod pallet { +//! // snip -- actually pallet code. +//! } +//! +//! #[cfg(test)] +//! mod tests { +//! // bring in the testing prelude of frame +//! use frame::testing_prelude::*; +//! 
// bring in all pallet items +//! use super::pallet::*; +//! +//! // snip -- rest of the testing code. +//! } +//! ``` +//! //! Next, we create a "test runtime" in order to test our pallet. Recall from //! [`crate::polkadot_sdk::frame_runtime`] that a runtime is a collection of pallets, expressed //! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include @@ -166,7 +190,6 @@ //! As noted above, the `T::AccountId` is now `u64`. Moreover, `Runtime` is replacing ``. //! This is why for example you see `Balances::::get(..)`. Finally, notice that the //! dispatchables are simply functions that can be called on top of the `Pallet` struct. -// TODO: hard to explain exactly `RuntimeOrigin::signed(ALICE)` at this point. //! //! Congratulations! You have written your first pallet and tested it! Next, we learn a few optional //! steps to improve our pallet. @@ -236,8 +259,7 @@ //! by one character. FRAME errors are exactly a solution to maintain readability, whilst fixing //! the drawbacks mentioned. In short, we use an enum to represent different variants of our //! error. These variants are then mapped in an efficient way (using only `u8` indices) to -//! [`sp_runtime::DispatchError::Module`]. Read more about this in -//! [`frame::pallet_macros::error`]. +//! [`sp_runtime::DispatchError::Module`]. Read more about this in [`pallet::error`]. //! //! - **Event**: Events are akin to the return type of dispatchables. They are mostly data blobs //! emitted by the runtime to let outside world know what is happening inside the pallet. Since @@ -246,20 +268,16 @@ //! use passive tense for event names (eg. `SomethingHappened`). This allows other sub-systems or //! external parties (eg. a light-node, a DApp) to listen to particular events happening, without //! needing to re-execute the whole state transition function. -// TODO: both need to be improved a lot at the pallet-macro rust-doc level. Also my explanation -// of event is probably not the best. //! //! With the explanation out of the way, let's see how these components can be added. Both follow a -//! fairly familiar syntax: normal Rust enums, with extra -//! [`#[frame::event]`](frame::pallet_macros::event) and -//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached. +//! fairly familiar syntax: normal Rust enums, with extra [`pallet::event`] and [`pallet::error`] +//! attributes attached. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] //! -//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn -//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too -//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two -//! things: +//! One slightly custom part of this is the [`pallet::generate_deposit`] part. Without going into +//! too much detail, in order for a pallet to emit events to the rest of the system, it needs to do +//! two things: //! //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In //! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: @@ -268,8 +286,8 @@ //! store it where needed. //! //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME -//! provides a default way of storing events, and this is what -//! 
[`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing. +//! provides a default way of storing events, and this is what [`pallet::generate_deposit`] is +//! doing. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in @@ -297,10 +315,17 @@ //! - [`crate::reference_docs::defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. //! - [`crate::reference_docs::frame_runtime_types`]. -//! - The pallet we wrote in this guide was using `dev_mode`, learn more in -//! [`frame::pallet_macros::config`]. +//! - The pallet we wrote in this guide was using `dev_mode`, learn more in [`pallet::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in //! [`frame::pallet_macros`]. +//! +//! [`pallet::storage`]: ../../../frame_support/pallet_macros/attr.config.html +//! [`pallet::call`]: ../../../frame_support/pallet_macros/attr.call.html +//! [`pallet::event`]: ../../../frame_support/pallet_macros/attr.event.html +//! [`pallet::error`]: ../../../frame_support/pallet_macros/attr.error.html +//! [`pallet::pallet`]: ../../../frame_support/pallet_macros/attr.pallet.html +//! [`pallet::config`]: ../../../frame_support/pallet_macros/attr.config.html +//! [`pallet::generate_deposit`]: ../../../frame_support/pallet_macros/attr.generate_deposit.html #[docify::export] #[frame::pallet(dev_mode)] @@ -418,16 +443,22 @@ pub mod pallet { #[cfg(any(test, doc))] pub(crate) mod tests { use crate::guides::your_first_pallet::pallet::*; + + #[docify::export(testing_prelude)] use frame::testing_prelude::*; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; + + pub(crate) const ALICE: u64 = 1; + pub(crate) const BOB: u64 = 2; + pub(crate) const CHARLIE: u64 = 3; #[docify::export] + // This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod + // tests { .. }` mod runtime { use super::*; // we need to reference our `mod pallet` as an identifier to pass to // `construct_runtime`. + // YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE use crate::guides::your_first_pallet::pallet as pallet_currency; construct_runtime!( @@ -595,7 +626,7 @@ pub mod pallet { #[test] fn transfer_works() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: @@ -617,7 +648,7 @@ pub mod pallet { #[test] fn transfer_from_non_existent_fails() { StateBuilder::default().build_and_execute(|| { - // given the the initial state, when: + // given the initial state, when: assert_err!( Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), "NonExistentAccount" @@ -738,7 +769,7 @@ pub mod pallet_v2 { // the final assertion. System::set_block_number(ALICE); - // given the the initial state, when: + // given the initial state, when: assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); // then: diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs index 3e02ef1b1b28e..c58abc1120c13 100644 --- a/docs/sdk/src/guides/your_first_runtime.rs +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -1 +1,3 @@ //! # Your first Runtime +//! +//! 
๐Ÿšง diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs index f9b8a381365c4..39255c8f51ad6 100644 --- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -87,8 +87,6 @@ //! * writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template). //! * writing a runtime in AssemblyScript,as explored in [this project](https://github.com/LimeChain/subsembly). -use frame::prelude::*; - /// A FRAME based pallet. This `mod` is the entry point for everything else. All /// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an /// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for @@ -96,7 +94,7 @@ use frame::prelude::*; #[docify::export] #[frame::pallet(dev_mode)] pub mod pallet { - use super::*; + use frame::prelude::*; /// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a /// later point from the runtime that wishes to contain it. It allows the pallet to be diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index 4bf0e839c798f..e87eb9c2bc8ab 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -1,19 +1,33 @@ //! # Templates //! -//! ### Internal +//! This document enumerates a non-exhaustive list of templates that one can use to get started with +//! polkadot-sdk. //! -//! The following templates are maintained as a part of the `polkadot-sdk` repository: +//! > Know more tools/templates that are not listed here? please contribute them by opening a PR. //! -//! - classic [`substrate-node-template`]: is a white-labeled substrate-based blockchain with a -//! moderate amount of features. It can act as a great starting point for those who want to learn -//! Substrate/FRAME and want to have a template that is already doing something. -//! - [`substrate-minimal-template`]: Same as the above, but it contains the least amount of code in -//! both the node and runtime. It is a great starting point for those who want to deeply learn -//! Substrate and FRAME. -//! - classic [`cumulus-parachain-template`], which is the de-facto parachain template shipped with -//! Cumulus. It is the parachain-enabled version of [`substrate-node-template`]. +//! ## Internal //! -//! ### External Templates +//! The following [templates](https://github.com/paritytech/polkadot-sdk/blob/master/templates) are +//! maintained as a part of the `polkadot-sdk` repository: +//! +//! - `minimal_template_node`/[`minimal_template_runtime`]: A minimal template that contains the +//! least amount of features to be a functioning blockchain. Suitable for learning, development +//! and testing. This template is not meant to be used in production. +//! - `solochain_template_node`/[`solochain_template_runtime`]: Formerly known as +//! "substrate-node-template", is a white-labeled substrate-based blockchain (aka. solochain) that +//! contains moderate features, such as a basic consensus engine and some FRAME pallets. This +//! template can act as a good starting point for those who want to launch a solochain. +//! - `parachain_template_node`/[`parachain_template_runtime`]: A parachain template ready to be +//! connected to a test relay-chain. +//! +//! These templates are always kept up to date, and are mirrored to external repositories for easy +//! forking: +//! +//! - +//! - +//! - +//! +//! 
## External Templates //! //! Noteworthy templates outside of this repository. //! @@ -22,23 +36,17 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ -//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ - -// TODO: in general, we need to make a deliberate choice here of moving a few key templates to this -// repo (nothing stays in `substrate-developer-hub`) and the everything else should be community -// maintained. https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// TODO: we should rename `substrate-node-template` to `substrate-basic-template`, -// `substrate-blockchain-template`. `node` is confusing in the name. -// `substrate-blockchain-template` and `cumulus-parachain-template` go well together ๐Ÿค. https://github.com/paritytech/polkadot-sdk-docs/issues/67 - -// NOTE: a super important detail that I am looking forward to here is -// and -// . Meaning that I would not spend time on -// teaching someone too much detail about the ugly thing we call "node" nowadays. In the future, I -// am sure we will either have a better "node-builder" code that can actually be tested, or an -// "omni-node" that can run (almost) any wasm file. We should already build tutorials in this -// direction IMO. This also affects all the templates. If we have a good neat runtime file, which we -// are moving toward, and a good node-builder, we don't need all of these damn templates. These -// templates are only there because the boilerplate is super horrible atm. +//! ## OpenZeppelin +//! +//! In June 2023, OpenZeppelin was awarded a grant from the [Polkadot +//! treasury](https://polkadot.polkassembly.io/treasury/406) for building a number of Polkadot-sdk +//! based templates. These templates are expected to be a great starting point for developers. +//! +//! - +//! +//! ## POP-CLI +//! +//! Is a CLI tool capable of scaffolding a new polkadot-sdk-based project, possibly removing the +//! need for templates. +//! +//! - diff --git a/docs/sdk/src/reference_docs/blockchain_scalibility.rs b/docs/sdk/src/reference_docs/blockchain_scalibility.rs deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index c6dd3af9d90be..0284957120328 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -10,44 +10,43 @@ edition.workspace = true publish = false [dependencies] -docify = "0.2.8" -parity-scale-codec = { version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } -serde = { workspace = true, default-features = false } +docify = { workspace = true } +codec = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { package = "polkadot-sdk-frame", path = "../../../../../substrate/frame", default-features = false, features = [ +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } # pallets that we want to use -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } -sp-keyring = { path = "../../../../../substrate/primitives/keyring", default-features = false } -sp-application-crypto = { path = "../../../../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } +sp-genesis-builder = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] -chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../../../../substrate/bin/utils/chain-spec-builder" } -sc-chain-spec = { path = "../../../../../substrate/client/chain-spec" } +chain-spec-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } [features] default = ["std"] std = [ - "parity-scale-codec/std", + "codec/std", "scale-info/std", "frame/std", @@ -63,7 +62,6 @@ std = [ "sp-genesis-builder/std", "sp-keyring/std", "sp-runtime/std", - "sp-std/std", "serde/std", "serde_json/std", diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/lib.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/lib.rs index 4606104fb9680..e7effce1bd669 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/lib.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/lib.rs @@ -19,6 +19,8 @@ //! A minimal runtime that shows runtime genesis state. 
+extern crate alloc; + pub mod pallets; pub mod presets; pub mod runtime; diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs index be4455aa21979..2ff2d9539e2db 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs @@ -17,6 +17,7 @@ //! Pallets for the chain-spec demo runtime. +use alloc::vec::Vec; use frame::prelude::*; #[docify::export] diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs index c51947f6cc7cb..02c2d90f7c827 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs @@ -18,10 +18,10 @@ //! Presets for the chain-spec demo runtime. use crate::pallets::{FooEnum, SomeFooData1, SomeFooData2}; +use alloc::vec; use serde_json::{json, to_string, Value}; use sp_application_crypto::Ss58Codec; use sp_keyring::AccountKeyring; -use sp_std::vec; /// A demo preset with strings only. pub const PRESET_1: &str = "preset_1"; @@ -122,7 +122,7 @@ fn preset_invalid() -> Value { /// /// If no preset with given `id` exits `None` is returned. #[docify::export] -pub fn get_builtin_preset(id: &sp_genesis_builder::PresetId) -> Option> { +pub fn get_builtin_preset(id: &sp_genesis_builder::PresetId) -> Option> { let preset = match id.try_into() { Ok(PRESET_1) => preset_1(), Ok(PRESET_2) => preset_2(), diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs index 6d9bc1260b11f..c45f0126337e5 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs @@ -25,6 +25,7 @@ use crate::{ pallets::{pallet_bar, pallet_foo}, presets::*, }; +use alloc::{vec, vec::Vec}; use frame::{ deps::frame_support::{ genesis_builder_helper::{build_state, get_preset}, diff --git a/docs/sdk/src/reference_docs/consensus_swapping.rs b/docs/sdk/src/reference_docs/consensus_swapping.rs deleted file mode 100644 index e639761ee97b4..0000000000000 --- a/docs/sdk/src/reference_docs/consensus_swapping.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Consensus Swapping -//! -//! Notes: -//! -//! - The typical workshop done by Joshy in some places where he swaps out the consensus to be PoW. -//! - This could also be a tutorial rather than a ref doc, depending on the size. diff --git a/docs/sdk/src/reference_docs/custom_host_functions.rs b/docs/sdk/src/reference_docs/custom_host_functions.rs new file mode 100644 index 0000000000000..719b208a2bff7 --- /dev/null +++ b/docs/sdk/src/reference_docs/custom_host_functions.rs @@ -0,0 +1,27 @@ +//! # Custom Host Functions +//! +//! Host functions are functions that the wasm instance can use to communicate with the node. Learn +//! more about this in [`crate::reference_docs::wasm_meta_protocol`]. +//! +//! ## Finding Host Functions +//! +//! To declare a set of functions as host functions, you need to use the `#[runtime_interface]` +//! ([`sp_runtime_interface`]) attribute macro. The most notable set of host functions are those +//! that allow the runtime to access the chain state, namely [`sp_io::storage`]. Some other notable +//! host functions are also defined in [`sp_io`]. +//! +//! ## Adding New Host Functions +//! +//! > Adding a new host function is a big commitment and should be done with care. 
Namely, the nodes +//! > in the network need to support all host functions forever in order to be able to sync +//! > historical blocks. +//! +//! Adding host functions is only possible when you are using a node-template, so that you have +//! access to the boilerplate of building your node. +//! +//! A group of host functions can always be grouped together as a tuple: +#![doc = docify::embed!("../../substrate/primitives/io/src/lib.rs", SubstrateHostFunctions)] +//! +//! The host functions are attached to the node side's [`sc_executor::WasmExecutor`]. For example in +//! the minimal template, the setup looks as follows: +#![doc = docify::embed!("../../templates/minimal/node/src/service.rs", FullClient)] diff --git a/docs/sdk/src/reference_docs/fee_less_runtime.rs b/docs/sdk/src/reference_docs/fee_less_runtime.rs index 1213c26282537..9146b30ec5774 100644 --- a/docs/sdk/src/reference_docs/fee_less_runtime.rs +++ b/docs/sdk/src/reference_docs/fee_less_runtime.rs @@ -1,5 +1,6 @@ //! # Fee-Less Runtime //! +//! 🚧 Work In Progress 🚧 //! //! Notes: //! diff --git a/docs/sdk/src/reference_docs/frame_logging.rs b/docs/sdk/src/reference_docs/frame_logging.rs new file mode 100644 index 0000000000000..301fa7ef83f82 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_logging.rs @@ -0,0 +1,116 @@ +//! # FRAME Logging +//! +//! This reference doc briefly explores how to do logging and printing in runtimes, mainly +//! FRAME-based ones. +//! +//! ## Using `println!` +//! +//! To recap, as with standard Rust, you can use `println!` _in your tests_, but it will only print +//! out if executed with `--nocapture`, or if the test panics. +//! +//! ``` +//! fn it_print() { +//! println!("Hello, world!"); +//! } +//! ``` +//! +//! Within the pallet, if you want to use the standard `println!`, it needs to be wrapped in +//! [`sp_std::if_std`]. Of course, this means that this print code is only available to you under the +//! `std` compiler flag, and never present in a wasm build. +//! +//! ``` +//! // somewhere in your pallet. This is not real pallet code. +//! mod pallet { +//! struct Pallet; +//! impl Pallet { +//! fn print() { +//! sp_std::if_std! { +//! println!("Hello, world!"); +//! } +//! } +//! } +//! } +//! ``` +//! +//! ## Using `log` +//! +//! First, ensure you are familiar with the `log` crate. In short, each log statement has: +//! +//! 1. `log-level`, signifying how important it is +//! 2. `log-target`, signifying to which component it belongs. +//! +//! Add log statements to your pallet as follows: +//! +//! First, add the `log` crate to the `Cargo.toml` of the pallet. +//! +//! ```text +//! [dependencies] +//! log = { version = "x.y.z", default-features = false } +//! +//! [features] +//! std = [ +//! # snip -- other pallets +//! "log/std" +//! ] +//! ``` +//! +//! More conveniently, the `frame` umbrella crate re-exports the log crate as [`frame::log`]. +//! +//! Then, the pallet can use this crate to emit log statements. In this statement, we use the info +//! level, and the target is `pallet-example`. +//! +//! ``` +//! mod pallet { +//! struct Pallet; +//! +//! impl Pallet { +//! fn logs() { +//! frame::log::info!(target: "pallet-example", "Hello, world!"); +//! } +//! } +//! } +//! ``` +//! +//! This will in itself just emit the log messages, **but unless captured by a logger, they will +//! not go anywhere**. [`sp_api`] provides a handy function to enable the runtime logging: +//! +//! ``` +//! // in your test +//! fn it_also_prints() { +//! sp_api::init_runtime_logger();
// call into your pallet, and now it will print `log` statements. +//! } +//! ``` +//! +//! Alternatively, you can use [`sp_tracing::try_init_simple`]. +//! +//! `info`, `error` and `warn` logs are printed by default, but if you want lower-level logs to also +//! be printed, you must add the following compiler flag: +//! +//! ```text +//! RUST_LOG=pallet-example=trace cargo test +//! ``` +//! +//! ## Enabling Logs in Production +//! +//! All logs from the runtime are emitted by default, but there is a feature flag in [`sp_api`], +//! called `disable-logging`, that can be used to disable all logs in the runtime. This is useful +//! for production chains to reduce the size and overhead of the wasm runtime. +#![doc = docify::embed!("../../substrate/primitives/api/src/lib.rs", init_runtime_logger)] +//! +//! Similar to the above, the proper `RUST_LOG` value must also be set when +//! compiling the runtime. +//! +//! ## Log Target Prefixing +//! +//! Many [`crate::polkadot_sdk::frame_runtime`] pallets emit logs with a log target prefixed with `runtime::`, for example `runtime::system`. This then allows one to run a node with a wasm blob +//! compiled with `LOG_TARGET=runtime=debug`, which enables the log target of all pallets whose log +//! target starts with `runtime`. +//! +//! ## Low Level Primitives +//! +//! Under the hood, logging is just another instance of host functions (as defined in +//! [`crate::reference_docs::wasm_meta_protocol`]). The runtime uses a set of host functions under +//! [`sp_io::logging`] and [`sp_io::misc`] to emit all logs and prints. You typically do not need to +//! use these APIs directly. diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs index 7999707e5ee01..1ec9212e23066 100644 --- a/docs/sdk/src/reference_docs/frame_offchain_workers.rs +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -58,7 +58,6 @@ //! [`frame::pallet_macros::hooks`]. //! //! ``` -//! //! #[frame::pallet] //! pub mod pallet { //! use frame::prelude::*; diff --git a/docs/sdk/src/reference_docs/frame_storage_derives.rs b/docs/sdk/src/reference_docs/frame_storage_derives.rs new file mode 100644 index 0000000000000..3d9edef398a07 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_storage_derives.rs @@ -0,0 +1,199 @@ +//!
+//! In all examples, a few lines of boilerplate have been hidden from each snippet for conciseness. +//!
+//! +//! Let's begin by starting to store a `NewType` in a storage item: +//! +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! pub struct NewType(u32); +// +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! This raises a number of compiler errors, like: +//! ```text +//! the trait `MaxEncodedLen` is not implemented for `NewType`, which is required by +//! `frame::prelude::StorageValue<_GeneratedPrefixForStorageSomething, NewType>: +//! StorageInfoTrait` +//! ``` +//! +//! This implies the following set of traits that need to be derived for a type to be stored in +//! `frame` storage: +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! pub struct NewType(u32); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Next, let's look at how this will differ if we are to store a type that is derived from `T` in +//! storage, such as [`frame::prelude::BlockNumberFor`]: +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Surprisingly, this will also raise a number of errors, like: +//! ```text +//! the trait `TypeInfo` is not implemented for `T`, which is required +//! by`frame_support::pallet_prelude::StorageValue, +//! pallet_2::NewType>:StorageEntryMetadataBuilder +//! ``` +//! +//! Why is that? The underlying reason is that the `TypeInfo` `derive` macro will only work for +//! `NewType` if all of `NewType`'s generics also implement `TypeInfo`. This is not the case for `T` +//! in the example above. +//! +//! If you expand an instance of the derive, you will find something along the lines of: +//! `impl TypeInfo for NewType where T: TypeInfo { ... }`. This is the reason why the +//! `TypeInfo` trait is required for `T`. +//! +//! To fix this, we need to add a `#[scale_info(skip_type_params(T))]` +//! attribute to `NewType`. This additional macro will instruct the `derive` to skip the bound on +//! `T`. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo)] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType>; +//! } +//! ``` +//! +//! Next, let's say we wish to store `NewType` as [`frame::prelude::ValueQuery`], which means it +//! must also implement `Default`. This should be as simple as adding `derive(Default)` to it, +//! right? +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! 
# use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive(codec::Encode, codec::Decode, codec::MaxEncodedLen, scale_info::TypeInfo, Default)] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType, ValueQuery>; +//! } +//! ``` +//! +//! Under the hood, the expansion of the `derive(Default)` will suffer from the same restriction as +//! before: it will only work if `T: Default`, and `T` is not `Default`. Note that this is an +//! expected issue: `T` is merely a wrapper of many other types, such as `BlockNumberFor`. +//! `BlockNumberFor` should indeed implement `Default`, but `T` implementing `Default` is rather +//! meaningless. +//! +//! To fix this, frame provides a set of macros that are analogous to normal rust derive macros, but +//! work nicely on top of structs that are generic over `T: Config`. These macros are: +//! +//! - [`frame::prelude::DefaultNoBound`] +//! - [`frame::prelude::DebugNoBound`] +//! - [`frame::prelude::PartialEqNoBound`] +//! - [`frame::prelude::EqNoBound`] +//! - [`frame::prelude::CloneNoBound`] +//! - [`frame::prelude::PartialOrdNoBound`] +//! - [`frame::prelude::OrdNoBound`] +//! +//! The above traits are almost certainly needed for your tests: To print your type, assert equality +//! or clone it. +//! +//! We can fix the following example by using [`frame::prelude::DefaultNoBound`]. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! # use frame::prelude::*; +//! # #[pallet::config] +//! # pub trait Config: frame_system::Config {} +//! # #[pallet::pallet] +//! # pub struct Pallet(_); +//! #[derive( +//! codec::Encode, +//! codec::Decode, +//! codec::MaxEncodedLen, +//! scale_info::TypeInfo, +//! DefaultNoBound +//! )] +//! #[scale_info(skip_type_params(T))] +//! pub struct NewType(BlockNumberFor); +//! +//! #[pallet::storage] +//! pub type Something = StorageValue<_, NewType, ValueQuery>; +//! } +//! ``` +//! +//! Finally, if a custom type that is provided through `Config` is to be stored in the storage, it +//! is subject to the same trait requirements. The following does not work: +//! ```compile_fail +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! #[pallet::config] +//! pub trait Config: frame_system::Config { +//! type CustomType; +//! } +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! #[pallet::storage] +//! pub type Something = StorageValue<_, T::CustomType>; +//! } +//! ``` +//! +//! But adding the right trait bounds will fix it. +//! ```rust +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! #[pallet::config] +//! pub trait Config: frame_system::Config { +//! type CustomType: codec::FullCodec +//! + codec::MaxEncodedLen +//! + scale_info::TypeInfo +//! + Debug +//! + Default; +//! } +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! #[pallet::storage] +//! pub type Something = StorageValue<_, T::CustomType>; +//! } +//! ``` diff --git a/docs/sdk/src/reference_docs/frame_system_accounts.rs b/docs/sdk/src/reference_docs/frame_system_accounts.rs index ae9d2c9e0cb3c..523fe70430849 100644 --- a/docs/sdk/src/reference_docs/frame_system_accounts.rs +++ b/docs/sdk/src/reference_docs/frame_system_accounts.rs @@ -1,5 +1,7 @@ //! # FRAME Accounts //! +//! //! ๐Ÿšง Work In Progress ๐Ÿšง +//! //! How `frame_system` handles accountIds. Nonce. 
Consumers and Providers, reference counting. // - poorly understood topics, needs one great article to rul them all. diff --git a/docs/sdk/src/reference_docs/light_nodes.rs b/docs/sdk/src/reference_docs/light_nodes.rs deleted file mode 100644 index d6670bf03ab1a..0000000000000 --- a/docs/sdk/src/reference_docs/light_nodes.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # Light Clients -//! -//! -//! Notes: should contain only high level information about light clients, then link to how to set -//! it up in PAPI and SubXT -//! -//! diff --git a/docs/sdk/src/reference_docs/metadata.rs b/docs/sdk/src/reference_docs/metadata.rs index 702c1c30fd9cf..96f92ac0c412b 100644 --- a/docs/sdk/src/reference_docs/metadata.rs +++ b/docs/sdk/src/reference_docs/metadata.rs @@ -1 +1,25 @@ //! # Metadata +//! +//! The existence of metadata in polkadot-sdk goes back to the (forkless) upgrade-ability of all +//! Substrate-based blockchains, which is achieved through +//! [`crate::reference_docs::wasm_meta_protocol`]. You can learn more about the details of how to +//! deal with these upgrades in [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! +//! Another consequence of upgrade-ability is that as a UI, wallet, or generally an offchain entity, +//! it is hard to know the types internal to the runtime, specifically in light of the fact that +//! they can change at any point in time. +//! +//! This is why all Substrate-based runtimes must expose a [`sp_api::Metadata`] api, which mandates +//! the runtime to return a description of itself. The return type of this api is `Vec`, meaning +//! that it is up to the runtime developer to decide on the format of this. +//! +//! All [`crate::polkadot_sdk::frame_runtime`] based runtimes expose a specific metadata language, +//! maintained in which is adopted in the Polkadot +//! ecosystem. +//! +//! ## Metadata Explorers: +//! +//! A few noteworthy tools that inspect the (FRAME-based) metadata of a chain: +//! +//! +//! diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 8e0431c48b6f6..c69c79365427e 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -40,12 +40,15 @@ pub mod runtime_vs_smart_contract; pub mod extrinsic_encoding; /// Learn about the signed extensions that form a part of extrinsics. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; +/// Learn about the details of what derives are needed for a type to be store-able in `frame` +/// storage. +pub mod frame_storage_derives; + /// Learn about how to write safe and defensive code in your FRAME runtime. pub mod defensive_programming; @@ -59,9 +62,11 @@ pub mod fee_less_runtime; /// Learn about metadata, the main means through which an upgradeable runtime communicates its /// properties to the outside world. -// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/47 pub mod metadata; +/// Learn about how to add custom host functions to the node. +pub mod custom_host_functions; + /// Learn about how frame-system handles `account-ids`, nonces, consumers and providers. pub mod frame_system_accounts; @@ -78,26 +83,12 @@ pub mod frame_tokens; /// Learn about chain specification file and the genesis state of the blockchain. pub mod chain_spec_genesis; -/// Learn about all the memory limitations of the WASM runtime when it comes to memory usage. 
-// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/52 -pub mod wasm_memory; - /// Learn about Substrate's CLI, and how it can be extended. -// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/53 pub mod cli; -/// Learn about Substrate's consensus algorithms, and how you can switch between two. -// TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 -pub mod consensus_swapping; - /// Learn about Runtime Upgrades and best practices for writing Migrations. pub mod frame_runtime_upgrades_and_migrations; -/// Learn about light nodes, how they function, and how Substrate-based chains come -/// light-node-first out of the box. -// TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 -pub mod light_nodes; - /// Learn about the offchain workers, how they function, and how to use them, as provided by the /// [`frame`] APIs. pub mod frame_offchain_workers; @@ -106,6 +97,9 @@ pub mod frame_offchain_workers; /// together. pub mod frame_pallet_coupling; +/// Learn about how to do logging in FRAME-based runtimes. +pub mod frame_logging; + /// Learn about the Polkadot Umbrella crate that re-exports all other crates. pub mod umbrella_crate; diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs index 43a6bcc14c5d2..c644aeaa41650 100644 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -1,7 +1,59 @@ //! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic //! format with custom data that can be checked by the runtime. //! -//! # Example +//! # FRAME provided signed extensions +//! +//! FRAME by default already provides the following signed extensions: +//! +//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same +//! network. Determined based on genesis. +//! +//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable +//! mortality. +//! +//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a +//! transaction is not the *all zero account* (all bytes of the accountid are zero). +//! +//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay +//! of transactions and to provide ordering of transactions. +//! +//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for +//! the currently active runtime. +//! +//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the +//! correct encoding of the call. +//! +//! - [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block +//! before dispatching it. +//! +//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges +//! transaction fees from the signer based on the weight of the call using the native token. +//! +//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). +//! +//! - [`ChargeAssetTxPayment`(using +//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! 
native token). The asset is converted to the native token using a pool. +//! +//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions +//! to be processed without paying any fee. This requires that the `call` that should be +//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) +//! attribute. +//! +//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions +//! to include the so-called metadata hash. This is required by chains to support the generic +//! Ledger application and other similar offline wallets. +//! +//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A +//! signed extension for parachains that reclaims unused storage weight after executing a +//! transaction. +//! +//! For more information about these extensions, follow the link to the type documentation. +//! +//! # Building a custom signed extension //! //! Defining a couple of very simple signed extensions looks like the following: #![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs index 9751b0ad5ad6d..0b3445cfc4bc0 100644 --- a/docs/sdk/src/reference_docs/umbrella_crate.rs +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -28,8 +28,9 @@ //! `node` feature. For docs.rs the manifest contains specific configuration to make it show up //! all re-exports. //! -//! There is a specific `zepter` check in place to ensure that the features of the umbrella are -//! correctly configured. This check is run in CI and locally when running `zepter`. +//! There is a specific [`zepter`](https://github.com/ggwpez/zepter) check in place to ensure that +//! the features of the umbrella are correctly configured. This check is run in CI and locally when +//! running `zepter`. //! //! ## Generation //! diff --git a/docs/sdk/src/reference_docs/wasm_memory.rs b/docs/sdk/src/reference_docs/wasm_memory.rs deleted file mode 100644 index 4f4cda31094e4..0000000000000 --- a/docs/sdk/src/reference_docs/wasm_memory.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! # WASM Memory Limitations. -//! -//! Notes: -//! -//! - Stack: Need to use `Box<_>` -//! - Heap: Substrate imposes a limit. PvF execution has its own limits -//! - Heap: There is also a maximum amount that a single allocation can have. diff --git a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs index 37d1460f0e1a3..0e91e65c55e36 100644 --- a/docs/sdk/src/reference_docs/wasm_meta_protocol.rs +++ b/docs/sdk/src/reference_docs/wasm_meta_protocol.rs @@ -1,11 +1,13 @@ //! # WASM Meta Protocol //! //! All Substrate based chains adhere to a unique architectural design novel to the Polkadot -//! ecosystem. We refer to this design as the "WASM Meta Protocol". +//! ecosystem. We refer to this design as the "**WASM Meta Protocol**". //! //! Consider the fact that a traditional blockchain software is usually a monolithic artifact. -//! Upgrading any part of the system implies upgrading the entire system. This has historically led -//! to cumbersome forkful upgrades to be the status quo in the blockchain ecosystem. +//! **Upgrading any part of the system implies upgrading the entire system**. This has historically +//! led to cumbersome forkful upgrades to be the status quo in blockchain ecosystems. In other +//! 
words, the entire node software is the specification of the blockchain's [`state transition +//! function`](crate::reference_docs::blockchain_state_machines). //! //! Moreover, the idea of "storing code in the state" is explored in the context of smart contracts //! platforms, but has not been expanded further. @@ -15,17 +17,16 @@ //! that a smart contract platform stores the code of individual contracts in its state. As noted in //! [`crate::reference_docs::blockchain_state_machines`], this state transition function is called //! the **Runtime**, and WASM is chosen as the bytecode. The Runtime is stored under a special key -//! in the state (see -//! [`sp_core::storage::well_known_keys`](../../../sp_core/index.html)) and can be -//! updated as a part of the state transition function's execution, just like a user's account -//! balance can be updated. +//! in the state (see [`sp_core::storage::well_known_keys`]) and can be updated as a part of the +//! state transition function's execution, just like a user's account balance can be updated. //! //! > Note that while we drew an analogy between smart contracts and runtimes in the above, there //! > are fundamental differences between the two, explained in //! > [`crate::reference_docs::runtime_vs_smart_contract`]. //! -//! The rest of the system that is NOT the state transition function is called the **node**, and -//! is a normal binary that is compiled from Rust to different hardware targets. +//! The rest of the system that is NOT the state transition function is called the +//! [**Node**](crate::reference_docs::glossary#node), and is a normal binary that is compiled from +//! Rust to different hardware targets. //! //! This design enables all Substrate-based chains to be fork-less-ly upgradeable, because the //! Runtime can be updates on the fly, within the execution of a block, and the node is (for the @@ -47,15 +48,18 @@ #![doc = simple_mermaid::mermaid!("../../../mermaid/substrate_client_runtime.mmd")] //! //! A runtime must have a set of runtime APIs in order to have any meaningful blockchain -//! functionality, but it can also expose more APIs. See TODO as an example of how to add custom -//! runtime APIs to your FRAME-based runtime. +//! functionality, but it can also expose more APIs. See +//! [`crate::reference_docs::custom_runtime_api_rpc`] as an example of how to add custom runtime +//! APIs to your FRAME-based runtime. //! //! Similarly, for a runtime to be "compatible" with a node, the node must implement the full set of //! host functions that the runtime at any point in time requires. Given the fact that a runtime can //! evolve in time, and a blockchain node (typically) wishes to be capable of re-executing all the //! previous blocks, this means that a node must always maintain support for the old host functions. -//! This also implies that adding a new host function is a big commitment and should be done with -//! care. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! **This implies that adding a new host function is a big commitment and should be done with +//! care**. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! Learn how to add a new host function to your runtime in +//! [`crate::reference_docs::custom_host_functions`]. //! //! ## Node vs. Runtime //! @@ -90,11 +94,11 @@ //! //! In fact, [`sp_core::storage::well_known_keys`] are the only state keys that the node side is //! aware of. 
The rest of the state, including what logic the runtime has, what balance each user -//! has and such are all only comprehensible to the runtime. +//! has and such, are all only comprehensible to the runtime. #![doc = simple_mermaid::mermaid!("../../../mermaid/state.mmd")] //! //! In the above diagram, all of the state keys and values are opaque bytes to the node. The node -//! does not know what they mean, and it does not now what is the type of the corresponding value +//! does not know what they mean, and it does not know what is the type of the corresponding value //! (e.g. if it is a number of a vector). Contrary, the runtime knows both the meaning of their //! keys, and the type of the values. //! @@ -105,9 +109,50 @@ //! //! ## Native Runtime //! -//! TODO +//! Historically, the node software also kept within it a native copy of the runtime, built at +//! compile time. This used to be called the "Native Runtime". The main purpose of the +//! native runtime used to be leveraging the faster execution time and better debugging +//! infrastructure of native code. However, neither of the two arguments holds strongly anymore, and the +//! native runtime is being fully removed from the node-sdk. //! +//! See: +//! +//! > Also, note that the flag [`sc_cli::ExecutionStrategy::Native`] is already a noop and all +//! > chains built with Substrate only use WASM execution. +//! +//! ### Runtime Versions +//! +//! An important detail of native execution worth learning about is that the node software, +//! obviously, only uses the native runtime if it is the same code as the wasm blob stored +//! onchain. Otherwise, nodes that run the native runtime would arrive at a different state transition. How +//! do nodes determine if two runtimes are the same? Through the very important +//! [`sp_version::RuntimeVersion`]. All runtimes expose their version via a runtime API +//! ([`sp_api::Core::version`]) that returns this struct. The node software, or other applications, +//! inspect this struct to examine the identity of a runtime, and to determine if two runtimes are +//! the same. Namely, [`sp_version::RuntimeVersion::spec_version`] is the main key that implies two +//! runtimes are the same. +//! +//! Therefore, it is of utmost importance to make sure that, before any runtime upgrade, the spec +//! version is updated. //! //! ## Example: Block Execution. //! -//! TODO +//! As a final example to recap, let's look at how Substrate-based nodes execute blocks. Blocks are +//! received by the node-side software as opaque blobs from the networking layer. +//! +//! At some point, based on the consensus algorithm's rules, the node decides to import (aka. +//! *validate*) a block. +//! +//! * First, the node fetches the state at the parent hash of the block that is to be +//! imported. +//! * The runtime is fetched from this state, and placed into a WASM execution environment. +//! * The [`sp_api::Core::execute_block`] runtime API is called and the block is passed in as an +//! argument. +//! * The runtime will then execute the block and update the state accordingly. Any state update is +//! issued via the [`sp_io::storage`] host functions. +//! * Both the runtime and the node check that the state root after block execution matches +//! the one claimed in the block header. +//! +//! > Example taken from [this +//! > lecture](https://polkadot-blockchain-academy.github.io/pba-book/substrate/wasm/page.html#example-2-block-import-9) +//! > of the Polkadot Blockchain Academy.
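The "Runtime Versions" notes added above hinge on bumping `spec_version` before every upgrade. As a minimal, illustrative sketch (not part of this diff), a FRAME-based runtime typically declares its version roughly as follows; the field values and the `my-runtime` name are assumptions, and `RUNTIME_API_VERSIONS` is assumed to be generated by the runtime's `impl_runtime_apis!` block:

```rust
use sp_runtime::create_runtime_str;
use sp_version::RuntimeVersion;

// Hypothetical version declaration; `spec_version` is the field that decides
// whether two runtimes are considered "the same".
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("my-runtime"),
    impl_name: create_runtime_str!("my-runtime"),
    authoring_version: 1,
    // Bumped (e.g. 100 -> 101) as part of a hypothetical runtime upgrade.
    spec_version: 101,
    impl_version: 0,
    // Assumed to come from `impl_runtime_apis!` elsewhere in the runtime crate.
    apis: RUNTIME_API_VERSIONS,
    transaction_version: 1,
    state_version: 1,
};
```

Forgetting this bump means nodes and tooling cannot tell the old and new runtimes apart, which is exactly the failure mode the added documentation warns about.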
diff --git a/master.wasm b/master.wasm new file mode 100644 index 0000000000000..7ebb14371243a Binary files /dev/null and b/master.wasm differ diff --git a/modified.wasm b/modified.wasm new file mode 100644 index 0000000000000..7ebb14371243a Binary files /dev/null and b/modified.wasm differ diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 3aeec8d5961e3..3a939464868fe 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -25,32 +25,32 @@ default-run = "polkadot" workspace = true [dependencies] -color-eyre = { version = "0.6.1", default-features = false } -tikv-jemallocator = { version = "0.5.0", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] } +color-eyre = { workspace = true } +tikv-jemallocator = { optional = true, features = ["unprefixed_malloc_on_supported_platforms"], workspace = true } # Crates in our workspace, defined as dependencies so we can pass them feature flags. -polkadot-cli = { path = "cli", features = ["rococo-native", "westend-native"] } -polkadot-node-core-pvf = { path = "node/core/pvf" } -polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } -polkadot-overseer = { path = "node/overseer" } +polkadot-cli = { features = ["rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-node-core-pvf = { workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } # Needed for worker binaries. -polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } -polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"] } [dev-dependencies] -assert_cmd = "2.0.4" -nix = { version = "0.28.0", features = ["signal"] } -tempfile = "3.2.0" -tokio = "1.37" -substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } -polkadot-core-primitives = { path = "core-primitives" } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [badges] maintenance = { status = "actively-developed" } @@ -68,6 +68,11 @@ jemalloc-allocator = [ "polkadot-overseer/jemalloc-allocator", ] +# Generate the metadata hash needed for CheckMetadataHash +# in the builtin test runtimes (westend and rococo). 
+metadata-hash = [ + "polkadot-cli/metadata-hash", +] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 1917dcd579c4c..da37f6062c572 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -18,37 +18,38 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -cfg-if = "1.0" -clap = { version = "4.5.3", features = ["derive"], optional = true } +cfg-if = { workspace = true } +clap = { features = ["derive"], optional = true, workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -futures = "0.3.30" -pyroscope = { version = "0.5.3", optional = true } -pyroscope_pprofrs = { version = "0.2", optional = true } +futures = { workspace = true } +pyroscope = { optional = true, workspace = true } +pyroscope_pprofrs = { optional = true, workspace = true } -polkadot-service = { path = "../node/service", default-features = false, optional = true } +polkadot-service = { optional = true, workspace = true } -sp-core = { path = "../../substrate/primitives/core" } -sp-io = { path = "../../substrate/primitives/io" } -sp-keyring = { path = "../../substrate/primitives/keyring" } -sp-maybe-compressed-blob = { path = "../../substrate/primitives/maybe-compressed-blob" } -frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli", optional = true } -sc-cli = { path = "../../substrate/client/cli", optional = true } -sc-service = { path = "../../substrate/client/service", optional = true } -polkadot-node-metrics = { path = "../node/metrics" } -polkadot-node-primitives = { path = "../node/primitives" } -sc-tracing = { path = "../../substrate/client/tracing", optional = true } -sc-sysinfo = { path = "../../substrate/client/sysinfo" } -sc-executor = { path = "../../substrate/client/executor" } -sc-storage-monitor = { path = "../../substrate/client/storage-monitor" } -sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +frame-benchmarking-cli = { optional = true, workspace = true, default-features = true } +sc-cli = { optional = true, workspace = true, default-features = true } +sc-service = { optional = true, workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-tracing = { optional = true, workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-storage-monitor = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = ["cli", "db", "full-node"] db = ["polkadot-service/db"] +metadata-hash = ["polkadot-service/metadata-hash"] service = ["dep:polkadot-service"] cli = [ "clap", diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index b89054b4dc321..62d99122c3012 100644 --- a/polkadot/cli/src/command.rs +++ 
b/polkadot/cli/src/command.rs @@ -192,7 +192,7 @@ where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { let runner = cli - .create_runner_with_logger_hook::(&cli.run.base, logger_hook) + .create_runner_with_logger_hook::<_, _, F>(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 7d94196fa26db..42ca27953738e 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -10,11 +10,10 @@ license.workspace = true workspace = true [dependencies] -sp-core = { path = "../../substrate/primitives/core", default-features = false } -sp-std = { path = "../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } [features] default = ["std"] @@ -23,5 +22,4 @@ std = [ "scale-info/std", "sp-core/std", "sp-runtime/std", - "sp-std/std", ] diff --git a/polkadot/core-primitives/src/lib.rs b/polkadot/core-primitives/src/lib.rs index 072c045a8c703..666636def4604 100644 --- a/polkadot/core-primitives/src/lib.rs +++ b/polkadot/core-primitives/src/lib.rs @@ -20,6 +20,8 @@ //! //! These core Polkadot types are used by the relay chain and the Parachains. +extern crate alloc; + use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ @@ -81,8 +83,8 @@ impl std::fmt::Display for CandidateHash { } } -impl sp_std::fmt::Debug for CandidateHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for CandidateHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{:?}", self.0) } } @@ -119,7 +121,7 @@ pub type Remark = [u8; 32]; /// A message sent from the relay-chain down to a parachain. /// /// The size of the message is limited by the `config.max_downward_message_size` parameter. -pub type DownwardMessage = sp_std::vec::Vec; +pub type DownwardMessage = alloc::vec::Vec; /// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number /// when the message was sent. @@ -139,7 +141,7 @@ pub struct InboundHrmpMessage { /// enacted. pub sent_at: BlockNumber, /// The message payload. - pub data: sp_std::vec::Vec, + pub data: alloc::vec::Vec, } /// An HRMP message seen from the perspective of a sender. @@ -148,7 +150,7 @@ pub struct OutboundHrmpMessage { /// The para that will get this message in its downward message queue. pub recipient: Id, /// The message payload. - pub data: sp_std::vec::Vec, + pub data: alloc::vec::Vec, } /// `V2` primitives. 
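The `polkadot-core-primitives` hunk above drops `sp-std` in favour of `core` and `alloc`. For readers unfamiliar with the pattern, here is a small, self-contained sketch of the same migration; it is not taken from this diff, and the `Payload` type is made up purely for illustration:

```rust
#![cfg_attr(not(feature = "std"), no_std)]

// Pull in the `alloc` crate so heap types such as `Vec` are available without
// `std`, mirroring the `extern crate alloc;` added to `polkadot-core-primitives`.
extern crate alloc;

use alloc::vec::Vec;
use core::fmt;

/// A made-up wrapper type, used only to demonstrate `core`/`alloc` imports.
pub struct Payload(pub Vec<u8>);

impl fmt::Debug for Payload {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Payload({} bytes)", self.0.len())
    }
}
```

Using `core::fmt` and `alloc::vec::Vec` directly removes the need for the `sp-std` facade crate, which is why the manifest above also deletes the `sp-std` dependency and its `std` feature entry.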
diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 3c14fd95eee3b..969742c5bb0aa 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -polkadot-primitives = { path = "../primitives" } -polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } -novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "std"] } -sp-core = { path = "../../substrate/primitives/core" } -sp-trie = { path = "../../substrate/primitives/trie" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +novelpoly = { workspace = true } +codec = { features = ["derive", "std"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] -quickcheck = { version = "1.0.3", default-features = false } -criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] } +quickcheck = { workspace = true } +criterion = { features = ["cargo_bench_support"], workspace = true } [[bench]] name = "scaling_with_validators" diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index bd254f6d51651..6f451f0319b23 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -10,10 +10,10 @@ publish = false workspace = true [dependencies] -polkadot-erasure-coding = { path = ".." 
} -honggfuzz = "0.5" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../../node/primitives" } +polkadot-erasure-coding = { workspace = true, default-features = true } +honggfuzz = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } [[bin]] name = "reconstruct" diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index da5d10d799497..4b0a5f7248ab3 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -10,21 +10,21 @@ description = "Collator-side subsystem that handles incoming candidate submissio workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-primitives = { path = "../../primitives" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +codec = { features = ["bit-vec", "derive"], workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -assert_matches = "1.4.0" -rstest = "0.18.2" -sp-keyring = { path = "../../../substrate/primitives/keyring" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +sp-keyring = { workspace = true, default-features = true } diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 0c2f8ee14a580..d38516a4ff713 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -147,11 +147,7 @@ impl CollationGenerationSubsystem { Ok(FromOrchestra::Communication { msg: CollationGenerationMessage::Reinitialize(config), }) => { - if self.config.is_none() { - gum::error!(target: LOG_TARGET, "no initial initialization"); - } else { - self.config = Some(Arc::new(config)); - } + self.config = Some(Arc::new(config)); false }, Ok(FromOrchestra::Communication { diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 7da3d7ddd7814..65985c0a5db93 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -10,51 
+10,51 @@ description = "Approval Voting Subsystem of the Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -schnellru = "0.2.1" -merlin = "3.0" -schnorrkel = "0.11.4" -kvdb = "0.13.0" -derive_more = "0.99.17" +futures = { workspace = true } +futures-timer = { workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +gum = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +schnellru = { workspace = true } +merlin = { workspace = true, default-features = true } +schnorrkel = { workspace = true, default-features = true } +kvdb = { workspace = true } +derive_more = { workspace = true, default-features = true } thiserror = { workspace = true } -itertools = "0.11" +itertools = { workspace = true } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } -sc-keystore = { path = "../../../../substrate/client/keystore", default-features = false } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common", default-features = false } -sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots", default-features = false } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sc-keystore = { workspace = true } +sp-consensus = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-application-crypto = { features = ["full_crypto"], workspace = true } +sp-runtime = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = { version = "0.3.1" } -rand = "0.8.5" +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1.79" -parking_lot = "0.12.1" -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -assert_matches = "1.4.0" -kvdb-memorydb = "0.13.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +async-trait = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sp-keyring = { 
workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } log = { workspace = true, default-features = true } -env_logger = "0.11" +env_logger = { workspace = true } -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "approval-voting-regression-bench" diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 62f7ff0b61e64..4274c8b576a3d 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -10,32 +10,32 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -kvdb = "0.13.0" +futures = { workspace = true } +futures-timer = { workspace = true } +kvdb = { workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = "1.0.0" +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common", default-features = false } -polkadot-node-jaeger = { path = "../../jaeger" } +codec = { features = ["derive"], workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sp-consensus = { workspace = true } +polkadot-node-jaeger = { workspace = true, default-features = true } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -kvdb-memorydb = "0.13.0" +env_logger = { workspace = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } -sp-core = { path = "../../../../substrate/primitives/core" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -parking_lot = "0.12.1" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-core = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } 
+polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index ffd6de0768894..1b52afc309bc9 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -10,28 +10,28 @@ description = "The Candidate Backing Subsystem. Tracks parachain candidates that workspace = true [dependencies] -futures = "0.3.30" -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-statement-table = { path = "../../../statement-table" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-statement-table = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -schnellru = "0.2.1" +fatality = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -futures = { version = "0.3.30", features = ["thread-pool"] } -assert_matches = "1.4.0" -rstest = "0.18.2" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 38e8a93bb0482..5bcd47a2434c7 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -102,6 +102,7 @@ use polkadot_node_subsystem_util::{ runtime::{ self, prospective_parachains_mode, request_min_backing_votes, ProspectiveParachainsMode, }, + vstaging::{fetch_claim_queue, ClaimQueueSnapshot}, Validator, }; use polkadot_primitives::{ @@ -212,8 +213,6 @@ struct PerRelayParentState { parent: Hash, /// Session index. 
session_index: SessionIndex, - /// The `ParaId` assigned to the local validator at this relay parent. - assigned_para: Option, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. @@ -233,8 +232,11 @@ struct PerRelayParentState { /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, /// which represent the assigned core index. True if ElasticScalingMVP is enabled. inject_core_index: bool, - /// The core states for all cores. - cores: Vec, + /// The number of cores. + n_cores: u32, + /// Claim queue state. If the runtime API is not available, it'll be populated with info from + /// availability cores. + claim_queue: ClaimQueueSnapshot, /// The validator index -> group mapping at this relay parent. validator_to_group: Arc>>, /// The associated group rotation information. @@ -825,8 +827,8 @@ async fn handle_communication( CandidateBackingMessage::Statement(relay_parent, statement) => { handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; }, - CandidateBackingMessage::GetBackedCandidates(requested_candidates, tx) => - handle_get_backed_candidates_message(state, requested_candidates, tx, metrics)?, + CandidateBackingMessage::GetBackableCandidates(requested_candidates, tx) => + handle_get_backable_candidates_message(state, requested_candidates, tx, metrics)?, CandidateBackingMessage::CanSecond(request, tx) => handle_can_second_request(ctx, state, request, tx).await, } @@ -1004,20 +1006,19 @@ macro_rules! try_runtime_api { fn core_index_from_statement( validator_to_group: &IndexedVec>, group_rotation_info: &GroupRotationInfo, - cores: &[CoreState], + n_cores: u32, + claim_queue: &ClaimQueueSnapshot, statement: &SignedFullStatementWithPVD, ) -> Option { let compact_statement = statement.as_unchecked(); let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); - let n_cores = cores.len(); - gum::trace!( target:LOG_TARGET, ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len(), + n_cores, ?candidate_hash, "Extracting core index from statement" ); @@ -1029,7 +1030,7 @@ fn core_index_from_statement( ?group_rotation_info, ?statement, ?validator_to_group, - n_cores = ?cores.len() , + n_cores, ?candidate_hash, "Invalid validator index: {:?}", statement_validator_index @@ -1038,37 +1039,25 @@ fn core_index_from_statement( }; // First check if the statement para id matches the core assignment. 
- let core_index = group_rotation_info.core_for_group(*group_index, n_cores); + let core_index = group_rotation_info.core_for_group(*group_index, n_cores as _); - if core_index.0 as usize > n_cores { + if core_index.0 > n_cores { gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); return None } if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { let candidate_para_id = candidate.descriptor.para_id; - let assigned_para_id = match &cores[core_index.0 as usize] { - CoreState::Free => { - gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); - return None - }, - CoreState::Occupied(occupied) => - if let Some(next) = &occupied.next_up_on_available { - next.para_id - } else { - return None - }, - CoreState::Scheduled(scheduled) => scheduled.para_id, - }; + let mut assigned_paras = claim_queue.iter_claims_for_core(&core_index); - if assigned_para_id != candidate_para_id { + if !assigned_paras.any(|id| id == &candidate_para_id) { gum::debug!( target: LOG_TARGET, ?candidate_hash, ?core_index, - ?assigned_para_id, + assigned_paras = ?claim_queue.iter_claims_for_core(&core_index).collect::>(), ?candidate_para_id, - "Invalid CoreIndex, core is assigned to a different para_id" + "Invalid CoreIndex, core is not assigned to this para_id" ); return None } @@ -1129,6 +1118,8 @@ async fn construct_per_relay_parent_state( Error::UtilError(TryFrom::try_from(e).expect("the conversion is infallible; qed")) })?; + let maybe_claim_queue = try_runtime_api!(fetch_claim_queue(ctx.sender(), parent).await); + let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct( &validators, @@ -1153,31 +1144,35 @@ async fn construct_per_relay_parent_state( let mut groups = HashMap::>::new(); let mut assigned_core = None; - let mut assigned_para = None; + + let has_claim_queue = maybe_claim_queue.is_some(); + let mut claim_queue = maybe_claim_queue.unwrap_or_default().0; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match core { - CoreState::Scheduled(scheduled) => scheduled.para_id, - CoreState::Occupied(occupied) => - if mode.is_enabled() { + let core_index = CoreIndex(idx as _); + + if !has_claim_queue { + match core { + CoreState::Scheduled(scheduled) => + claim_queue.insert(core_index, [scheduled.para_id].into_iter().collect()), + CoreState::Occupied(occupied) if mode.is_enabled() => { // Async backing makes it legal to build on top of // occupied core. 
if let Some(next) = &occupied.next_up_on_available { - next.para_id + claim_queue.insert(core_index, [next.para_id].into_iter().collect()) } else { continue } - } else { - continue }, - CoreState::Free => continue, - }; + _ => continue, + }; + } else if !claim_queue.contains_key(&core_index) { + continue + } - let core_index = CoreIndex(idx as _); let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assigned_para = Some(core_para_id); assigned_core = Some(core_index); } groups.insert(core_index, g.clone()); @@ -1212,7 +1207,6 @@ async fn construct_per_relay_parent_state( parent, session_index, assigned_core, - assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1221,7 +1215,8 @@ async fn construct_per_relay_parent_state( fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - cores, + n_cores: cores.len() as u32, + claim_queue: ClaimQueueSnapshot::from(claim_queue), validator_to_group: validator_to_group.clone(), group_rotation_info, })) @@ -1674,7 +1669,8 @@ async fn import_statement( let core = core_index_from_statement( &rp_state.validator_to_group, &rp_state.group_rotation_info, - &rp_state.cores, + rp_state.n_cores, + &rp_state.claim_queue, statement, ) .ok_or(Error::CoreIndexUnavailable)?; @@ -2098,12 +2094,14 @@ async fn handle_second_message( return Ok(()) } + let assigned_paras = rp_state.assigned_core.and_then(|core| rp_state.claim_queue.0.get(&core)); + // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != rp_state.assigned_para { + if !matches!(assigned_paras, Some(paras) if paras.contains(&candidate.descriptor().para_id)) { gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); @@ -2113,7 +2111,7 @@ async fn handle_second_message( gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, - our_assignment_para = ?rp_state.assigned_para, + our_assignment_paras = ?assigned_paras, collation = ?candidate.descriptor().para_id, "Current assignments vs collation", ); @@ -2160,7 +2158,7 @@ async fn handle_statement_message( } } -fn handle_get_backed_candidates_message( +fn handle_get_backable_candidates_message( state: &State, requested_candidates: HashMap>, tx: oneshot::Sender>>, diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index bb23c7fbeb24f..10eb45b82d125 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -42,7 +42,10 @@ use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; use sp_tracing as _; -use std::{collections::HashMap, time::Duration}; +use std::{ + collections::{BTreeMap, HashMap, VecDeque}, + time::Duration, +}; mod prospective_parachains; @@ -75,6 +78,7 @@ pub(crate) struct TestState { validator_groups: (Vec>, GroupRotationInfo), validator_to_group: IndexedVec>, availability_cores: Vec, + claim_queue: BTreeMap>, head_data: HashMap, signing_context: SigningContext, relay_parent: Hash, @@ -130,6 +134,10 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), ]; + let mut 
claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let mut head_data = HashMap::new(); head_data.insert(chain_a, HeadData(vec![4, 5, 6])); head_data.insert(chain_b, HeadData(vec![5, 6, 7])); @@ -153,6 +161,7 @@ impl Default for TestState { validator_groups: (validator_groups, group_rotation_info), validator_to_group, availability_cores, + claim_queue, head_data, validation_data, signing_context, @@ -338,6 +347,26 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } async fn assert_validation_requests( @@ -674,7 +703,7 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate_a_hash, test_state.relay_parent)], @@ -730,11 +759,16 @@ fn get_backed_candidate_preserves_order() { // Assign the second core to the same para as the first one. test_state.availability_cores[1] = CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + *test_state.claim_queue.get_mut(&CoreIndex(1)).unwrap() = + [test_state.chain_ids[0]].into_iter().collect(); // Add another availability core for paraid 2. test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[1], collator: None, })); + test_state + .claim_queue + .insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect()); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -861,7 +895,7 @@ fn get_backed_candidate_preserves_order() { // Happy case, all candidates should be present. 
let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ ( test_state.chain_ids[0], @@ -912,7 +946,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -951,7 +985,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -996,7 +1030,7 @@ fn get_backed_candidate_preserves_order() { ], ] { let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( [ (test_state.chain_ids[0], candidates), (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), @@ -1103,7 +1137,8 @@ fn extract_core_index_from_statement_works() { let core_index_1 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_1, ) .unwrap(); @@ -1113,7 +1148,8 @@ fn extract_core_index_from_statement_works() { let core_index_2 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_2, ); @@ -1123,7 +1159,8 @@ fn extract_core_index_from_statement_works() { let core_index_3 = core_index_from_statement( &test_state.validator_to_group, &test_state.validator_groups.1, - &test_state.availability_cores, + test_state.availability_cores.len() as _, + &test_state.claim_queue.clone().into(), &signed_statement_3, ) .unwrap(); @@ -1284,7 +1321,7 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate_a.hash(), test_state.relay_parent)], @@ -1905,7 +1942,7 @@ fn backing_works_after_failed_validation() { // Try to get a set of backable candidates to trigger _some_ action in the subsystem // and check that it is still alive. 
let (tx, rx) = oneshot::channel(); - let msg = CandidateBackingMessage::GetBackedCandidates( + let msg = CandidateBackingMessage::GetBackableCandidates( std::iter::once(( test_state.chain_ids[0], vec![(candidate.hash(), test_state.relay_parent)], diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 74490c84eb18b..15bc0b4a11390 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -212,6 +212,26 @@ async fn activate_leaf( tx.send(Ok(Vec::new())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == hash => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); } } diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 335e733987b01..126a18a141661 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -10,15 +10,15 @@ description = "Bitfield signing subsystem for the Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -wasm-timer = "0.2.5" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +wasm-timer = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index a0b25e6c25f9c..e1a98f80783fa 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -10,29 +10,29 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", path = "../../../../substrate/primitives/maybe-compressed-blob" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } +sp-maybe-compressed-blob = { workspace 
= true, default-features = true } +codec = { features = ["bit-vec", "derive"], workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-overseer = { path = "../../overseer" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies] -polkadot-node-core-pvf = { path = "../pvf" } +polkadot-node-core-pvf = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = { version = "0.3.30", features = ["thread-pool"] } -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-keyring = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index c58024876b9c7..a8e911e0c5c95 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -10,20 +10,20 @@ description = "The Chain API subsystem provides access to chain related utility workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } [dev-dependencies] -futures = { version = "0.3.30", features = ["thread-pool"] } -maplit = "1.0.2" -codec = { package = "parity-scale-codec", version = "3.6.12" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = 
"../../../../substrate/primitives/core" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } +futures = { features = ["thread-pool"], workspace = true } +maplit = { workspace = true } +codec = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 2aa929653ccc2..755d5cadeaaf3 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -10,20 +10,20 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -kvdb = "0.13.0" +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +kvdb = { workspace = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +codec = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -parking_lot = "0.12.1" -assert_matches = "1" -kvdb-memorydb = "0.13.0" +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 2c08cfa9b1efa..eb4600b235b9f 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -10,33 +10,33 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -kvdb = "0.13.0" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +kvdb = { workspace = true } thiserror = { workspace = true } -schnellru = "0.2.1" -fatality = "0.1.1" +schnellru = { workspace = true } +fatality = { workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } 
+polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } -sc-keystore = { path = "../../../../substrate/client/keystore" } +sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -kvdb-memorydb = "0.13.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -futures-timer = "3.0.2" -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +kvdb-memorydb = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +futures-timer = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] # If not enabled, the dispute coordinator will do nothing. diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 4f6090f90e953..1e4953f40d0bd 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -10,13 +10,13 @@ description = "Parachains inherent data provider for Polkadot node" workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -async-trait = "0.1.79" -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } +async-trait = { workspace = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f3193153be894..97da5a1e94a07 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -10,25 +10,26 @@ description = "The Prospective Parachains subsystem. 
Tracks and handles prospect workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -bitvec = "1" +fatality = { workspace = true } +bitvec = { workspace = true, default-features = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +rstest = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index d5bb5ff76ba8e..e4b6deffdf4a5 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -44,6 +44,7 @@ use polkadot_node_subsystem_util::{ inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, @@ -870,37 +871,51 @@ async fn fetch_backing_state<Context>( async fn fetch_upcoming_paras<Context>( ctx: &mut Context, relay_parent: Hash, -) -> JfyiErrorResult<Vec<ParaId>> { - let (tx, rx) = oneshot::channel(); - - // This'll have to get more sophisticated with parathreads, - // but for now we can just use the `AvailabilityCores`.
- ctx.send_message(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) - .await; - - let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; - let mut upcoming = HashSet::new(); - for core in cores { - match core { - CoreState::Occupied(occupied) => { - if let Some(next_up_on_available) = occupied.next_up_on_available { - upcoming.insert(next_up_on_available.para_id); - } - if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { - upcoming.insert(next_up_on_time_out.para_id); +) -> JfyiErrorResult<HashSet<ParaId>> { + Ok(match fetch_claim_queue(ctx.sender(), relay_parent).await? { + Some(claim_queue) => { + // Runtime supports claim queue - use it + claim_queue + .iter_all_claims() + .flat_map(|(_, paras)| paras.into_iter()) + .copied() + .collect() + }, + None => { + // fallback to availability cores - remove this branch once claim queue is released + // everywhere + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) + .await; + + let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; + + let mut upcoming = HashSet::with_capacity(cores.len()); + for core in cores { + match core { + CoreState::Occupied(occupied) => { + // core sharing won't work optimally with this branch because the collations + // can't be prepared in advance. + if let Some(next_up_on_available) = occupied.next_up_on_available { + upcoming.insert(next_up_on_available.para_id); + } + if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { + upcoming.insert(next_up_on_time_out.para_id); + } + }, + CoreState::Scheduled(scheduled) => { + upcoming.insert(scheduled.para_id); + }, + CoreState::Free => {}, } - }, - CoreState::Scheduled(scheduled) => { - upcoming.insert(scheduled.para_id); - }, - CoreState::Free => {}, - } - } + } - Ok(upcoming.into_iter().collect()) + upcoming + }, + }) } // Fetch ancestors in descending order, up to the amount requested.
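The lib.rs hunk above swaps the `AvailabilityCores`-based lookup for a claim-queue-first strategy, keeping the cores query only as a fallback for runtimes that do not expose the claim queue API yet. Below is a minimal, self-contained sketch of that selection pattern; the `ParaId`, `ClaimQueueSnapshot` and `CoreState` definitions are simplified stand-ins for illustration, not the real subsystem types.

```rust
use std::collections::{BTreeMap, HashSet, VecDeque};

// Hypothetical, simplified stand-ins for the runtime API types used in the hunk above.
type ParaId = u32;
type CoreIndex = u32;

/// Simplified claim queue: para ids claimed per core, in order.
type ClaimQueueSnapshot = BTreeMap<CoreIndex, VecDeque<ParaId>>;

/// Simplified core state, mirroring the fallback branch of the diff.
enum CoreState {
    Occupied { next_up_on_available: Option<ParaId>, next_up_on_time_out: Option<ParaId> },
    Scheduled(ParaId),
    Free,
}

/// Prefer the claim queue when the runtime provides it; otherwise derive the
/// upcoming paras from the availability cores, as the fallback branch does.
fn upcoming_paras(claim_queue: Option<&ClaimQueueSnapshot>, cores: &[CoreState]) -> HashSet<ParaId> {
    match claim_queue {
        // Every para claiming any core, regardless of position in the queue.
        Some(queue) => queue.values().flatten().copied().collect(),
        None => {
            let mut upcoming = HashSet::with_capacity(cores.len());
            for core in cores {
                match core {
                    CoreState::Occupied { next_up_on_available, next_up_on_time_out } => {
                        upcoming.extend(next_up_on_available.iter().copied());
                        upcoming.extend(next_up_on_time_out.iter().copied());
                    },
                    CoreState::Scheduled(para) => {
                        upcoming.insert(*para);
                    },
                    CoreState::Free => {},
                }
            }
            upcoming
        },
    }
}

fn main() {
    let mut queue = ClaimQueueSnapshot::new();
    queue.insert(0, VecDeque::from([1, 2]));
    queue.insert(1, VecDeque::from([2]));
    assert_eq!(upcoming_paras(Some(&queue), &[]), HashSet::from([1, 2]));

    let cores = [CoreState::Scheduled(7), CoreState::Free];
    assert_eq!(upcoming_paras(None, &cores), HashSet::from([7]));
}
```

Returning a `HashSet` also de-duplicates paras that are claimed on more than one core, which matters once several cores can be assigned to the same para.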
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index d2fc3cbd36235..221fbf4c4e603 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -26,11 +26,15 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, - CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, + CommittedCandidateReceipt, CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use polkadot_primitives_test_helpers::make_candidate; -use std::sync::Arc; +use rstest::rstest; +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; use test_helpers::mock::new_leaf; const ALLOWED_ANCESTRY_LEN: u32 = 3; @@ -70,7 +74,8 @@ fn dummy_constraints( } struct TestState { - availability_cores: Vec, + claim_queue: BTreeMap>, + runtime_api_version: u32, validation_code_hash: ValidationCodeHash, } @@ -79,13 +84,23 @@ impl Default for TestState { let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); - let availability_cores = vec![ - CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), - CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect()); + claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect()); + let validation_code_hash = Hash::repeat_byte(42).into(); - Self { availability_cores, validation_code_hash } + Self { + validation_code_hash, + claim_queue, + runtime_api_version: RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + } + } +} + +impl TestState { + fn set_runtime_api_version(&mut self, version: u32) { + self.runtime_api_version = version; } } @@ -227,12 +242,39 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) ) if parent == *hash => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send( + Ok(test_state.runtime_api_version) + ).unwrap(); } ); + if test_state.runtime_api_version < RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.values().map(|paras| CoreState::Scheduled( + ScheduledCore { + para_id: *paras.front().unwrap(), + collator: None + } + )).collect())).unwrap(); + } + ); + } else { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); + } + ); + } + send_block_header(virtual_overseer, *hash, *number).await; // Check that subsystem job issues a request for ancestors. 
@@ -277,14 +319,16 @@ async fn handle_leaf_activation( ); } - for _ in 0..test_state.availability_cores.len() { + let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect(); + + for _ in 0..paras.len() { let message = virtual_overseer.recv().await; // Get the para we are working with since the order is not deterministic. - let para_id = match message { + let para_id = match &message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ParaBackingState(p_id, _), - )) => p_id, + )) => *p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -505,9 +549,18 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { // - Two for the same leaf A (one for parachain 1 and one for parachain 2) // - One for leaf B on parachain 1 // - One for leaf C on parachain 2 +// Also tests a claim queue size larger than 1. #[test] fn introduce_candidates_basic() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); + + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_a, chain_b].into_iter().collect()); + + test_state.claim_queue = claim_queue; + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2032,9 +2085,15 @@ fn check_pvd_query() { // Test simultaneously activating and deactivating leaves, and simultaneously deactivating // multiple leaves. -#[test] -fn correctly_updates_leaves() { - let test_state = TestState::default(); +// This test is parametrised with the runtime api version. For versions that don't support the claim +// queue API, we check that av-cores are used. +#[rstest] +#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] +#[case(8)] +fn correctly_updates_leaves(#[case] runtime_api_version: u32) { + let mut test_state = TestState::default(); + test_state.set_runtime_api_version(runtime_api_version); + let view = test_harness(|mut virtual_overseer| async move { // Leaf A let leaf_a = TestLeaf { @@ -2140,15 +2199,12 @@ fn correctly_updates_leaves() { fn persists_pending_availability_candidate() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2, 3]); @@ -2237,18 +2293,15 @@ fn persists_pending_availability_candidate() { } #[test] -fn backwards_compatible() { +fn backwards_compatible_with_non_async_backing_params() { let mut test_state = TestState::default(); let para_id = ParaId::from(1); - test_state.availability_cores = test_state - .availability_cores + test_state.claim_queue = test_state + .claim_queue .into_iter() - .filter(|core| match core { - CoreState::Scheduled(scheduled_core) => scheduled_core.para_id == para_id, - _ => false, - }) + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) .collect(); - assert_eq!(test_state.availability_cores.len(), 1); + assert_eq!(test_state.claim_queue.len(), 1); test_harness(|mut virtual_overseer| async move { let para_head = HeadData(vec![1, 2, 
3]); @@ -2350,20 +2403,30 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len - })).unwrap(); } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::AsyncBackingParams(tx) + )) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len})).unwrap(); + }); assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == hash => { + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) ) if parent == hash => { - tx.send(Ok(Vec::new())).unwrap(); + tx.send(Ok(BTreeMap::new())).unwrap(); } ); diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index a81d22c6f8283..5869e494c70ff 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -10,21 +10,21 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } +bitvec = { features = ["alloc"], workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } thiserror = { workspace = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -futures-timer = "3.0.2" -fatality = "0.1.1" -schnellru = "0.2.1" +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +futures-timer = { workspace = true } +fatality = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rstest = "0.18.2" +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index fa16b38d28bda..3f622a60a059b 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -822,7 +822,7 @@ async fn select_candidates( // now get the backed 
candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); - sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( + sender.send_unbounded_message(CandidateBackingMessage::GetBackableCandidates( selected_candidates.clone(), tx, )); diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 0d3675777cbf4..b38459302c8f1 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -578,7 +578,7 @@ mod select_candidates { )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => tx.send(Ok(mock_availability_cores.clone())).unwrap(), - AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( + AllMessages::CandidateBacking(CandidateBackingMessage::GetBackableCandidates( hashes, sender, )) => { diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 6dec407e2d2d1..73ef17a2843ae 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -10,24 +10,24 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" +futures = { workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../../gum" } +gum = { workspace = true, default-features = true } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-overseer = { path = "../../overseer" } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -futures-timer = "3.0.2" +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +futures-timer = { workspace = true } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 8aebe0b4c3f0c..7444f7927f568 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -10,60 +10,60 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" 
-array-bytes = "6.2.2" -blake3 = "1.5" -cfg-if = "1.0" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -is_executable = { version = "1.0.1", optional = true } -pin-project = "1.0.9" -rand = "0.8.5" -slotmap = "1.0" -tempfile = "3.3.0" +always-assert = { workspace = true } +array-bytes = { workspace = true, default-features = true } +blake3 = { workspace = true } +cfg-if = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +is_executable = { optional = true, workspace = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } +slotmap = { workspace = true } +tempfile = { workspace = true } thiserror = { workspace = true } -tokio = { version = "1.24.2", features = ["fs", "process"] } +tokio = { features = ["fs", "process"], workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-node-core-pvf-common = { path = "common" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-primitives = { path = "../../../primitives" } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob", optional = true } -polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } -polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } +sp-core = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.4.0" -criterion = { version = "0.5.1", default-features = false, features = [ +assert_matches = { workspace = true } +criterion = { features = [ "async_tokio", "cargo_bench_support", -] } -hex-literal = "0.4.1" +], workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } +polkadot-node-core-pvf-common = { features = ["test-utils"], workspace = true, default-features = true } # For benches and integration tests, depend on ourselves with the test-utils # feature. 
-polkadot-node-core-pvf = { path = "", features = ["test-utils"] } -rococo-runtime = { path = "../../../runtime/rococo" } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } +rococo-runtime = { workspace = true } -test-parachain-adder = { path = "../../../parachain/test-parachains/adder" } -test-parachain-halt = { path = "../../../parachain/test-parachains/halt" } +test-parachain-adder = { workspace = true } +test-parachain-halt = { workspace = true } [target.'cfg(target_os = "linux")'.dev-dependencies] libc = "0.2.153" procfs = "0.16.0" rusty-fork = "0.3.0" -sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } +sc-sysinfo = { workspace = true, default-features = true } [[bench]] name = "host_prepare_rococo_runtime" diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 491f6cc49642c..18b3f959c9551 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -10,29 +10,29 @@ license.workspace = true workspace = true [dependencies] -cpu-time = "1.0.0" -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../../gum" } -libc = "0.2.152" -nix = { version = "0.28.0", features = ["resource", "sched"] } +cpu-time = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +libc = { workspace = true } +nix = { features = ["resource", "sched"], workspace = true } thiserror = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } -polkadot-parachain-primitives = { path = "../../../../parachain" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sc-executor = { path = "../../../../../substrate/client/executor" } -sc-executor-common = { path = "../../../../../substrate/client/executor/common" } -sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } -sp-core = { path = "../../../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } -sp-externalities = { path = "../../../../../substrate/primitives/externalities" } -sp-io = { path = "../../../../../substrate/primitives/io" } -sp-tracing = { path = "../../../../../substrate/primitives/tracing" } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" @@ -41,8 +41,8 @@ landlock = "0.3.0" seccompiler = "0.4.0" [dev-dependencies] -assert_matches = "1.4.0" -tempfile = "3.3.0" +assert_matches = { workspace = true } +tempfile = { workspace = true } [features] # This feature is used to export test code to other crates without putting it in the production build. 
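The manifest hunks throughout this section all follow the same convention: the concrete version, path and any `package = "..."` rename move into the root `[workspace.dependencies]` table, and each crate only declares `workspace = true` plus the features it needs (the explicit `default-features = true` re-enables defaults where the shared entry disables them, e.g. for `no_std` consumers). A minimal sketch of the pattern, with illustrative entries rather than the real root manifest:

```toml
# Illustrative sketch only - not the actual polkadot-sdk root manifest.
# Root Cargo.toml: one shared definition per dependency.
[workspace.dependencies]
futures = { version = "0.3.30", default-features = false }
gum = { package = "tracing-gum", path = "polkadot/node/gum", default-features = false }

# A member crate's Cargo.toml: inherit the definition, opt back into features locally.
[dependencies]
futures = { workspace = true }
gum = { workspace = true, default-features = true }
```

Each member then resolves to the single shared definition, so bumping a version or path in the root manifest updates every crate that inherits it.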
diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index cf5b873e29d77..f24b66dc4a0e8 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -cpu-time = "1.0.0" -gum = { package = "tracing-gum", path = "../../../gum" } -cfg-if = "1.0" -nix = { version = "0.28.0", features = ["process", "resource", "sched"] } -libc = "0.2.152" +cpu-time = { workspace = true } +gum = { workspace = true, default-features = true } +cfg-if = { workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } +libc = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } -polkadot-node-core-pvf-common = { path = "../common" } -polkadot-parachain-primitives = { path = "../../../../parachain" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [features] builder = [] diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index f7daa0d7a89c3..9e0d01fc438b0 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -blake3 = "1.5" -cfg-if = "1.0" -gum = { package = "tracing-gum", path = "../../../gum" } -libc = "0.2.152" -rayon = "1.5.1" -tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } -tikv-jemallocator = { version = "0.5.0", optional = true } -nix = { version = "0.28.0", features = ["process", "resource", "sched"] } +blake3 = { workspace = true } +cfg-if = { workspace = true } +gum = { workspace = true, default-features = true } +libc = { workspace = true } +rayon = { workspace = true } +tracking-allocator = { workspace = true, default-features = true } +tikv-jemalloc-ctl = { optional = true, workspace = true } +tikv-jemallocator = { optional = true, workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } -polkadot-node-core-pvf-common = { path = "../common" } -polkadot-primitives = { path = "../../../../primitives" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -sc-executor-common = { path = "../../../../../substrate/client/executor/common" } -sc-executor-wasmtime = { path = "../../../../../substrate/client/executor/wasmtime" } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = "0.5.0" @@ -41,9 +41,9 @@ jemalloc-allocator = [ ] [dev-dependencies] -criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] } -rococo-runtime = { path = 
"../../../../runtime/rococo" } -sp-maybe-compressed-blob = { path = "../../../../../substrate/primitives/maybe-compressed-blob" } +criterion = { features = ["cargo_bench_support"], workspace = true } +rococo-runtime = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } [[bench]] name = "prepare_rococo_runtime" diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 5524cc705457e..834e4b300b9eb 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -schnellru = "0.2.1" +futures = { workspace = true } +gum = { workspace = true, default-features = true } +schnellru = { workspace = true } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } +sp-consensus-babe = { workspace = true, default-features = true } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-types = { path = "../../subsystem-types" } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } [dev-dependencies] -sp-api = { path = "../../../../substrate/primitives/api" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 0d887b9be5394..9b2df435a06a9 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -10,7 +10,7 @@ description = "Stick logs together with the TraceID as provided by tempo" workspace = true [dependencies] -coarsetime = "0.1.22" -tracing = "0.1.35" -gum-proc-macro = { package = "tracing-gum-proc-macro", path = "proc-macro" } -polkadot-primitives = { path = "../../primitives", features = ["std"] } +coarsetime = { workspace = true } +tracing = { workspace = true, default-features = true } +gum-proc-macro = { workspace = true, default-features = true } +polkadot-primitives = { features = ["std"], workspace = true, default-features = true } diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 70126b4f43367..da6364977cae2 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -18,12 +18,12 @@ proc-macro = true 
[dependencies] syn = { features = ["extra-traits", "full"], workspace = true } quote = { workspace = true } -proc-macro2 = "1.0.56" -proc-macro-crate = "3.0.0" -expander = "2.0.0" +proc-macro2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } [dev-dependencies] -assert_matches = "1.5.0" +assert_matches = { workspace = true } [features] diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 18b0c417aaf3d..90a6c80e3d0bd 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -10,15 +10,15 @@ description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" workspace = true [dependencies] -mick-jaeger = "0.1.8" -lazy_static = "1.4" -parking_lot = "0.12.1" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sp-core = { path = "../../../substrate/primitives/core" } +mick-jaeger = { workspace = true } +lazy_static = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = "1.37" +tokio = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index fec148f7d3815..49434606a61c8 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -29,40 +29,40 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { path = "../../cli", features = ["malus", "rococo-native", "westend-native"] } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator" } -polkadot-node-core-candidate-validation = { path = "../core/candidate-validation" } -polkadot-node-core-backing = { path = "../core/backing" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-primitives = { path = "../../primitives" } -color-eyre = { version = "0.6.1", default-features = false } -assert_matches = "1.5" -async-trait = "0.1.79" -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -rand = "0.8.5" +polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { 
workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { workspace = true, default-features = true } +polkadot-node-core-backing = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +color-eyre = { workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } # Required for worker binaries to build. -polkadot-node-core-pvf-common = { path = "../core/pvf/common" } -polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } -polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } +polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.30", features = ["thread-pool"] } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/node/malus/src/interceptor.rs b/polkadot/node/malus/src/interceptor.rs index b44ffc8956b52..2181118646d56 100644 --- a/polkadot/node/malus/src/interceptor.rs +++ b/polkadot/node/malus/src/interceptor.rs @@ -90,6 +90,10 @@ where >::Error: std::fmt::Debug, { async fn send_message(&mut self, msg: OutgoingMessage) { + self.send_message_with_priority::(msg).await; + } + + async fn send_message_with_priority(&mut self, msg: OutgoingMessage) { let msg = < <>::Message as overseer::AssociateOutgoing >::OutgoingMessages as From>::from(msg); @@ -103,7 +107,14 @@ where } } - fn try_send_message(&mut self, msg: OutgoingMessage) -> Result<(), TrySendError> { + fn try_send_message( + &mut self, + msg: OutgoingMessage, + ) -> Result<(), polkadot_node_subsystem_util::metered::TrySendError> { + self.try_send_message_with_priority::(msg) + } + + fn try_send_message_with_priority(&mut self, msg: OutgoingMessage) -> Result<(), TrySendError> { let msg = < <>::Message as overseer::AssociateOutgoing >::OutgoingMessages as From>::from(msg); diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 55df8d3daf6d1..41b08b66e9b48 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -10,32 +10,34 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = 
"3.0.2" -gum = { package = "tracing-gum", path = "../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +metered = { features = ["futures_channel"], workspace = true } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. -sc-service = { path = "../../../substrate/client/service" } -sc-cli = { path = "../../../substrate/client/cli" } +sc-service = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -sc-tracing = { path = "../../../substrate/client/tracing" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -polkadot-primitives = { path = "../../primitives" } -bs58 = { version = "0.5.0", features = ["alloc"] } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +bs58 = { features = ["alloc"], workspace = true, default-features = true } log = { workspace = true, default-features = true } [dev-dependencies] -assert_cmd = "2.0.4" -tempfile = "3.2.0" -hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } -tokio = "1.37" -polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } -substrate-test-utils = { path = "../../../substrate/test-utils" } -sc-service = { path = "../../../substrate/client/service" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -prometheus-parse = { version = "0.2.2" } +assert_cmd = { workspace = true } +tempfile = { workspace = true } +hyper-util = { features = ["client-legacy", "tokio"], workspace = true } +hyper = { workspace = true } +http-body-util = { workspace = true } +tokio = { workspace = true, default-features = true } +polkadot-test-service = { features = ["runtime-metrics"], workspace = true } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +prometheus-parse = { workspace = true } [features] default = [] diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index fde7c31441346..e720924feb60c 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -16,7 +16,9 @@ //! Polkadot runtime metrics integration test. -use hyper::{Client, Uri}; +use http_body_util::BodyExt; +use hyper::Uri; +use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; use sp_keyring::AccountKeyring::*; @@ -66,14 +68,20 @@ async fn runtime_can_publish_metrics() { } async fn scrape_prometheus_metrics(metrics_uri: &str) -> HashMap { - let res = Client::new() + let res = Client::builder(TokioExecutor::new()) + .build_http::>() .get(Uri::try_from(metrics_uri).expect("bad URI")) .await .expect("GET request failed"); // Retrieve the `HTTP` response body. 
let body = String::from_utf8( - hyper::body::to_bytes(res).await.expect("can't get body as bytes").to_vec(), + res.into_body() + .collect() + .await + .expect("can't get body as bytes") + .to_bytes() + .to_vec(), ) .expect("body is not an UTF8 string"); diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index d80519b9e2e95..a85cde303b61b 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -10,32 +10,32 @@ license.workspace = true workspace = true [dependencies] -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } -rand = "0.8" -itertools = "0.11" +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +itertools = { workspace = true } -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } [dev-dependencies] -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } +sp-authority-discovery = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } -assert_matches = "1.4.0" -schnorrkel = { version = "0.11.4", default-features = false } +assert_matches = { workspace = true } +schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = "0.3.1" -env_logger = "0.11" +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +env_logger = { workspace = true } log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 369d82b45b094..d48fb08a311c7 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -1431,6 +1431,21 @@ impl State { let required_routing = topology.map_or(RequiredRouting::PendingTopology, |t| { t.local_grid_neighbors().required_routing_by_index(validator_index, 
local) }); + // Peers that we will send the assignment to. + let mut peers = HashSet::new(); + + let peers_to_route_to = topology + .as_ref() + .map(|t| t.peers_to_route(required_routing)) + .unwrap_or_default(); + + for peer in peers_to_route_to { + if !entry.known_by.contains_key(&peer) { + continue + } + + peers.insert(peer); + } // All the peers that know the relay chain block. let peers_to_filter = entry.known_by(); @@ -1456,20 +1471,13 @@ impl State { let n_peers_total = self.peer_views.len(); let source_peer = source.peer_id(); - // Peers that we will send the assignment to. - let mut peers = Vec::new(); - // Filter destination peers for peer in peers_to_filter.into_iter() { if Some(peer) == source_peer { continue } - if let Some(true) = topology - .as_ref() - .map(|t| t.local_grid_neighbors().route_to_peer(required_routing, &peer)) - { - peers.push(peer); + if peers.contains(&peer) { continue } @@ -1485,7 +1493,11 @@ impl State { if route_random { approval_entry.routing_info_mut().mark_randomly_sent(peer); - peers.push(peer); + peers.insert(peer); + } + + if approval_entry.routing_info().random_routing.is_complete() { + break } } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 5ad034464767e..2d08807f97b60 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -2404,7 +2404,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; - let assignment_sent_peers = assert_matches!( + let mut assignment_sent_peers = assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( sent_peers, @@ -2428,12 +2428,14 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, + mut sent_peers, Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { // Random sampling is reused from the assignment. + sent_peers.sort(); + assignment_sent_peers.sort(); assert_eq!(sent_peers, assignment_sent_peers); assert_eq!(sent_approvals, approvals); } @@ -2678,7 +2680,7 @@ fn propagates_to_required_after_connect() { let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; - let assignment_sent_peers = assert_matches!( + let mut assignment_sent_peers = assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( sent_peers, @@ -2702,12 +2704,14 @@ fn propagates_to_required_after_connect() { assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, + mut sent_peers, Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { // Random sampling is reused from the assignment. 
+ sent_peers.sort(); + assignment_sent_peers.sort(); assert_eq!(sent_peers, assignment_sent_peers); assert_eq!(sent_approvals, approvals); } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index db3a0456d9adb..8c5574f244e4a 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -10,35 +10,35 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } -polkadot-primitives = { path = "../../../primitives" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-primitives = { path = "../../primitives" } -sc-network = { path = "../../../../substrate/client/network" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = "0.8.5" -derive_more = "0.99.17" -schnellru = "0.2.1" -fatality = "0.1.1" +rand = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +schnellru = { workspace = true } +fatality = { workspace = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-network = { path = "../../../../substrate/client/network" } -futures-timer = "3.0.2" -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rstest = "0.18.2" -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] diff --git 
a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index befbff0a2f27e..97e616f79fb75 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -216,7 +216,7 @@ impl TestState { // Test will fail if this does not happen until timeout. let mut remaining_stores = self.valid_chunks.len(); - let TestSubsystemContextHandle { tx, mut rx } = harness.virtual_overseer; + let TestSubsystemContextHandle { tx, mut rx, .. } = harness.virtual_overseer; // Spawning necessary as incoming queue can only hold a single item, we don't want to dead // lock ;-) diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 1c9c861e6f733..41f09b1f70443 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -10,39 +10,39 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -tokio = "1.37" -schnellru = "0.2.1" -rand = "0.8.5" -fatality = "0.1.1" +futures = { workspace = true } +tokio = { workspace = true, default-features = true } +schnellru = { workspace = true } +rand = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } -async-trait = "0.1.79" -gum = { package = "tracing-gum", path = "../../gum" } - -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-network-protocol = { path = "../protocol" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } +async-trait = { workspace = true } +gum = { workspace = true, default-features = true } + +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.4.0" -futures-timer = "3.0.2" -rstest = "0.18.2" +assert_matches = { workspace = true } +futures-timer = { workspace = true } +rstest = { workspace = true } log = { workspace = true, default-features = true } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sc-network = { path = "../../../../substrate/client/network" } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } 
+sc-network = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "availability-recovery-regression-bench" diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6b5b784b7fd89..b1becaf319d55 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -10,26 +10,26 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-network-protocol = { path = "../protocol" } -rand = "0.8" +always-assert = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -maplit = "1.0.2" +polkadot-node-subsystem-test-helpers = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +maplit = { workspace = true } log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -rand_chacha = "0.3.1" +env_logger = { workspace = true } +assert_matches = { workspace = true } +rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index cd4e00ee1e4c5..b4b5743853cd6 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -10,28 +10,28 @@ license.workspace = true workspace = true [dependencies] -always-assert = "0.1" -async-trait = "0.1.79" -futures = "0.3.30" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } 
-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -polkadot-node-metrics = { path = "../../metrics" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-overseer = { path = "../../overseer" } -parking_lot = "0.12.1" -bytes = "1" -fatality = "0.1.1" +always-assert = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures-timer = "3" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +futures-timer = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 84e935366d0cb..56965ce6ba404 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -1135,13 +1135,33 @@ async fn dispatch_validation_events_to_all( I: IntoIterator>, I::IntoIter: Send, { + macro_rules! send_message { + ($event:expr, $message:ident) => { + if let Ok(event) = $event.focus() { + let has_high_priority = matches!( + event, + // NetworkBridgeEvent::OurViewChange(..) must also be here, + // but it is sent via an unbounded channel. + // See https://github.com/paritytech/polkadot-sdk/issues/824 + NetworkBridgeEvent::PeerConnected(..) | + NetworkBridgeEvent::PeerDisconnected(..) | + NetworkBridgeEvent::PeerViewChange(..) 
+ ); + let message = $message::from(event); + if has_high_priority { + sender.send_message_with_priority::(message).await; + } else { + sender.send_message(message).await; + } + } + }; + } + for event in events { - sender - .send_messages(event.focus().map(StatementDistributionMessage::from)) - .await; - sender.send_messages(event.focus().map(BitfieldDistributionMessage::from)).await; - sender.send_messages(event.focus().map(ApprovalDistributionMessage::from)).await; - sender.send_messages(event.focus().map(GossipSupportMessage::from)).await; + send_message!(event, StatementDistributionMessage); + send_message!(event, BitfieldDistributionMessage); + send_message!(event, ApprovalDistributionMessage); + send_message!(event, GossipSupportMessage); } } diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 6182bf3d883b5..392ff7391a1c1 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -880,6 +880,8 @@ fn peer_view_updates_sent_via_overseer() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } network_handle @@ -895,6 +897,7 @@ fn peer_view_updates_sent_via_overseer() { &mut virtual_overseer, ) .await; + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 12); virtual_overseer }); } @@ -930,6 +933,8 @@ fn peer_messages_sent_via_overseer() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } let approval_distribution_message = @@ -970,6 +975,7 @@ fn peer_messages_sent_via_overseer() { &mut virtual_overseer, ) .await; + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 12); virtual_overseer }); } @@ -1008,6 +1014,8 @@ fn peer_disconnect_from_just_one_peerset() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } { @@ -1036,6 +1044,7 @@ fn peer_disconnect_from_just_one_peerset() { &mut virtual_overseer, ) .await; + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 12); // to show that we're still connected on the collation protocol, send a view update. 
@@ -1094,6 +1103,8 @@ fn relays_collation_protocol_messages() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } { @@ -1201,6 +1212,8 @@ fn different_views_on_different_peer_sets() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } { @@ -1247,6 +1260,8 @@ fn different_views_on_different_peer_sets() { ) .await; + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 12); + assert_sends_collation_event_to_all( NetworkBridgeEvent::PeerViewChange(peer, view_b.clone()), &mut virtual_overseer, @@ -1481,6 +1496,8 @@ fn network_protocol_versioning_subsystem_msg() { &mut virtual_overseer, ) .await; + + assert_eq!(virtual_overseer.message_counter.with_high_priority(), 8); } let approval_distribution_message = diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index a56c1c7dfe986..d41fc7ebe8ddb 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -10,38 +10,38 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } -futures = "0.3.30" -futures-timer = "3" -gum = { package = "tracing-gum", path = "../../gum" } - -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } - -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-subsystem = { path = "../../subsystem" } -fatality = "0.1.1" +bitvec = { features = ["alloc"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } + +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } + +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +fatality = { workspace = true } thiserror = { workspace = true } -tokio-util = "0.7.1" +tokio-util = { workspace = true } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.11" -assert_matches = "1.4.0" -rstest = "0.18.2" - -sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sc-network = { path = "../../../../substrate/client/network" } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } - -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +env_logger = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } + +sp-core = { features = ["std"], 
workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } + +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } [features] default = [] diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 80a85420b392b..5c201542eb560 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ get_availability_cores, get_group_rotation_info, prospective_parachains_mode, ProspectiveParachainsMode, RuntimeInfo, }, + vstaging::fetch_claim_queue, TimeoutExt, }; use polkadot_primitives::{ @@ -579,22 +580,27 @@ async fn determine_cores( let cores = get_availability_cores(sender, relay_parent).await?; let n_cores = cores.len(); let mut assigned_cores = Vec::new(); + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent).await?; for (idx, core) in cores.iter().enumerate() { - let core_para_id = match core { - CoreState::Scheduled(scheduled) => Some(scheduled.para_id), - CoreState::Occupied(occupied) => - if relay_parent_mode.is_enabled() { - // With async backing we don't care about the core state, - // it is only needed for figuring our validators group. - Some(occupied.candidate_descriptor.para_id) - } else { - None - }, - CoreState::Free => None, + let core_is_scheduled = match maybe_claim_queue { + Some(ref claim_queue) => { + // Runtime supports claim queue - use it. + claim_queue + .iter_claims_for_core(&CoreIndex(idx as u32)) + .any(|para| para == &para_id) + }, + None => match core { + CoreState::Scheduled(scheduled) if scheduled.para_id == para_id => true, + CoreState::Occupied(occupied) if relay_parent_mode.is_enabled() => + // With async backing we don't care about the core state, + // it is only needed for figuring our validators group.
+ occupied.next_up_on_available.as_ref().map(|c| c.para_id) == Some(para_id), + _ => false, + }, }; - if core_para_id == Some(para_id) { + if core_is_scheduled { assigned_cores.push(CoreIndex::from(idx as u32)); } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index a13e99df4ab47..13601ca7a0056 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -16,7 +16,11 @@ use super::*; -use std::{collections::HashSet, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, HashSet, VecDeque}, + sync::Arc, + time::Duration, +}; use assert_matches::assert_matches; use futures::{executor, future, Future}; @@ -66,7 +70,7 @@ struct TestState { group_rotation_info: GroupRotationInfo, validator_peer_id: Vec, relay_parent: Hash, - availability_cores: Vec, + claim_queue: BTreeMap>, local_peer_id: PeerId, collator_pair: CollatorPair, session_index: SessionIndex, @@ -105,8 +109,9 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let availability_cores = - vec![CoreState::Scheduled(ScheduledCore { para_id, collator: None }), CoreState::Free]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [para_id].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); let relay_parent = Hash::random(); @@ -133,7 +138,7 @@ impl Default for TestState { group_rotation_info, validator_peer_id, relay_parent, - availability_cores, + claim_queue, local_peer_id, collator_pair, session_index: 1, @@ -147,17 +152,14 @@ impl TestState { pub fn with_elastic_scaling() -> Self { let mut state = Self::default(); let para_id = state.para_id; - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); - state - .availability_cores - .push(CoreState::Scheduled(ScheduledCore { para_id, collator: None })); + + state.claim_queue.insert(CoreIndex(2), [para_id].into_iter().collect()); + state.claim_queue.insert(CoreIndex(3), [para_id].into_iter().collect()); state } fn current_group_validator_indices(&self) -> &[ValidatorIndex] { - let core_num = self.availability_cores.len(); + let core_num = self.claim_queue.len(); let GroupIndex(group_idx) = self.group_rotation_info.group_for_core(CoreIndex(0), core_num); &self.session_info.validator_groups.get(GroupIndex::from(group_idx)).unwrap() } @@ -395,7 +397,36 @@ async fn distribute_collation_with_receipt( RuntimeApiRequest::AvailabilityCores(tx) )) => { assert_eq!(relay_parent, _relay_parent); - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + tx.send(Ok(test_state.claim_queue.values().map(|paras| + if let Some(para) = paras.front() { + CoreState::Scheduled(ScheduledCore { para_id: *para, collator: None }) + } else { + CoreState::Free + } + ).collect())).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); + } + ); + + // obtain the claim queue schedule. 
+ assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::ClaimQueue(tx) + )) => { + assert_eq!(relay_parent, _relay_parent); + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); } ); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 0a0a85fb1f275..ea8fdb0e04fbe 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,7 +19,7 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; -use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header}; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; @@ -665,90 +665,3 @@ fn advertise_and_send_collation_by_hash() { }, ) } - -/// Tests that collator distributes collation built on top of occupied core. -#[test] -fn advertise_core_occupied() { - let mut test_state = TestState::default(); - let candidate = - TestCandidateBuilder { para_id: test_state.para_id, ..Default::default() }.build(); - test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { - next_up_on_available: None, - occupied_since: 0, - time_out_at: 0, - next_up_on_time_out: None, - availability: BitVec::default(), - group_responsible: GroupIndex(0), - candidate_hash: candidate.hash(), - candidate_descriptor: candidate.descriptor, - }); - - let local_peer_id = test_state.local_peer_id; - let collator_pair = test_state.collator_pair.clone(); - - test_harness( - local_peer_id, - collator_pair, - ReputationAggregator::new(|_| true), - |mut test_harness| async move { - let virtual_overseer = &mut test_harness.virtual_overseer; - - let head_a = Hash::from_low_u64_be(128); - let head_a_num: u32 = 64; - - // Grandparent of head `a`. - let head_b = Hash::from_low_u64_be(130); - - // Set collating para id. - overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) - .await; - // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; - - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let candidate = TestCandidateBuilder { - para_id: test_state.para_id, - relay_parent: head_b, - pov_hash: pov.hash(), - ..Default::default() - } - .build(); - let candidate_hash = candidate.hash(); - distribute_collation_with_receipt( - virtual_overseer, - &test_state, - head_b, - true, - candidate, - pov, - Hash::zero(), - ) - .await; - - let validators = test_state.current_group_validator_authority_ids(); - let peer_ids = test_state.current_group_validator_peer_ids(); - - connect_peer( - virtual_overseer, - peer_ids[0], - CollationVersion::V2, - Some(validators[0].clone()), - ) - .await; - expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; - // Peer is aware of the leaf. - send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; - - // Collation is advertised. 
- expect_advertise_collation_msg( - virtual_overseer, - &peer_ids[0], - head_b, - Some(vec![candidate_hash]), - ) - .await; - - test_harness - }, - ) -} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 001df1fb3da9b..96ffe9f13db35 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -270,7 +270,7 @@ impl Collations { // We don't need to fetch any other collation when we already have seconded one. CollationStatus::Seconded => None, CollationStatus::Waiting => - if !self.is_seconded_limit_reached(relay_parent_mode) { + if self.is_seconded_limit_reached(relay_parent_mode) { None } else { self.waiting_queue.pop_front() @@ -280,7 +280,7 @@ } } - /// Checks the limit of seconded candidates for a given para. + /// Checks the limit of seconded candidates. pub(super) fn is_seconded_limit_reached( &self, relay_parent_mode: ProspectiveParachainsMode, @@ -293,7 +293,7 @@ } else { 1 }; - self.seconded_count < seconded_limit + self.seconded_count >= seconded_limit } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 9f037a983e51c..f5c9726f3f6a5 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -19,7 +19,7 @@ use futures::{ }; use futures_timer::Delay; use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, future::Future, time::{Duration, Instant}, }; @@ -51,6 +51,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ CandidateHash, CollatorId, CoreState, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, @@ -362,8 +363,8 @@ impl PeerData { #[derive(Debug)] struct GroupAssignments { - /// Current assignment. - current: Option<ParaId>, + /// Current assignments.
+ current: Vec<ParaId>, } struct PerRelayParent { @@ -376,7 +377,7 @@ impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - assignment: GroupAssignments { current: None }, + assignment: GroupAssignments { current: vec![] }, collations: Collations::default(), } } @@ -491,34 +492,34 @@ where .await .map_err(Error::CancelledAvailabilityCores)??; - let para_now = match polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore) - .and_then(|(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index)) - { - Some(group) => { - let core_now = rotation_info.core_for_group(group, cores.len()); - - cores.get(core_now.0 as usize).and_then(|c| match c { - CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()), - CoreState::Scheduled(core) => Some(core.para_id), - CoreState::Occupied(_) | CoreState::Free => None, - }) - }, - None => { - gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); - - return Ok(()) - }, + let core_now = if let Some(group) = + polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then( + |(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index), + ) { + rotation_info.core_for_group(group, cores.len()) + } else { + gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); + return Ok(()) }; - // This code won't work well, if at all for on-demand parachains. For on-demand we'll - // have to be aware of which core the on-demand claim is going to be multiplexed - // onto. The on-demand claim will also have a known collator, and we should always - // allow an incoming connection from that collator. If not even connecting to them - // directly. - // - // However, this'll work fine for parachains, as each parachain gets a dedicated - // core. - if let Some(para_id) = para_now.as_ref() { + let paras_now = match fetch_claim_queue(sender, relay_parent).await.map_err(Error::Runtime)? { + // Runtime supports claim queue - use it + // + // `relay_parent_mode` is not examined here because if the runtime supports claim queue + // then it supports async backing params too (`ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT` + // < `CLAIM_QUEUE_RUNTIME_REQUIREMENT`). + Some(mut claim_queue) => claim_queue.0.remove(&core_now), + // Claim queue is not supported by the runtime - use availability cores instead. + None => cores.get(core_now.0 as usize).and_then(|c| match c { + CoreState::Occupied(core) if relay_parent_mode.is_enabled() => + core.next_up_on_available.as_ref().map(|c| [c.para_id].into_iter().collect()), + CoreState::Scheduled(core) => Some([core.para_id].into_iter().collect()), + CoreState::Occupied(_) | CoreState::Free => None, + }), + } + .unwrap_or_else(|| VecDeque::new()); + + for para_id in paras_now.iter() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -531,7 +532,7 @@ } } - *group_assignment = GroupAssignments { current: para_now }; + *group_assignment = GroupAssignments { current: paras_now.into_iter().collect() }; Ok(()) } @@ -542,7 +543,7 @@ fn remove_outgoing( ) { let GroupAssignments { current, ..
} = per_relay_parent.assignment; - if let Some(cur) = current { + for cur in current { if let Entry::Occupied(mut occupied) = current_assignments.entry(cur) { *occupied.get_mut() -= 1; if *occupied.get() == 0 { @@ -857,7 +858,8 @@ async fn process_incoming_peer_message( peer_id = ?origin, ?collator_id, ?para_id, - "Declared as collator for unneeded para", + "Declared as collator for unneeded para. Current assignments: {:?}", + &state.current_assignments ); modify_reputation( @@ -1089,7 +1091,7 @@ where peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?; // Check if this is assigned to us. - if assignment.current.map_or(true, |id| id != collator_para_id) { + if !assignment.current.contains(&collator_para_id) { return Err(AdvertisementError::InvalidAssignment) } @@ -1105,7 +1107,7 @@ where ) .map_err(AdvertisementError::Invalid)?; - if !per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { + if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { return Err(AdvertisementError::SecondedLimitReached) } @@ -1197,7 +1199,7 @@ where }); let collations = &mut per_relay_parent.collations; - if !collations.is_seconded_limit_reached(relay_parent_mode) { + if collations.is_seconded_limit_reached(relay_parent_mode) { gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 3f4459d8e65d1..44e25efd4dfcd 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -21,7 +21,12 @@ use sc_network::ProtocolName; use sp_core::{crypto::Pair, Encode}; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; -use std::{iter, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, VecDeque}, + iter, + sync::Arc, + time::Duration, +}; use polkadot_node_network_protocol::{ our_view, @@ -37,7 +42,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - CandidateReceipt, CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, + CandidateReceipt, CollatorPair, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ @@ -71,6 +76,7 @@ struct TestState { validator_groups: Vec>, group_rotation_info: GroupRotationInfo, cores: Vec, + claim_queue: BTreeMap>, } impl Default for TestState { @@ -104,7 +110,7 @@ impl Default for TestState { CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }), CoreState::Free, CoreState::Occupied(OccupiedCore { - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }), occupied_since: 0, time_out_at: 1, next_up_on_time_out: None, @@ -120,6 +126,11 @@ impl Default for TestState { }), ]; + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect()); + claim_queue.insert(CoreIndex(1), VecDeque::new()); + claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect()); + Self { chain_ids, relay_parent, @@ -128,6 +139,7 @@ impl Default for TestState { validator_groups, group_rotation_info, cores, + claim_queue, } } } @@ -264,6 +276,26 @@ async 
fn respond_to_core_info_queries( let _ = tx.send(Ok(test_state.cores.clone())); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::Version(tx), + )) => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Assert that the next message is a `CandidateBacking(Second())`. diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 178dcb85e035f..472731b506ab1 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -72,6 +72,26 @@ async fn assert_assign_incoming( tx.send(Ok(test_state.cores.clone())).unwrap(); } ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Version(tx), + )) if parent == hash => { + let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ClaimQueue(tx), + )) if parent == hash => { + let _ = tx.send(Ok(test_state.claim_queue.clone())); + } + ); } /// Handle a view update. diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 08713209bb740..ccf1b5daad7c3 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -10,33 +10,33 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -derive_more = "0.99.17" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["std"] } -polkadot-primitives = { path = "../../../primitives" } -polkadot-erasure-coding = { path = "../../../erasure-coding" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-primitives = { path = "../../primitives" } -sc-network = { path = "../../../../substrate/client/network" } -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace 
= true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -schnellru = "0.2.1" -indexmap = "2.0.0" +fatality = { workspace = true } +schnellru = { workspace = true } +indexmap = { workspace = true } [dev-dependencies] -async-channel = "1.8.0" -async-trait = "0.1.79" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -futures-timer = "3.0.2" -assert_matches = "1.4.0" -lazy_static = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +async-channel = { workspace = true } +async-trait = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +futures-timer = { workspace = true } +assert_matches = { workspace = true } +lazy_static = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 2d6f2f954c667..83fdc7e26191e 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -10,34 +10,34 @@ license.workspace = true workspace = true [dependencies] -sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-crypto-hashing = { path = "../../../../substrate/primitives/crypto/hashing" } -sc-network = { path = "../../../../substrate/client/network" } -sc-network-common = { path = "../../../../substrate/client/network/common" } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } -polkadot-node-network-protocol = { path = "../protocol" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-primitives = { path = "../../../primitives" } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } -futures = "0.3.30" -futures-timer = "3.0.2" -rand = { version = "0.8.5", default-features = false } -rand_chacha = { version = "0.3.1", default-features = false } -gum = { package = "tracing-gum", path = "../../gum" } +futures = { workspace = true } +futures-timer = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +gum = { workspace = true, default-features = true } [dev-dependencies] 
-sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } +sp-keyring = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +polkadot-node-subsystem-test-helpers = { workspace = true } -assert_matches = "1.4.0" -async-trait = "0.1.79" -parking_lot = "0.12.1" -lazy_static = "1.4.0" -quickcheck = "1.0.3" +assert_matches = { workspace = true } +async-trait = { workspace = true } +parking_lot = { workspace = true, default-features = true } +lazy_static = { workspace = true } +quickcheck = { workspace = true, default-features = true } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 83145ce401302..c9ae23d756cfc 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -10,25 +10,25 @@ description = "Primitives types for the Node-side" workspace = true [dependencies] -async-channel = "1.8.0" -async-trait = "0.1.79" -hex = "0.4.3" -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-jaeger = { path = "../../jaeger" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sc-network = { path = "../../../../substrate/client/network" } -sc-network-types = { path = "../../../../substrate/client/network/types" } -sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -strum = { version = "0.26.2", features = ["derive"] } -futures = "0.3.30" +async-channel = { workspace = true } +async-trait = { workspace = true } +hex = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +strum = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } thiserror = { workspace = true } -fatality = "0.1.1" -rand = "0.8" -derive_more = "0.99" -gum = { package = "tracing-gum", path = "../../gum" } -bitvec = "1" +fatality = { workspace = true } +rand = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] -rand_chacha = "0.3.1" +rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index a14d24610722b..4dd7d29fc25cd 100644 
--- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -313,6 +313,23 @@ impl SessionGridTopologyEntry { self.topology.is_validator(peer) } + /// Returns the list of peers to route based on the required routing. + pub fn peers_to_route(&self, required_routing: RequiredRouting) -> Vec { + match required_routing { + RequiredRouting::All => self.topology.peer_ids.iter().copied().collect(), + RequiredRouting::GridX => self.local_neighbors.peers_x.iter().copied().collect(), + RequiredRouting::GridY => self.local_neighbors.peers_y.iter().copied().collect(), + RequiredRouting::GridXY => self + .local_neighbors + .peers_x + .iter() + .chain(self.local_neighbors.peers_y.iter()) + .copied() + .collect(), + RequiredRouting::None | RequiredRouting::PendingTopology => Vec::new(), + } + } + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, @@ -524,6 +541,11 @@ impl RandomRouting { pub fn inc_sent(&mut self) { self.sent += 1 } + + /// Returns `true` if we already took all the necessary samples. + pub fn is_complete(&self) -> bool { + self.sent >= self.target + } } /// Routing mode diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index b044acd1a86d3..2a9773ddde4bd 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -10,39 +10,39 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -futures-timer = "3.0.2" -gum = { package = "tracing-gum", path = "../../gum" } -polkadot-primitives = { path = "../../../primitives" } -sp-staking = { path = "../../../../substrate/primitives/staking", default-features = false } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-node-subsystem-util = { path = "../../subsystem-util" } -polkadot-node-network-protocol = { path = "../protocol" } -arrayvec = "0.7.4" -indexmap = "2.0.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-staking = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +arrayvec = { workspace = true } +indexmap = { workspace = true } +codec = { features = ["derive"], workspace = true } thiserror = { workspace = true } -fatality = "0.1.1" -bitvec = "1" +fatality = { workspace = true } +bitvec = { workspace = true, default-features = true } [dev-dependencies] -async-channel = "1.8.0" -assert_matches = "1.4.0" -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-application-crypto = { 
path = "../../../../substrate/primitives/application-crypto" } -sp-keystore = { path = "../../../../substrate/primitives/keystore" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sc-keystore = { path = "../../../../substrate/client/keystore" } -sc-network = { path = "../../../../substrate/client/network" } -futures-timer = "3.0.2" -polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } -rand_chacha = "0.3" -polkadot-subsystem-bench = { path = "../../subsystem-bench" } +async-channel = { workspace = true } +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +futures-timer = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +polkadot-subsystem-bench = { workspace = true } [[bench]] name = "statement-distribution-regression-bench" diff --git a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs index 9cbe385e3f42e..4e7206e0a366d 100644 --- a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs +++ b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs @@ -63,7 +63,7 @@ fn main() -> Result<(), String> { ("Received from peers", 106.4000, 0.001), ("Sent to peers", 127.9100, 0.001), ])); - messages.extend(average_usage.check_cpu_usage(&[("statement-distribution", 0.0390, 0.1)])); + messages.extend(average_usage.check_cpu_usage(&[("statement-distribution", 0.0374, 0.1)])); if messages.is_empty() { Ok(()) diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index 4d56c795f13b2..33431eb1edce5 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -284,7 +284,14 @@ impl StatementDistributionSubsystem { ); }, MuxedMessage::Response(result) => { - v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await; + v2::handle_response( + &mut ctx, + &mut state, + result, + &mut self.reputation, + &self.metrics, + ) + .await; }, MuxedMessage::RetryRequest(()) => { // A pending request is ready to retry. This is only a signal to call @@ -320,7 +327,8 @@ impl StatementDistributionSubsystem { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { let res = - v2::handle_active_leaves_update(ctx, state, activated, mode).await; + v2::handle_active_leaves_update(ctx, state, activated, mode, &metrics) + .await; // Regardless of the result of leaf activation, we always prune before // handling it to avoid leaks. 
v2::handle_deactivate_leaves(state, &deactivated); @@ -370,6 +378,7 @@ impl StatementDistributionSubsystem { relay_parent, statement, &mut self.reputation, + &self.metrics, ) .await?; } @@ -428,11 +437,24 @@ impl StatementDistributionSubsystem { if target.targets_current() { // pass to v2. - v2::handle_network_update(ctx, state, event, &mut self.reputation).await; + v2::handle_network_update( + ctx, + state, + event, + &mut self.reputation, + &self.metrics, + ) + .await; } }, StatementDistributionMessage::Backed(candidate_hash) => { - crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await; + crate::v2::handle_backed_candidate_message( + ctx, + state, + candidate_hash, + &self.metrics, + ) + .await; }, }, } diff --git a/polkadot/node/network/statement-distribution/src/metrics.rs b/polkadot/node/network/statement-distribution/src/metrics.rs index 1bc9941742639..e21fff1e6421e 100644 --- a/polkadot/node/network/statement-distribution/src/metrics.rs +++ b/polkadot/node/network/statement-distribution/src/metrics.rs @@ -25,13 +25,13 @@ const HISTOGRAM_LATENCY_BUCKETS: &[f64] = &[ #[derive(Clone)] struct MetricsInner { // V1 - statements_distributed: prometheus::Counter, sent_requests: prometheus::Counter, received_responses: prometheus::CounterVec, network_bridge_update: prometheus::HistogramVec, statements_unexpected: prometheus::CounterVec, created_message_size: prometheus::Gauge, // V1+ + statements_distributed: prometheus::Counter, active_leaves_update: prometheus::Histogram, share: prometheus::Histogram, // V2+ @@ -51,6 +51,13 @@ impl Metrics { } } + /// Update statements distributed counter by an amount + pub fn on_statements_distributed(&self, n: usize) { + if let Some(metrics) = &self.0 { + metrics.statements_distributed.inc_by(n as u64); + } + } + /// Update sent requests counter /// This counter is updated merely for the statements sent via request/response method, /// meaning that it counts large statements only diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 73416b193bbec..47d350849b206 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -195,8 +195,8 @@ struct ActiveValidatorState { index: ValidatorIndex, // our validator group group: GroupIndex, - // the assignment of our validator group, if any. - assignment: Option, + // the assignments of our validator group, if any. + assignments: Vec, // the 'direct-in-group' communication at this relay-parent. 
cluster_tracker: ClusterTracker, } @@ -400,6 +400,7 @@ pub(crate) async fn handle_network_update( state: &mut State, update: NetworkBridgeEvent, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { match update { NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { @@ -483,23 +484,33 @@ pub(crate) async fn handle_network_update( net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::Statement(relay_parent, statement), ) => - handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) - .await, + handle_incoming_statement( + ctx, + state, + peer_id, + relay_parent, + statement, + reputation, + metrics, + ) + .await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) | net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::BackedCandidateManifest(inner), - ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, + ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation, metrics).await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) | net_protocol::StatementDistributionMessage::V3( protocol_v3::StatementDistributionMessage::BackedCandidateKnown(inner), - ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, + ) => + handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation, metrics) + .await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => - handle_peer_view_update(ctx, state, peer_id, view).await, + handle_peer_view_update(ctx, state, peer_id, view, metrics).await, NetworkBridgeEvent::OurViewChange(_view) => { // handled by `handle_activated_leaf` }, @@ -539,6 +550,7 @@ pub(crate) async fn handle_active_leaves_update( state: &mut State, activated: &ActivatedLeaf, leaf_mode: ProspectiveParachainsMode, + metrics: &Metrics, ) -> JfyiErrorResult<()> { let max_candidate_depth = match leaf_mode { ProspectiveParachainsMode::Disabled => return Ok(()), @@ -714,7 +726,8 @@ pub(crate) async fn handle_active_leaves_update( for (peer, fresh) in update_peers { for fresh_relay_parent in fresh { - send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent).await; + send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent, metrics) + .await; } } } @@ -740,8 +753,8 @@ fn find_active_validator_state( let our_group = groups.by_validator_index(validator_index)?; let core_index = group_rotation_info.core_for_group(our_group, availability_cores.len()); - let para_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { - claim_queue.get_claim_for(core_index, 0) + let paras_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { + claim_queue.iter_claims_for_core(&core_index).copied().collect() } else { availability_cores .get(core_index.0 as usize) @@ -753,6 +766,8 @@ fn find_active_validator_state( .map(|scheduled_core| scheduled_core.para_id), CoreState::Free | CoreState::Occupied(_) => None, }) + .into_iter() + .collect() }; let group_validators = groups.get(our_group)?.to_owned(); @@ -760,7 +775,7 @@ fn find_active_validator_state( active: Some(ActiveValidatorState { index: validator_index, group: our_group, - assignment: para_assigned_to_core, + assignments: paras_assigned_to_core, cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) .expect("group is non-empty because we 
are in it; qed"), }), @@ -813,6 +828,7 @@ async fn handle_peer_view_update( state: &mut State, peer: PeerId, new_view: View, + metrics: &Metrics, ) { let fresh_implicit = { let peer_data = match state.peers.get_mut(&peer) { @@ -824,7 +840,7 @@ async fn handle_peer_view_update( }; for new_relay_parent in fresh_implicit { - send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent).await; + send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent, metrics).await; } } @@ -855,6 +871,7 @@ async fn send_peer_messages_for_relay_parent( state: &mut State, peer: PeerId, relay_parent: Hash, + metrics: &Metrics, ) { let peer_data = match state.peers.get_mut(&peer) { None => return, @@ -887,6 +904,7 @@ async fn send_peer_messages_for_relay_parent( &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, + metrics, ) .await; } @@ -899,6 +917,7 @@ async fn send_peer_messages_for_relay_parent( &per_session_state.groups, relay_parent_state, &state.candidates, + metrics, ) .await; } @@ -947,6 +966,7 @@ async fn send_pending_cluster_statements( cluster_tracker: &mut ClusterTracker, candidates: &Candidates, statement_store: &StatementStore, + metrics: &Metrics, ) { let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); let network_messages = pending_statements @@ -972,12 +992,12 @@ async fn send_pending_cluster_statements( }) .collect::>(); - if network_messages.is_empty() { - return + if !network_messages.is_empty() { + let count = network_messages.len(); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) + .await; + metrics.on_statements_distributed(count); } - - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) - .await; } /// Send a peer all pending grid messages / acknowledgements / follow up statements @@ -991,6 +1011,7 @@ async fn send_pending_grid_messages( groups: &Groups, relay_parent_state: &mut PerRelayParentState, candidates: &Candidates, + metrics: &Metrics, ) { let pending_manifests = { let local_validator = match relay_parent_state.local_validator.as_mut() { @@ -1003,6 +1024,7 @@ async fn send_pending_grid_messages( }; let mut messages: Vec<(Vec, net_protocol::VersionedValidationProtocol)> = Vec::new(); + let mut statements_count = 0; for (candidate_hash, kind) in pending_manifests { let confirmed_candidate = match candidates.get_confirmed(&candidate_hash) { None => continue, // sanity @@ -1077,7 +1099,7 @@ async fn send_pending_grid_messages( }; }, grid::ManifestKind::Acknowledgement => { - messages.extend(acknowledgement_and_statement_messages( + let (m, c) = acknowledgement_and_statement_messages( peer_id, peer_validator_id, groups, @@ -1086,7 +1108,9 @@ async fn send_pending_grid_messages( group_index, candidate_hash, local_knowledge, - )); + ); + messages.extend(m); + statements_count += c; }, } } @@ -1105,8 +1129,9 @@ async fn send_pending_grid_messages( let pending_statements = grid_tracker.all_pending_statements_for(peer_validator_id); - let extra_statements = - pending_statements.into_iter().filter_map(|(originator, compact)| { + let extra_statements = pending_statements + .into_iter() + .filter_map(|(originator, compact)| { let res = pending_statement_network_message( &relay_parent_state.statement_store, relay_parent, @@ -1126,15 +1151,17 @@ async fn send_pending_grid_messages( } res - }); + }) + .collect::>(); + statements_count += extra_statements.len(); messages.extend(extra_statements); } - if messages.is_empty() { - return + if 
!messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + metrics.on_statements_distributed(statements_count); } - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; } // Imports a locally originating statement and distributes it to peers. @@ -1145,6 +1172,7 @@ pub(crate) async fn share_local_statement( relay_parent: Hash, statement: SignedFullStatementWithPVD, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) -> JfyiErrorResult<()> { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { None => return Err(JfyiError::InvalidShare), @@ -1162,10 +1190,10 @@ pub(crate) async fn share_local_statement( None => return Ok(()), }; - let (local_index, local_assignment, local_group) = + let (local_index, local_assignments, local_group) = match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), - Some(l) => (l.index, l.assignment, l.group), + Some(l) => (l.index, &l.assignments, l.group), }; // Two possibilities: either the statement is `Seconded` or we already @@ -1203,7 +1231,7 @@ pub(crate) async fn share_local_statement( return Err(JfyiError::InvalidShare) } - if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { + if !local_assignments.contains(&expected_para) || relay_parent != expected_relay_parent { return Err(JfyiError::InvalidShare) } @@ -1267,11 +1295,12 @@ pub(crate) async fn share_local_statement( &state.authorities, &state.peers, compact_statement, + metrics, ) .await; if let Some(post_confirmation) = post_confirmation { - apply_post_confirmation(ctx, state, post_confirmation, reputation).await; + apply_post_confirmation(ctx, state, post_confirmation, reputation, metrics).await; } Ok(()) @@ -1308,6 +1337,7 @@ async fn circulate_statement( authorities: &HashMap, peers: &HashMap, statement: SignedStatement, + metrics: &Metrics, ) { let session_info = &per_session.session_info; @@ -1444,6 +1474,7 @@ async fn circulate_statement( .into(), )) .await; + metrics.on_statement_distributed(); } if !statement_to_v3_peers.is_empty() { @@ -1463,6 +1494,7 @@ async fn circulate_statement( .into(), )) .await; + metrics.on_statement_distributed(); } } /// Check a statement signature under this parent hash. @@ -1509,6 +1541,7 @@ async fn handle_incoming_statement( relay_parent: Hash, statement: UncheckedSignedStatement, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { let peer_state = match state.peers.get(&peer) { None => { @@ -1785,6 +1818,7 @@ async fn handle_incoming_statement( &state.authorities, &state.peers, checked_statement, + metrics, ) .await; } else { @@ -1942,6 +1976,7 @@ async fn provide_candidate_to_grid( per_session: &PerSessionState, authorities: &HashMap, peers: &HashMap, + metrics: &Metrics, ) { let local_validator = match relay_parent_state.local_validator { Some(ref mut v) => v, @@ -2129,8 +2164,10 @@ async fn provide_candidate_to_grid( .await; } if !post_statements.is_empty() { + let count = post_statements.len(); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)) .await; + metrics.on_statements_distributed(count); } } @@ -2144,12 +2181,11 @@ async fn determine_groups_per_para( let n_cores = availability_cores.len(); // Determine the core indices occupied by each para at the current relay parent. To support - // on-demand parachains we also consider the core indices at next block if core has a candidate - // pending availability. 
- let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + // on-demand parachains we also consider the core indices at next blocks. + let schedule: HashMap> = if let Some(claim_queue) = maybe_claim_queue { claim_queue - .iter_claims_at_depth(0) - .map(|(core_index, para)| (para, core_index)) + .iter_all_claims() + .map(|(core_index, paras)| (*core_index, paras.iter().copied().collect())) .collect() } else { availability_cores @@ -2157,12 +2193,12 @@ async fn determine_groups_per_para( .enumerate() .filter_map(|(index, core)| match core { CoreState::Scheduled(scheduled_core) => - Some((scheduled_core.para_id, CoreIndex(index as u32))), + Some((CoreIndex(index as u32), vec![scheduled_core.para_id])), CoreState::Occupied(occupied_core) => if max_candidate_depth >= 1 { - occupied_core - .next_up_on_available - .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + occupied_core.next_up_on_available.map(|scheduled_core| { + (CoreIndex(index as u32), vec![scheduled_core.para_id]) + }) } else { None }, @@ -2173,9 +2209,12 @@ async fn determine_groups_per_para( let mut groups_per_para = HashMap::new(); // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. - for (para, core_index) in para_core_indices { + for (core_index, paras) in schedule { let group_index = group_rotation_info.group_for_core(core_index, n_cores); - groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + + for para in paras { + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index); + } } groups_per_para @@ -2528,6 +2567,7 @@ async fn handle_incoming_manifest( peer: PeerId, manifest: net_protocol::v2::BackedCandidateManifest, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { gum::debug!( target: LOG_TARGET, @@ -2584,7 +2624,7 @@ async fn handle_incoming_manifest( ) }; - let messages = acknowledgement_and_statement_messages( + let (messages, statements_count) = acknowledgement_and_statement_messages( &( peer, state @@ -2605,6 +2645,7 @@ async fn handle_incoming_manifest( if !messages.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + metrics.on_statements_distributed(statements_count); } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. 
if unconfirmed, add request entry @@ -2632,9 +2673,9 @@ fn acknowledgement_and_statement_messages( group_index: GroupIndex, candidate_hash: CandidateHash, local_knowledge: StatementFilter, -) -> Vec<(Vec, net_protocol::VersionedValidationProtocol)> { +) -> (Vec<(Vec, net_protocol::VersionedValidationProtocol)>, usize) { let local_validator = match relay_parent_state.local_validator.as_mut() { - None => return Vec::new(), + None => return (Vec::new(), 0), Some(l) => l, }; @@ -2662,7 +2703,7 @@ fn acknowledgement_and_statement_messages( "Bug ValidationVersion::V1 should not be used in statement-distribution v2, legacy should have handled this" ); - return Vec::new() + return (Vec::new(), 0) }, }; @@ -2683,10 +2724,11 @@ fn acknowledgement_and_statement_messages( candidate_hash, peer, ); + let statements_count = statement_messages.len(); messages.extend(statement_messages.into_iter().map(|m| (vec![peer.0], m))); - messages + (messages, statements_count) } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2696,6 +2738,7 @@ async fn handle_incoming_acknowledgement( peer: PeerId, acknowledgement: net_protocol::v2::BackedCandidateAcknowledgement, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { // The key difference between acknowledgments and full manifests is that only // the candidate hash is included alongside the bitfields, so the candidate @@ -2776,10 +2819,12 @@ async fn handle_incoming_acknowledgement( ); if !messages.is_empty() { + let count = messages.len(); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( messages.into_iter().map(|m| (vec![peer], m)).collect(), )) .await; + metrics.on_statements_distributed(count); } } @@ -2789,6 +2834,7 @@ pub(crate) async fn handle_backed_candidate_message( ctx: &mut Context, state: &mut State, candidate_hash: CandidateHash, + metrics: &Metrics, ) { // If the candidate is unknown or unconfirmed, it's a race (pruned before receiving message) // or a bug. Ignore if so @@ -2830,6 +2876,7 @@ pub(crate) async fn handle_backed_candidate_message( per_session, &state.authorities, &state.peers, + metrics, ) .await; @@ -2851,6 +2898,7 @@ async fn send_cluster_candidate_statements( state: &mut State, candidate_hash: CandidateHash, relay_parent: Hash, + metrics: &Metrics, ) { let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { None => return, @@ -2893,6 +2941,7 @@ async fn send_cluster_candidate_statements( &state.authorities, &state.peers, statement, + metrics, ) .await; } @@ -2910,6 +2959,7 @@ async fn apply_post_confirmation( state: &mut State, post_confirmation: PostConfirmation, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { for peer in post_confirmation.reckoning.incorrect { modify_reputation(reputation, ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; @@ -2923,6 +2973,7 @@ async fn apply_post_confirmation( state, candidate_hash, post_confirmation.hypothetical.relay_parent(), + metrics, ) .await; new_confirmed_candidate_fragment_chain_updates(ctx, state, post_confirmation.hypothetical) @@ -3048,6 +3099,7 @@ pub(crate) async fn handle_response( state: &mut State, response: UnhandledResponse, reputation: &mut ReputationAggregator, + metrics: &Metrics, ) { let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); @@ -3147,7 +3199,7 @@ pub(crate) async fn handle_response( }; // Note that this implicitly circulates all statements via the cluster. 
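// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the statement-distribution changes
// above repeatedly remember a batch's size before handing it to the network
// bridge and then call `metrics.on_statements_distributed(count)`, guarded by
// the newtype-over-`Option` metrics pattern from `metrics.rs`. A minimal
// stand-alone model of that pattern follows; `MetricsInner` here uses a
// `Cell<u64>` stand-in instead of a prometheus counter, and
// `send_validation_messages` is a placeholder for the real bridge message,
// so names and types are illustrative assumptions only.
// ---------------------------------------------------------------------------

use std::cell::Cell;

#[derive(Default)]
struct MetricsInner {
    statements_distributed: Cell<u64>,
}

/// Metrics may be disabled; every update is a no-op in that case, mirroring
/// the `if let Some(metrics) = &self.0` guard in the patched `metrics.rs`.
#[derive(Default)]
struct Metrics(Option<MetricsInner>);

impl Metrics {
    fn on_statements_distributed(&self, n: usize) {
        if let Some(inner) = &self.0 {
            inner
                .statements_distributed
                .set(inner.statements_distributed.get() + n as u64);
        }
    }
}

// Placeholder for `NetworkBridgeTxMessage::SendValidationMessages(..)`.
fn send_validation_messages(messages: Vec<String>) {
    for m in messages {
        println!("sending: {m}");
    }
}

fn main() {
    let metrics = Metrics(Some(MetricsInner::default()));
    let messages = vec!["statement A".to_owned(), "statement B".to_owned()];

    // Same shape as the patched call sites: skip empty batches, take the
    // count before the vector is moved, send, then bump the counter.
    if !messages.is_empty() {
        let count = messages.len();
        send_validation_messages(messages);
        metrics.on_statements_distributed(count);
    }
}
// ------------------------- end of editor's sketch --------------------------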
- apply_post_confirmation(ctx, state, post_confirmation, reputation).await; + apply_post_confirmation(ctx, state, post_confirmation, reputation, metrics).await; let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index e77cead4a7565..2253a5ae0c668 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -10,30 +10,30 @@ description = "System overseer of the Polkadot node" workspace = true [dependencies] -sc-client-api = { path = "../../../substrate/client/api" } -sp-api = { path = "../../../substrate/primitives/api" } -futures = "0.3.30" -futures-timer = "3.0.2" -parking_lot = "0.12.1" -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-metrics = { path = "../metrics" } -polkadot-primitives = { path = "../../primitives" } -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -gum = { package = "tracing-gum", path = "../gum" } -sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.79" -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +gum = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +async-trait = { workspace = true } +tikv-jemalloc-ctl = { optional = true, workspace = true } [dev-dependencies] -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } -sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.30", features = ["thread-pool"] } -femme = "2.2.1" -assert_matches = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } +metered = { features = ["futures_channel"], workspace = true } +sp-core = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], workspace = true } +femme = { workspace = true } +assert_matches = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 24985a99913d8..4e13d5eda76f6 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -105,10 +105,11 @@ pub use polkadot_node_metrics::{ pub use orchestra as gen; pub use orchestra::{ - contextbounds, orchestra, subsystem, FromOrchestra, MapSubsystem, MessagePacket, - OrchestraError as 
OverseerError, SignalsReceived, Spawner, Subsystem, SubsystemContext, - SubsystemIncomingMessages, SubsystemInstance, SubsystemMeterReadouts, SubsystemMeters, - SubsystemSender, TimeoutExt, ToOrchestra, TrySendError, + contextbounds, orchestra, subsystem, FromOrchestra, HighPriority, MapSubsystem, MessagePacket, + NormalPriority, OrchestraError as OverseerError, Priority, PriorityLevel, SignalsReceived, + Spawner, Subsystem, SubsystemContext, SubsystemIncomingMessages, SubsystemInstance, + SubsystemMeterReadouts, SubsystemMeters, SubsystemSender, TimeoutExt, ToOrchestra, + TrySendError, }; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] @@ -495,7 +496,7 @@ pub struct Overseer { RuntimeApiMessage, ProspectiveParachainsMessage, ChainApiMessage, - ])] + ], can_receive_priority_messages)] statement_distribution: StatementDistribution, #[subsystem(AvailabilityDistributionMessage, sends: [ @@ -524,7 +525,7 @@ pub struct Overseer { RuntimeApiMessage, NetworkBridgeTxMessage, ProvisionerMessage, - ])] + ], can_receive_priority_messages)] bitfield_distribution: BitfieldDistribution, #[subsystem(ProvisionerMessage, sends: [ @@ -580,7 +581,7 @@ pub struct Overseer { #[subsystem(blocking, message_capacity: 64000, ApprovalDistributionMessage, sends: [ NetworkBridgeTxMessage, ApprovalVotingMessage, - ])] + ], can_receive_priority_messages)] approval_distribution: ApprovalDistribution, #[subsystem(blocking, ApprovalVotingMessage, sends: [ @@ -599,7 +600,7 @@ pub struct Overseer { NetworkBridgeRxMessage, // TODO RuntimeApiMessage, ChainSelectionMessage, - ])] + ], can_receive_priority_messages)] gossip_support: GossipSupport, #[subsystem(blocking, message_capacity: 32000, DisputeCoordinatorMessage, sends: [ diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 177e3addf368d..8e78d8fc8921a 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -813,7 +813,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) + CandidateBackingMessage::GetBackableCandidates(Default::default(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 0a84e5dae2a58..cd642bf16ff9b 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -10,24 +10,24 @@ license.workspace = true workspace = true [dependencies] -bounded-vec = "0.7" -futures = "0.3.30" -polkadot-primitives = { path = "../../primitives" } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-core = { path = "../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -schnorrkel = "0.11.4" +bounded-vec = { workspace = true } +futures = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +codec = { features = 
["derive"], workspace = true } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +schnorrkel = { workspace = true, default-features = true } thiserror = { workspace = true } -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { features = ["alloc"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } [dev-dependencies] -polkadot-erasure-coding = { path = "../../erasure-coding" } +polkadot-erasure-coding = { workspace = true, default-features = true } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index aded1b8fe7342..660b504e97fbb 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.12.0"; +pub const NODE_VERSION: &'static str = "1.14.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index ec5113d2c8a5f..c0ddbf7dcfc36 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -12,147 +12,147 @@ workspace = true [dependencies] # Substrate Client -sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } -sc-consensus-babe = { path = "../../../substrate/client/consensus/babe" } -sc-consensus-beefy = { path = "../../../substrate/client/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } -mmr-gadget = { path = "../../../substrate/client/merkle-mountain-range" } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range" } -sc-block-builder = { path = "../../../substrate/client/block-builder" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-client-db = { path = "../../../substrate/client/db" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-slots = { path = "../../../substrate/client/consensus/slots" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-common = { path = "../../../substrate/client/network/common" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-sync-state-rpc = { path = "../../../substrate/client/sync-state-rpc" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" 
} -sc-service = { path = "../../../substrate/client/service", default-features = false } -sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-authority-discovery = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +mmr-gadget = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-telemetry = { workspace = true, default-features = true } # Substrate Primitives -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-offchain = { package = "sp-offchain", path = "../../../substrate/primitives/offchain" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-session = { path = "../../../substrate/primitives/session" } -sp-storage = { path = "../../../substrate/primitives/storage" } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-state-machine = { path = "../../../substrate/primitives/state-machine" } -sp-weights = { path = "../../../substrate/primitives/weights" } -sp-version = { path = "../../../substrate/primitives/version" } +sp-authority-discovery = { workspace = true, default-features = true } +sp-consensus = { workspace = true, 
default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } # Substrate Pallets -pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-staking = { path = "../../../substrate/frame/staking" } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", optional = true } -frame-system = { path = "../../../substrate/frame/system" } +pallet-babe = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +frame-metadata-hash-extension = { optional = true, workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } # Substrate Other -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -frame-support = { path = "../../../substrate/frame/support" } -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } # External Crates -async-trait = "0.1.79" -futures = "0.3.30" -hex-literal = "0.4.1" -is_executable = "1.0.1" -gum = { package = "tracing-gum", path = "../gum" } +async-trait = { workspace = true } +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } +is_executable = { workspace = true } +gum = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -schnellru = "0.2.1" +schnellru = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, 
default-features = true } thiserror = { workspace = true } -kvdb = "0.13.0" -kvdb-rocksdb = { version = "0.19.0", optional = true } -parity-db = { version = "0.4.12", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -parking_lot = "0.12.1" -bitvec = { version = "1.0.1", optional = true } +kvdb = { workspace = true } +kvdb-rocksdb = { optional = true, workspace = true } +parity-db = { optional = true, workspace = true } +codec = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +bitvec = { optional = true, workspace = true, default-features = true } # Polkadot -polkadot-core-primitives = { path = "../../core-primitives" } -polkadot-node-core-parachains-inherent = { path = "../core/parachains-inherent" } -polkadot-overseer = { path = "../overseer" } -polkadot-parachain-primitives = { path = "../../parachain" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-rpc = { path = "../../rpc" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -polkadot-node-network-protocol = { path = "../network/protocol" } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-node-core-parachains-inherent = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-rpc = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } # Polkadot Runtime Constants -rococo-runtime-constants = { path = "../../runtime/rococo/constants", optional = true } -westend-runtime-constants = { path = "../../runtime/westend/constants", optional = true } +rococo-runtime-constants = { optional = true, workspace = true, default-features = true } +westend-runtime-constants = { optional = true, workspace = true, default-features = true } # Polkadot Runtimes -westend-runtime = { path = "../../runtime/westend", optional = true } -rococo-runtime = { path = "../../runtime/rococo", optional = true } +westend-runtime = { optional = true, workspace = true } +rococo-runtime = { optional = true, workspace = true } # Polkadot Subsystems -polkadot-approval-distribution = { path = "../network/approval-distribution", optional = true } -polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution", optional = true } -polkadot-availability-distribution = { path = "../network/availability-distribution", optional = true } -polkadot-availability-recovery = { path = "../network/availability-recovery", optional = true } -polkadot-collator-protocol = { path = "../network/collator-protocol", optional = true } -polkadot-dispute-distribution = { path = "../network/dispute-distribution", optional = true } 
-polkadot-gossip-support = { path = "../network/gossip-support", optional = true } -polkadot-network-bridge = { path = "../network/bridge", optional = true } -polkadot-node-collation-generation = { path = "../collation-generation", optional = true } -polkadot-node-core-approval-voting = { path = "../core/approval-voting", optional = true } -polkadot-node-core-av-store = { path = "../core/av-store", optional = true } -polkadot-node-core-backing = { path = "../core/backing", optional = true } -polkadot-node-core-bitfield-signing = { path = "../core/bitfield-signing", optional = true } -polkadot-node-core-candidate-validation = { path = "../core/candidate-validation", optional = true } -polkadot-node-core-chain-api = { path = "../core/chain-api", optional = true } -polkadot-node-core-chain-selection = { path = "../core/chain-selection", optional = true } -polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator", optional = true } -polkadot-node-core-prospective-parachains = { path = "../core/prospective-parachains", optional = true } -polkadot-node-core-provisioner = { path = "../core/provisioner", optional = true } -polkadot-node-core-pvf = { path = "../core/pvf", optional = true } -polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true } -polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true } -polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true } - -xcm = { package = "staging-xcm", path = "../../xcm" } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } +polkadot-approval-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-bitfield-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-distribution = { optional = true, workspace = true, default-features = true } +polkadot-availability-recovery = { optional = true, workspace = true, default-features = true } +polkadot-collator-protocol = { optional = true, workspace = true, default-features = true } +polkadot-dispute-distribution = { optional = true, workspace = true, default-features = true } +polkadot-gossip-support = { optional = true, workspace = true, default-features = true } +polkadot-network-bridge = { optional = true, workspace = true, default-features = true } +polkadot-node-collation-generation = { optional = true, workspace = true, default-features = true } +polkadot-node-core-approval-voting = { optional = true, workspace = true, default-features = true } +polkadot-node-core-av-store = { optional = true, workspace = true, default-features = true } +polkadot-node-core-backing = { optional = true, workspace = true, default-features = true } +polkadot-node-core-bitfield-signing = { optional = true, workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { optional = true, workspace = true, default-features = true } +polkadot-node-core-chain-api = { optional = true, workspace = true, default-features = true } +polkadot-node-core-chain-selection = { optional = true, workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { optional = true, workspace = true, default-features = true } +polkadot-node-core-prospective-parachains = { optional = true, workspace = true, default-features = true } +polkadot-node-core-provisioner = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf = { optional = true, 
workspace = true, default-features = true } +polkadot-node-core-pvf-checker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-runtime-api = { optional = true, workspace = true, default-features = true } +polkadot-statement-distribution = { optional = true, workspace = true, default-features = true } + +xcm = { workspace = true, default-features = true } +xcm-runtime-apis = { workspace = true, default-features = true } [dev-dependencies] -polkadot-test-client = { path = "../test/client" } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -env_logger = "0.11" -assert_matches = "1.5.0" -serial_test = "2.0.0" -tempfile = "3.2" +polkadot-test-client = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +env_logger = { workspace = true } +assert_matches = { workspace = true } +serial_test = { workspace = true } +tempfile = { workspace = true } [features] default = ["db", "full-node"] @@ -201,6 +201,13 @@ rococo-native = [ "rococo-runtime-constants", ] +# Generate the metadata hash needed for CheckMetadataHash +# in the test runtimes. +metadata-hash = [ + "rococo-runtime?/metadata-hash", + "westend-runtime?/metadata-hash", +] + runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", @@ -217,7 +224,7 @@ runtime-benchmarks = [ "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index dd8a0a7e635bc..e971830c95cb2 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -241,7 +241,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( _: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -398,30 +398,30 @@ sp_api::impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { - fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_fee_payment_runtime_api::fees::Error> { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(_: xcm::Version) -> Result, xcm_runtime_apis::fees::Error> { unimplemented!() } - fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { + fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result { unimplemented!() } - fn query_xcm_weight(_: VersionedXcm<()>) -> Result { + fn query_xcm_weight(_: VersionedXcm<()>) -> Result { unimplemented!() } - fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { + fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result { unimplemented!() } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { - fn dry_run_call(_: (), _: ()) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { + fn dry_run_call(_: (), _: ()) -> Result, xcm_runtime_apis::dry_run::Error> { unimplemented!() } - fn dry_run_xcm(_: VersionedLocation, _: VersionedXcm<()>) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + fn dry_run_xcm(_: VersionedLocation, _: VersionedXcm<()>) -> Result, xcm_runtime_apis::dry_run::Error> { unimplemented!() } } diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 5001104f929a2..0325613d25f9a 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -20,76 +20,76 @@ path = "src/cli/subsystem-bench.rs" doc = false [dependencies] -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-availability-recovery = { path = "../network/availability-recovery", features = ["subsystem-benchmarks"] } -polkadot-availability-distribution = { path = "../network/availability-distribution" } -polkadot-statement-distribution = { path = "../network/statement-distribution" } -polkadot-node-core-av-store = { path = "../core/av-store" } -polkadot-node-core-chain-api = { path = "../core/chain-api" } -polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution" } -color-eyre = { version = "0.6.1", default-features = false } -polkadot-overseer = { path = "../overseer" } -colored = "2.0.4" -assert_matches = "1.5" -async-trait = "0.1.79" -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -bincode = "1.3.3" -sha1 = "0.10.6" -hex = "0.4.3" -gum = { package = "tracing-gum", path = "../gum" } -polkadot-erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } 
+polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } +polkadot-availability-distribution = { workspace = true, default-features = true } +polkadot-statement-distribution = { workspace = true, default-features = true } +polkadot-node-core-av-store = { workspace = true, default-features = true } +polkadot-node-core-chain-api = { workspace = true, default-features = true } +polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } +color-eyre = { workspace = true } +polkadot-overseer = { workspace = true, default-features = true } +colored = { workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +bincode = { workspace = true } +sha1 = { workspace = true } +hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -env_logger = "0.11" -rand = "0.8.5" +env_logger = { workspace = true } +rand = { workspace = true, default-features = true } # `rand` only supports uniform distribution, we need normal distribution for latency. -rand_distr = "0.4.3" -bitvec = "1.0.1" -kvdb-memorydb = "0.13.0" +rand_distr = { workspace = true } +bitvec = { workspace = true, default-features = true } +kvdb-memorydb = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive", "std"] } -tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } -clap-num = "1.0.2" -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sc-service = { path = "../../../substrate/client/service" } -sp-consensus = { path = "../../../substrate/primitives/consensus/common" } -polkadot-node-metrics = { path = "../metrics" } -itertools = "0.11" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } -prometheus = { version = "0.13.0", default-features = false } +codec = { features = ["derive", "std"], workspace = true, default-features = true } +tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } +clap-num = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sp-consensus = { workspace = true, 
default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +itertools = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +prometheus = { workspace = true } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } serde_json = { workspace = true } -polkadot-node-core-approval-voting = { path = "../core/approval-voting" } -polkadot-approval-distribution = { path = "../network/approval-distribution" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-approval-distribution = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-timestamp = { workspace = true, default-features = true } -schnorrkel = { version = "0.11.4", default-features = false } +schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = "0.6.2" -rand_chacha = { version = "0.3.1" } -paste = "1.0.14" -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -pyroscope = { version = "0.5.7" } -pyroscope_pprofrs = "0.2.7" -strum = { version = "0.24", features = ["derive"] } +rand_core = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +pyroscope = { workspace = true } +pyroscope_pprofrs = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index 146da57d44c4a..cae1a30914da7 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -16,3 +16,5 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 6b17e62c20aa3..7edb48e302a46 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -16,3 +16,5 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index e946c28e8ef5d..7c24f50e6af55 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -16,3 +16,6 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null + diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml 
b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml index 8f4b050e72f27..fe2402faeccdc 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml @@ -16,3 +16,6 @@ TestConfiguration: peer_bandwidth: 524288000000 bandwidth: 524288000000 num_blocks: 10 + connectivity: 100 + latency: null + diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 5c0c65b11cdb5..4ac044ea3459a 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -60,7 +60,7 @@ use polkadot_node_subsystem_util::metrics::Metrics; use polkadot_overseer::Handle as OverseerHandleReal; use polkadot_primitives::{ BlockNumber, CandidateEvent, CandidateIndex, CandidateReceipt, Hash, Header, Slot, - ValidatorIndex, + ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use prometheus::Registry; use sc_keystore::LocalKeystore; @@ -68,6 +68,7 @@ use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; +use sp_keystore::Keystore; use std::{ cmp::max, collections::{HashMap, HashSet}, @@ -697,12 +698,12 @@ impl PeerMessageProducer { .expect("We can't handle unknown peers") .clone(); - self.network - .send_message_from_peer( - &peer_authority_id, - protocol_v3::ValidationProtocol::ApprovalDistribution(message.msg).into(), - ) - .unwrap_or_else(|_| panic!("Network should be up and running {:?}", sent_by)); + if let Err(err) = self.network.send_message_from_peer( + &peer_authority_id, + protocol_v3::ValidationProtocol::ApprovalDistribution(message.msg).into(), + ) { + gum::warn!(target: LOG_TARGET, ?sent_by, ?err, "Validator can not send message"); + } } // Queues a message to be sent by the peer identified by the `sent_by` value. 
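// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the hunk above replaces a hard
// `unwrap_or_else(|_| panic!(..))` around `send_message_from_peer` with an
// `if let Err(err) = .. { gum::warn!(..) }`, so one unreachable emulated peer
// no longer aborts the whole benchmark run. The same shape, reduced to plain
// std so it runs on its own; `Peer`, `SendError` and this
// `send_message_from_peer` are stand-ins, not the real subsystem-bench API.
// ---------------------------------------------------------------------------

#[derive(Debug)]
struct Peer(&'static str);

#[derive(Debug)]
struct SendError(&'static str);

// Stand-in for the network emulator's send path, which can fail when the
// target peer is not connected.
fn send_message_from_peer(peer: &Peer, msg: &str) -> Result<(), SendError> {
    if peer.0 == "offline" {
        Err(SendError("peer not connected"))
    } else {
        println!("{} -> {msg}", peer.0);
        Ok(())
    }
}

fn main() {
    let sent_by = Peer("offline");

    // Before the patch: panic on any send failure.
    // After the patch: log a warning and keep the benchmark going.
    if let Err(err) = send_message_from_peer(&sent_by, "approval-distribution message") {
        eprintln!("Validator can not send message (sent_by={sent_by:?}, err={err:?})");
    }
}
// ------------------------- end of editor's sketch --------------------------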
@@ -785,6 +786,12 @@ fn build_overseer( let db: polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); let keystore = LocalKeystore::in_memory(); + keystore + .sr25519_generate_new( + ASSIGNMENT_KEY_TYPE_ID, + Some(state.test_authorities.key_seeds.get(NODE_UNDER_TEST as usize).unwrap().as_str()), + ) + .unwrap(); let system_clock = PastSystemClock::new(SystemClock {}, state.delta_tick_from_generated.clone()); @@ -987,11 +994,12 @@ pub async fn bench_approvals_run( "polkadot_parachain_subsystem_bounded_received", Some(("subsystem_name", "approval-distribution-subsystem")), |value| { - gum::info!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); + gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 }, ) .await; + gum::info!("Requesting approval votes ms"); for info in &state.blocks { @@ -1031,7 +1039,7 @@ pub async fn bench_approvals_run( "polkadot_parachain_subsystem_bounded_received", Some(("subsystem_name", "approval-distribution-subsystem")), |value| { - gum::info!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); + gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 }, ) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index be9dbd55cb6f9..ee45ea05c925a 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -28,7 +28,7 @@ use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ node_features, AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, GroupRotationInfo, IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, - SessionInfo, ValidatorIndex, + SessionInfo, ValidationCode, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; @@ -288,6 +288,15 @@ impl MockRuntimeApi { }; tx.send(Ok((groups, group_rotation_info))).unwrap(); }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + ) => { + let validation_code = ValidationCode(Vec::new()); + if let Err(err) = tx.send(Ok(Some(validation_code))) { + gum::error!(target: LOG_TARGET, ?err, "validation code wasn't received"); + } + }, // Long term TODO: implement more as needed. 
message => { unimplemented!("Unexpected runtime-api message: {:?}", message) diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index 57678e8e8d4a1..d3229291673c6 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -11,19 +11,19 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -parking_lot = "0.12.1" -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-subsystem-util = { path = "../subsystem-util" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } +async-trait = { workspace = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } -sc-client-api = { path = "../../../substrate/client/api" } -sc-utils = { path = "../../../substrate/client/utils" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-keystore = { path = "../../../substrate/client/keystore" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } +sc-client-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs index 375121c374637..bdb0647fee6f5 100644 --- a/polkadot/node/subsystem-test-helpers/src/lib.rs +++ b/polkadot/node/subsystem-test-helpers/src/lib.rs @@ -36,7 +36,7 @@ use std::{ convert::Infallible, future::Future, pin::Pin, - sync::Arc, + sync::{atomic::AtomicUsize, Arc}, task::{Context, Poll, Waker}, time::Duration, }; @@ -146,12 +146,13 @@ pub fn single_item_sink() -> (SingleItemSink, SingleItemStream) { #[derive(Clone)] pub struct TestSubsystemSender { tx: mpsc::UnboundedSender, + message_counter: MessageCounter, } /// Construct a sender/receiver pair. 
pub fn sender_receiver() -> (TestSubsystemSender, mpsc::UnboundedReceiver) { let (tx, rx) = mpsc::unbounded(); - (TestSubsystemSender { tx }, rx) + (TestSubsystemSender { tx, message_counter: MessageCounter::default() }, rx) } #[async_trait::async_trait] @@ -161,6 +162,11 @@ where OutgoingMessage: Send + 'static, { async fn send_message(&mut self, msg: OutgoingMessage) { + self.send_message_with_priority::(msg).await; + } + + async fn send_message_with_priority(&mut self, msg: OutgoingMessage) { + self.message_counter.increment(P::priority()); self.tx.send(msg.into()).await.expect("test overseer no longer live"); } @@ -168,6 +174,14 @@ where &mut self, msg: OutgoingMessage, ) -> Result<(), TrySendError> { + self.try_send_message_with_priority::(msg) + } + + fn try_send_message_with_priority( + &mut self, + msg: OutgoingMessage, + ) -> Result<(), TrySendError> { + self.message_counter.increment(P::priority()); self.tx.unbounded_send(msg.into()).expect("test overseer no longer live"); Ok(()) } @@ -277,6 +291,9 @@ pub struct TestSubsystemContextHandle { /// Direct access to the receiver. pub rx: mpsc::UnboundedReceiver, + + /// Message counter over subsystems. + pub message_counter: MessageCounter, } impl TestSubsystemContextHandle { @@ -322,6 +339,34 @@ pub fn make_subsystem_context( make_buffered_subsystem_context(spawner, 0) } +/// Message counter over subsystems. +#[derive(Default, Clone)] +pub struct MessageCounter { + total: Arc, + with_high_priority: Arc, +} + +impl MessageCounter { + /// Increment the message counter. + pub fn increment(&mut self, priority_level: overseer::PriorityLevel) { + self.total.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + if matches!(priority_level, overseer::PriorityLevel::High) { + self.with_high_priority.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + } + } + + /// Reset the message counter. + pub fn reset(&mut self) { + self.total.store(0, std::sync::atomic::Ordering::SeqCst); + self.with_high_priority.store(0, std::sync::atomic::Ordering::SeqCst); + } + + /// Get the messages with high priority count. + pub fn with_high_priority(&self) -> usize { + self.with_high_priority.load(std::sync::atomic::Ordering::SeqCst) + } +} + /// Make a test subsystem context with buffered overseer channel. Some tests (e.g. /// `dispute-coordinator`) create too many parallel operations and deadlock unless /// the channel is buffered. Usually `buffer_size=1` is enough. 
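Aside (not part of the patch): the `MessageCounter` introduced in the hunk above is cloned into both the `TestSubsystemSender` and the `TestSubsystemContextHandle`, so a test can observe how many messages, and how many high-priority messages, a subsystem sent. Below is a minimal sketch of the counter on its own, assuming `MessageCounter` is reachable at the crate root of `polkadot-node-subsystem-test-helpers` and that the priority enum used by `increment` (with `Normal`/`High` variants) is re-exported as `polkadot_overseer::PriorityLevel`; both paths are assumptions for illustration.

```rust
// Sketch under the assumptions stated above; not part of the patch itself.
use polkadot_node_subsystem_test_helpers::MessageCounter;
// Assumed re-export path for the priority enum matched on by `increment`.
use polkadot_overseer::PriorityLevel;

fn main() {
    let mut counter = MessageCounter::default();
    // A clone shares the same underlying `Arc<AtomicUsize>` values, which is
    // how the context handle and the sender stay in sync in the test helpers.
    let observer = counter.clone();

    // The test sender calls `increment` once per forwarded message; only
    // `PriorityLevel::High` bumps the high-priority count.
    counter.increment(PriorityLevel::Normal);
    counter.increment(PriorityLevel::High);
    assert_eq!(observer.with_high_priority(), 1);

    // `reset` clears both counters, e.g. between phases of a test.
    counter.reset();
    assert_eq!(observer.with_high_priority(), 0);
}
```

In a real subsystem test the counter would not be driven by hand as above; the sender increments it automatically inside `send_message_with_priority` / `try_send_message_with_priority`, and assertions read it through the handle's `message_counter` field.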
@@ -331,15 +376,23 @@ pub fn make_buffered_subsystem_context( ) -> (TestSubsystemContext>, TestSubsystemContextHandle) { let (overseer_tx, overseer_rx) = mpsc::channel(buffer_size); let (all_messages_tx, all_messages_rx) = mpsc::unbounded(); + let message_counter = MessageCounter::default(); ( TestSubsystemContext { - tx: TestSubsystemSender { tx: all_messages_tx }, + tx: TestSubsystemSender { + tx: all_messages_tx, + message_counter: message_counter.clone(), + }, rx: overseer_rx, spawn: SpawnGlue(spawner), message_buffer: VecDeque::new(), }, - TestSubsystemContextHandle { tx: overseer_tx, rx: all_messages_rx }, + TestSubsystemContextHandle { + tx: overseer_tx, + rx: all_messages_rx, + message_counter: message_counter.clone(), + }, ) } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 0178b193cba8c..c8fc324699e17 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -10,26 +10,26 @@ license.workspace = true workspace = true [dependencies] -derive_more = "0.99.17" -fatality = "0.1.1" -futures = "0.3.30" -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-statement-table = { path = "../../statement-table" } -polkadot-node-jaeger = { path = "../jaeger" } -orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } -sc-network = { path = "../../../substrate/client/network" } -sc-network-types = { path = "../../../substrate/client/network/types" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -smallvec = "1.8.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } +futures = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-statement-table = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } thiserror = 
{ workspace = true } -async-trait = "0.1.79" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +async-trait = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 722a97989bce0..ee937bca05bfe 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -85,7 +85,7 @@ pub enum CandidateBackingMessage { /// candidates of the same para that follow it in the input vector. In other words, assuming /// candidates are supplied in dependency order, we must ensure that this dependency order is /// preserved. - GetBackedCandidates( + GetBackableCandidates( HashMap>, oneshot::Sender>>, ), diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index b7fb75b94b2c7..98ea21f250eda 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -10,47 +10,47 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.79" -futures = "0.3.30" -futures-channel = "0.3.23" -itertools = "0.11" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -parking_lot = "0.12.1" -pin-project = "1.0.9" -rand = "0.8.5" +async-trait = { workspace = true } +futures = { workspace = true } +futures-channel = { workspace = true } +itertools = { workspace = true } +codec = { features = ["derive"], workspace = true } +parking_lot = { workspace = true, default-features = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = "0.1.1" -gum = { package = "tracing-gum", path = "../gum" } -derive_more = "0.99.17" -schnellru = "0.2.1" +fatality = { workspace = true } +gum = { workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +schnellru = { workspace = true } -polkadot-erasure-coding = { path = "../../erasure-coding" } -polkadot-node-subsystem = { path = "../subsystem" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-jaeger = { path = "../jaeger" } -polkadot-node-metrics = { path = "../metrics" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-primitives = { path = "../../primitives" } -polkadot-node-primitives = { path = "../primitives" } -polkadot-overseer = { path = "../overseer" } -metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +metered = { features = ["futures_channel"], workspace = true } -sp-core = { path = "../../../substrate/primitives/core" } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } 
-sp-keystore = { path = "../../../substrate/primitives/keystore" } -sc-client-api = { path = "../../../substrate/client/api" } +sp-core = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } -kvdb = "0.13.0" -parity-db = { version = "0.4.12" } +kvdb = { workspace = true } +parity-db = { workspace = true } [dev-dependencies] -assert_matches = "1.4.0" -env_logger = "0.11" -futures = { version = "0.3.30", features = ["thread-pool"] } +assert_matches = { workspace = true } +env_logger = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -lazy_static = "1.4.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -kvdb-shared-tests = "0.11.0" -tempfile = "3.1.0" -kvdb-memorydb = "0.13.0" +polkadot-node-subsystem-test-helpers = { workspace = true } +lazy_static = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +kvdb-shared-tests = { workspace = true } +tempfile = { workspace = true } +kvdb-memorydb = { workspace = true } diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index b166a54f75c46..b6cd73f412b33 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -31,7 +31,7 @@ const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; /// A snapshot of the runtime claim queue at an arbitrary relay chain block. #[derive(Default)] -pub struct ClaimQueueSnapshot(BTreeMap>); +pub struct ClaimQueueSnapshot(pub BTreeMap>); impl From>> for ClaimQueueSnapshot { fn from(claim_queue_snapshot: BTreeMap>) -> Self { @@ -56,6 +56,19 @@ impl ClaimQueueSnapshot { .iter() .filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?))) } + + /// Returns an iterator over all claims on the given core. + pub fn iter_claims_for_core( + &self, + core_index: &CoreIndex, + ) -> impl Iterator + '_ { + self.0.get(core_index).map(|c| c.iter()).into_iter().flatten() + } + + /// Returns an iterator over the whole claim queue. 
+ pub fn iter_all_claims(&self) -> impl Iterator)> + '_ { + self.0.iter() + } } // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index c59c1f88e3399..8edfea9e26bf5 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -10,6 +10,6 @@ license.workspace = true workspace = true [dependencies] -polkadot-overseer = { path = "../overseer" } -polkadot-node-subsystem-types = { path = "../subsystem-types" } -polkadot-node-jaeger = { path = "../jaeger" } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 0b49866ee2aec..587af659fbd2d 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -10,35 +10,35 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } # Polkadot dependencies -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -polkadot-test-service = { path = "../service" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-test-runtime = { workspace = true } +polkadot-test-service = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } # Substrate dependencies -substrate-test-client = { path = "../../../../substrate/test-utils/client" } -sc-service = { path = "../../../../substrate/client/service" } -sc-block-builder = { path = "../../../../substrate/client/block-builder" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-offchain = { path = "../../../../substrate/client/offchain" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-api = { path = "../../../../substrate/primitives/api" } -sp-timestamp = { path = "../../../../substrate/primitives/timestamp" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -sp-io = { path = "../../../../substrate/primitives/io" } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } +substrate-test-client = { workspace = true } +sc-service = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, 
default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = "0.3.30" +sp-keyring = { workspace = true, default-features = true } +futures = { workspace = true } [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 3fc6d060870b1..8eb6105f98e25 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -10,60 +10,60 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.30" -hex = "0.4.3" -gum = { package = "tracing-gum", path = "../../gum" } -rand = "0.8.5" +futures = { workspace = true } +hex = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tempfile = "3.2.0" -tokio = "1.37" +tempfile = { workspace = true } +tokio = { workspace = true, default-features = true } # Polkadot dependencies -polkadot-overseer = { path = "../../overseer" } -polkadot-primitives = { path = "../../../primitives" } -polkadot-parachain-primitives = { path = "../../../parachain" } -polkadot-rpc = { path = "../../../rpc" } -polkadot-runtime-common = { path = "../../../runtime/common" } -polkadot-service = { path = "../../service" } -polkadot-node-subsystem = { path = "../../subsystem" } -polkadot-node-primitives = { path = "../../primitives" } -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -test-runtime-constants = { path = "../../../runtime/test-runtime/constants" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-rpc = { workspace = true, default-features = true } +polkadot-runtime-common = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-test-runtime = { workspace = true } +test-runtime-constants = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } # Substrate dependencies -sp-authority-discovery = { path = "../../../../substrate/primitives/authority-discovery" } -sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } -sp-consensus-babe = { path = "../../../../substrate/primitives/consensus/babe" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -frame-system = { path = "../../../../substrate/frame/system" } -sc-consensus-grandpa = { path = "../../../../substrate/client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../../substrate/primitives/consensus/grandpa" } -sp-inherents = { path = "../../../../substrate/primitives/inherents" } 
-pallet-staking = { path = "../../../../substrate/frame/staking" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } -sc-chain-spec = { path = "../../../../substrate/client/chain-spec" } -sc-cli = { path = "../../../../substrate/client/cli" } -sc-client-api = { path = "../../../../substrate/client/api" } -sc-consensus = { path = "../../../../substrate/client/consensus/common" } -sc-network = { path = "../../../../substrate/client/network" } -sc-tracing = { path = "../../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../../substrate/client/transaction-pool" } -sc-service = { path = "../../../../substrate/client/service", default-features = false } -sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -substrate-test-client = { path = "../../../../substrate/test-utils/client" } +sp-authority-discovery = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-service = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } -substrate-test-utils = { path = "../../../../substrate/test-utils" } -tokio = { version = "1.37", features = ["macros"] } +pallet-balances = { workspace = true } +substrate-test-utils = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } [features] runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml 
b/polkadot/node/zombienet-backchannel/Cargo.toml index 31662ccfc4649..a9bf1f5ef093a 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -12,14 +12,14 @@ license.workspace = true workspace = true [dependencies] -tokio = { version = "1.24.2", default-features = false, features = ["macros", "net", "rt-multi-thread", "sync"] } -url = "2.3.1" -tokio-tungstenite = "0.20.1" -futures-util = "0.3.30" -lazy_static = "1.4.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } +tokio = { features = ["macros", "net", "rt-multi-thread", "sync"], workspace = true } +url = { workspace = true } +tokio-tungstenite = { workspace = true } +futures-util = { workspace = true, default-features = true } +lazy_static = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +reqwest = { features = ["rustls-tls"], workspace = true } thiserror = { workspace = true } -gum = { package = "tracing-gum", path = "../gum" } +gum = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 11e8e3ce6d843..9d0518fd46ade 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -13,15 +13,14 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-std = { path = "../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-weights = { path = "../../substrate/primitives/weights", default-features = false } -polkadot-core-primitives = { path = "../core-primitives", default-features = false } -derive_more = "0.99.11" -bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-weights = { workspace = true } +polkadot-core-primitives = { workspace = true } +derive_more = { workspace = true, default-features = true } +bounded-collections = { features = ["serde"], workspace = true } # all optional crates. 
serde = { features = ["alloc", "derive"], workspace = true } @@ -37,7 +36,6 @@ std = [ "serde/std", "sp-core/std", "sp-runtime/std", - "sp-std/std", "sp-weights/std", ] runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] diff --git a/polkadot/parachain/src/lib.rs b/polkadot/parachain/src/lib.rs index bd75296bf8371..8941b7fbb911e 100644 --- a/polkadot/parachain/src/lib.rs +++ b/polkadot/parachain/src/lib.rs @@ -51,3 +51,5 @@ mod wasm_api; #[cfg(all(not(feature = "std"), feature = "wasm-api"))] pub use wasm_api::*; + +extern crate alloc; diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index d92bbee8d28d1..c5757928c3fc2 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -17,7 +17,7 @@ //! Primitive types which are strictly necessary from a parachain-execution point //! of view. -use sp_std::vec::Vec; +use alloc::vec::Vec; use bounded_collections::{BoundedVec, ConstU32}; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; @@ -89,14 +89,14 @@ impl ValidationCode { #[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, PartialOrd, Ord, TypeInfo)] pub struct ValidationCodeHash(Hash); -impl sp_std::fmt::Display for ValidationCodeHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Display for ValidationCodeHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { self.0.fmt(f) } } -impl sp_std::fmt::Debug for ValidationCodeHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for ValidationCodeHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{:?}", self.0) } } @@ -119,9 +119,9 @@ impl From<[u8; 32]> for ValidationCodeHash { } } -impl sp_std::fmt::LowerHex for ValidationCodeHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - sp_std::fmt::LowerHex::fmt(&self.0, f) +impl core::fmt::LowerHex for ValidationCodeHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::LowerHex::fmt(&self.0, f) } } @@ -225,7 +225,7 @@ impl IsSystem for Id { } } -impl sp_std::ops::Add for Id { +impl core::ops::Add for Id { type Output = Self; fn add(self, other: u32) -> Self { @@ -233,7 +233,7 @@ impl sp_std::ops::Add for Id { } } -impl sp_std::ops::Sub for Id { +impl core::ops::Sub for Id { type Output = Self; fn sub(self, other: u32) -> Self { diff --git a/polkadot/parachain/src/wasm_api.rs b/polkadot/parachain/src/wasm_api.rs index f0c832666284c..1c557c9ae5058 100644 --- a/polkadot/parachain/src/wasm_api.rs +++ b/polkadot/parachain/src/wasm_api.rs @@ -22,7 +22,7 @@ /// function's entry point. 
#[cfg(not(feature = "std"))] pub unsafe fn load_params(params: *const u8, len: usize) -> crate::primitives::ValidationParams { - let mut slice = sp_std::slice::from_raw_parts(params, len); + let mut slice = core::slice::from_raw_parts(params, len); codec::Decode::decode(&mut slice).expect("Invalid input data") } diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index c58b11a11b01f..9f35653f957f3 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -11,14 +11,14 @@ publish = false workspace = true [dependencies] -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +tiny-keccak = { features = ["keccak"], workspace = true } +codec = { features = ["derive"], workspace = true } -test-parachain-adder = { path = "adder" } -test-parachain-halt = { path = "halt" } +test-parachain-adder = { workspace = true } +test-parachain-halt = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../substrate/primitives/core" } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index e0bbe177eedce..7a150b75d5cdb 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -12,18 +12,17 @@ publish = false workspace = true [dependencies] -polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = ["global"] } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +codec = { features = ["derive"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } +dlmalloc = { features = ["global"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } +sp-io = { features = ["disable_allocator"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] -std = ["codec/std", "polkadot-parachain-primitives/std", "sp-io/std", "sp-std/std"] +std = ["codec/std", "polkadot-parachain-primitives/std", "sp-io/std"] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 996735e8c8bf8..061378a76a82e 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +codec = { features = 
["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -test-parachain-adder = { path = ".." } -polkadot-primitives = { path = "../../../../primitives" } -polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } -polkadot-node-primitives = { path = "../../../../node/primitives" } -polkadot-node-subsystem = { path = "../../../../node/subsystem" } +test-parachain-adder = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } -sc-cli = { path = "../../../../../substrate/client/cli" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sc-service = { path = "../../../../../substrate/client/service" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-parachain-primitives = { path = "../../.." } -polkadot-test-service = { path = "../../../../node/test/service" } -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { path = "../../../../../substrate/test-utils" } -sc-service = { path = "../../../../../substrate/client/service" } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/adder/src/lib.rs b/polkadot/parachain/test-parachains/adder/src/lib.rs index 28914f02511de..7e8d1bb1e1383 100644 --- a/polkadot/parachain/test-parachains/adder/src/lib.rs +++ b/polkadot/parachain/test-parachains/adder/src/lib.rs @@ -18,6 +18,8 @@ #![no_std] +extern crate alloc; + use codec::{Decode, Encode}; use tiny_keccak::{Hasher as _, Keccak}; diff --git a/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs b/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs index 7dba7a964d3b0..9c3c77f7350b9 100644 --- a/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs +++ b/polkadot/parachain/test-parachains/adder/src/wasm_validation.rs @@ -17,10 +17,10 @@ //! WASM validation for adder parachain. 
use crate::{BlockData, HeadData}; +use alloc::vec::Vec; use codec::{Decode, Encode}; use core::panic; use polkadot_parachain_primitives::primitives::{HeadData as GenericHeadData, ValidationResult}; -use sp_std::vec::Vec; #[no_mangle] pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { @@ -37,10 +37,8 @@ pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { polkadot_parachain_primitives::write_result(&ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, - upward_messages: sp_std::vec::Vec::new().try_into().expect("empty vec fits into bounds"), - horizontal_messages: sp_std::vec::Vec::new() - .try_into() - .expect("empty vec fits into bounds"), + upward_messages: alloc::vec::Vec::new().try_into().expect("empty vec fits into bounds"), + horizontal_messages: alloc::vec::Vec::new().try_into().expect("empty vec fits into bounds"), processed_downward_messages: 0, hrmp_watermark: params.relay_parent_number, }) diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index 1bdd4392ad313..f8272f6ed1968 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -14,8 +14,8 @@ workspace = true [dependencies] [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } -rustversion = "1.0.6" +substrate-wasm-builder = { workspace = true, default-features = true } +rustversion = { workspace = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 4d3d2abaeafed..4b2e12ebf4354 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -12,18 +12,17 @@ license.workspace = true workspace = true [dependencies] -polkadot-parachain-primitives = { path = "../..", default-features = false, features = ["wasm-api"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false } -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = ["global"] } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +codec = { features = ["derive"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } +dlmalloc = { features = ["global"], workspace = true } log = { workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } +sp-io = { features = ["disable_allocator"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -32,5 +31,4 @@ std = [ "log/std", "polkadot-parachain-primitives/std", "sp-io/std", - "sp-std/std", ] diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 288549c2c268a..5760258c70ea5 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml 
@@ -15,30 +15,30 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +codec = { features = ["derive"], workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -test-parachain-undying = { path = ".." } -polkadot-primitives = { path = "../../../../primitives" } -polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } -polkadot-node-primitives = { path = "../../../../node/primitives" } -polkadot-node-subsystem = { path = "../../../../node/subsystem" } +test-parachain-undying = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-cli = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } -sc-cli = { path = "../../../../../substrate/client/cli" } -sp-core = { path = "../../../../../substrate/primitives/core" } -sc-service = { path = "../../../../../substrate/client/service" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } [dev-dependencies] -polkadot-parachain-primitives = { path = "../../.." } -polkadot-test-service = { path = "../../../../node/test/service" } -polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"] } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { path = "../../../../../substrate/test-utils" } -sc-service = { path = "../../../../../substrate/client/service" } -sp-keyring = { path = "../../../../../substrate/primitives/keyring" } +substrate-test-utils = { workspace = true } +sc-service = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/undying/src/lib.rs b/polkadot/parachain/test-parachains/undying/src/lib.rs index dc056e64fa23f..e4ec7e99346bb 100644 --- a/polkadot/parachain/test-parachains/undying/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/src/lib.rs @@ -18,8 +18,10 @@ #![no_std] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::vec::Vec; use tiny_keccak::{Hasher as _, Keccak}; #[cfg(not(feature = "std"))] diff --git a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs index 23fac43a3c731..46b66aa518e49 100644 --- a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs +++ b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs @@ -37,8 +37,8 @@ pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { 
polkadot_parachain_primitives::write_result(&ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, - upward_messages: sp_std::vec::Vec::new().try_into().expect("empty vec fits within bounds"), - horizontal_messages: sp_std::vec::Vec::new() + upward_messages: alloc::vec::Vec::new().try_into().expect("empty vec fits within bounds"), + horizontal_messages: alloc::vec::Vec::new() .try_into() .expect("empty vec fits within bounds"), processed_downward_messages: 0, diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index d6df077b88b77..8f7ec314ecffe 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -10,28 +10,27 @@ description = "Shared primitives used by Polkadot runtime" workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } -hex-literal = "0.4.1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } -log = { workspace = true, default-features = false } +bitvec = { features = ["alloc", "serde"], workspace = true } +hex-literal = { workspace = true, default-features = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } +log = { workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -sp-application-crypto = { path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } -sp-inherents = { path = "../../substrate/primitives/inherents", default-features = false } -sp-core = { path = "../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -sp-api = { path = "../../substrate/primitives/api", default-features = false } -sp-arithmetic = { path = "../../substrate/primitives/arithmetic", default-features = false, features = ["serde"] } -sp-authority-discovery = { path = "../../substrate/primitives/authority-discovery", default-features = false, features = ["serde"] } -sp-consensus-slots = { path = "../../substrate/primitives/consensus/slots", default-features = false, features = ["serde"] } -sp-io = { path = "../../substrate/primitives/io", default-features = false } -sp-keystore = { path = "../../substrate/primitives/keystore", optional = true, default-features = false } -sp-staking = { path = "../../substrate/primitives/staking", default-features = false, features = ["serde"] } -sp-std = { package = "sp-std", path = "../../substrate/primitives/std", default-features = false } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-inherents = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { features = ["serde"], workspace = true } +sp-authority-discovery = { features = ["serde"], workspace = true } +sp-consensus-slots = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-staking = { features = ["serde"], workspace = true } -polkadot-core-primitives = { path = "../core-primitives", default-features = false } -polkadot-parachain-primitives = { path = "../parachain", default-features = false } +polkadot-core-primitives = { workspace = 
true } +polkadot-parachain-primitives = { workspace = true } [features] default = ["std"] @@ -55,7 +54,6 @@ std = [ "sp-keystore?/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 061794ca06d1b..73736fd4a3d6b 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -31,6 +31,8 @@ pub mod vstaging; // unstable functions. pub mod runtime_api; +extern crate alloc; + // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. pub use v7::{ diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 7bd92be35c159..b4816ad15075d 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -121,12 +121,12 @@ use crate::{ SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use polkadot_core_primitives as pcp; -use polkadot_parachain_primitives::primitives as ppp; -use sp_std::{ +use alloc::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::*, + vec::Vec, }; +use polkadot_core_primitives as pcp; +use polkadot_parachain_primitives::primitives as ppp; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. diff --git a/polkadot/primitives/src/v7/async_backing.rs b/polkadot/primitives/src/v7/async_backing.rs index a82d843d28bf1..55d436e30de07 100644 --- a/polkadot/primitives/src/v7/async_backing.rs +++ b/polkadot/primitives/src/v7/async_backing.rs @@ -18,6 +18,7 @@ use super::*; +use alloc::vec::Vec; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_core::RuntimeDebug; diff --git a/polkadot/primitives/src/v7/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs index e58cf3e76cc2c..bfd42ec30bd39 100644 --- a/polkadot/primitives/src/v7/executor_params.rs +++ b/polkadot/primitives/src/v7/executor_params.rs @@ -22,11 +22,12 @@ //! done in `polkadot-node-core-pvf`. use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind}; +use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, Encode}; +use core::{ops::Deref, time::Duration}; use polkadot_core_primitives::Hash; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; -use sp_std::{collections::btree_map::BTreeMap, ops::Deref, time::Duration, vec, vec::Vec}; /// Default maximum number of wasm values allowed for the stack during execution of a PVF. 
pub const DEFAULT_LOGICAL_STACK_MAX: u32 = 65536; @@ -134,21 +135,21 @@ impl ExecutorParamsHash { } } -impl sp_std::fmt::Display for ExecutorParamsHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Display for ExecutorParamsHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { self.0.fmt(f) } } -impl sp_std::fmt::Debug for ExecutorParamsHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for ExecutorParamsHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{:?}", self.0) } } -impl sp_std::fmt::LowerHex for ExecutorParamsHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - sp_std::fmt::LowerHex::fmt(&self.0, f) +impl core::fmt::LowerHex for ExecutorParamsHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::LowerHex::fmt(&self.0, f) } } @@ -159,21 +160,21 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { #[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, PartialOrd, Ord, TypeInfo)] pub struct ExecutorParamsPrepHash(Hash); -impl sp_std::fmt::Display for ExecutorParamsPrepHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Display for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { self.0.fmt(f) } } -impl sp_std::fmt::Debug for ExecutorParamsPrepHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{:?}", self.0) } } -impl sp_std::fmt::LowerHex for ExecutorParamsPrepHash { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - sp_std::fmt::LowerHex::fmt(&self.0, f) +impl core::fmt::LowerHex for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::LowerHex::fmt(&self.0, f) } } diff --git a/polkadot/primitives/src/v7/metrics.rs b/polkadot/primitives/src/v7/metrics.rs index 1a29471c5450a..1d66c9848a7c4 100644 --- a/polkadot/primitives/src/v7/metrics.rs +++ b/polkadot/primitives/src/v7/metrics.rs @@ -16,8 +16,8 @@ //! Runtime metric primitives. +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::prelude::*; /// Runtime metric operations. #[derive(Encode, Decode)] @@ -42,7 +42,7 @@ pub struct RuntimeMetricUpdate { } fn vec_to_str<'a>(v: &'a Vec, default: &'static str) -> &'a str { - return sp_std::str::from_utf8(v).unwrap_or(default) + return alloc::str::from_utf8(v).unwrap_or(default) } impl RuntimeMetricLabels { @@ -99,7 +99,7 @@ pub trait AsStr { impl AsStr for RuntimeMetricLabel { fn as_str(&self) -> Option<&str> { - sp_std::str::from_utf8(&self.0).ok() + alloc::str::from_utf8(&self.0).ok() } } diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs index 6b7985847a106..06b7046520835 100644 --- a/polkadot/primitives/src/v7/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -16,15 +16,17 @@ //! `V7` Primitives. 
+use alloc::{ + vec, + vec::{IntoIter, Vec}, +}; use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_std::{ +use core::{ marker::PhantomData, - prelude::*, slice::{Iter, IterMut}, - vec::IntoIter, }; +use scale_info::TypeInfo; use sp_application_crypto::KeyTypeId; use sp_arithmetic::traits::{BaseArithmetic, Saturating}; @@ -172,10 +174,10 @@ pub type ValidatorSignature = validator_app::Signature; /// A declarations of storage keys where an external observer can find some interesting data. pub mod well_known_keys { use super::{HrmpChannelId, Id, WellKnownKey}; + use alloc::vec::Vec; use codec::Encode as _; use hex_literal::hex; use sp_io::hashing::twox_64; - use sp_std::prelude::*; // A note on generating these magic values below: // @@ -617,13 +619,13 @@ impl CommittedCandidateReceipt { } impl PartialOrd for CommittedCandidateReceipt { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl Ord for CommittedCandidateReceipt { - fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { // TODO: compare signatures or something more sane // https://github.com/paritytech/polkadot/issues/222 self.descriptor() @@ -984,7 +986,7 @@ impl GroupRotationInfo { return GroupIndex(0) } - let cores = sp_std::cmp::min(cores, u32::MAX as usize); + let cores = core::cmp::min(cores, u32::MAX as usize); let blocks_since_start = self.now.saturating_sub(self.session_start_block); let rotations = blocks_since_start / self.group_rotation_frequency; @@ -1006,7 +1008,7 @@ impl GroupRotationInfo { return CoreIndex(0) } - let cores = sp_std::cmp::min(cores, u32::MAX as usize); + let cores = core::cmp::min(cores, u32::MAX as usize); let blocks_since_start = self.now.saturating_sub(self.session_start_block); let rotations = blocks_since_start / self.group_rotation_frequency; let rotations = rotations % cores as u32; @@ -1870,7 +1872,7 @@ pub fn effective_minimum_backing_votes( group_len: usize, configured_minimum_backing_votes: u32, ) -> usize { - sp_std::cmp::min(group_len, configured_minimum_backing_votes as usize) + core::cmp::min(group_len, configured_minimum_backing_votes as usize) } /// Information about validator sets of a session. @@ -1966,7 +1968,7 @@ impl PvfCheckStatement { pub struct WellKnownKey { /// The raw storage key. pub key: Vec, - _p: sp_std::marker::PhantomData, + _p: core::marker::PhantomData, } impl From> for WellKnownKey { diff --git a/polkadot/primitives/src/v7/signed.rs b/polkadot/primitives/src/v7/signed.rs index 62e4df2385038..f819b379a30ae 100644 --- a/polkadot/primitives/src/v7/signed.rs +++ b/polkadot/primitives/src/v7/signed.rs @@ -17,11 +17,11 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; +use alloc::vec::Vec; #[cfg(feature = "std")] use sp_application_crypto::AppCrypto; #[cfg(feature = "std")] use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use sp_std::prelude::Vec; use sp_core::RuntimeDebug; use sp_runtime::traits::AppVerify; @@ -57,7 +57,7 @@ pub struct UncheckedSigned { /// The signature by the validator of the signed payload. signature: ValidatorSignature, /// This ensures the real payload is tracked at the typesystem level. 
- real_payload: sp_std::marker::PhantomData, + real_payload: core::marker::PhantomData, } impl, RealPayload: Encode> Signed { @@ -163,7 +163,7 @@ impl, RealPayload: Encode> Signed, RealPayload: Encode> Signed, RealPayload: Encode> UncheckedSigned, RealPayload: Encode> UncheckedSigned NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The Candidate Backing subsystem ensures every parablock considered for relay block inclusion has been seconded by at least one validator, and approved by a quorum. Parablocks for which not enough validators will assert correctness are discarded. If the block later proves invalid, the initial backers are slashable; this gives Polkadot a rational threat diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index 701f6c87caff0..61278621cf565 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -1,5 +1,9 @@ # Prospective Parachains +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + ## Overview **Purpose:** Tracks and handles prospective parachain fragments and informs diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 1fed671170c7c..432d9ab69bab9 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -1,5 +1,9 @@ # Collator Protocol +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The Collator Protocol implements the network protocol by which collators and validators communicate. It is used by collators to distribute collations to validators and used by validators to accept collations by collators. diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md index b017259da8c08..64727d39fabe0 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -1,5 +1,9 @@ # Provisioner +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. 
This subsystem is responsible for providing the necessary data to all diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index 0700a781d4263..5031433cf5a1d 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -1,5 +1,9 @@ # Inclusion Pallet +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + The inclusion module is responsible for inclusion and availability of scheduled parachains. It also manages the UMP dispatch queue of each parachain. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 7972c706b9ee1..f21e1a59c1a4c 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -1,5 +1,9 @@ # `ParaInherent` +> NOTE: This module has suffered changes for the elastic scaling implementation. As a result, parts of this document may +be out of date and will be updated at a later time. Issue tracking the update: +https://github.com/paritytech/polkadot-sdk/issues/3699 + This module is responsible for providing all data given to the runtime by the block author to the various parachains modules. The entry-point is mandatory, in that it must be invoked exactly once within every block, and it is also "inherent", in that it is provided with no origin by the block author. The data within it carries its own diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index cceb4dc5a93b3..d01528d4dee07 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -10,31 +10,31 @@ description = "Polkadot specific RPC functionality." 
workspace = true [dependencies] -jsonrpsee = { version = "0.22", features = ["server"] } -polkadot-primitives = { path = "../primitives" } -sc-client-api = { path = "../../substrate/client/api" } -sp-blockchain = { path = "../../substrate/primitives/blockchain" } -sp-keystore = { path = "../../substrate/primitives/keystore" } -sp-runtime = { path = "../../substrate/primitives/runtime" } -sp-api = { path = "../../substrate/primitives/api" } -sp-application-crypto = { path = "../../substrate/primitives/application-crypto" } -sp-consensus = { path = "../../substrate/primitives/consensus/common" } -sp-consensus-babe = { path = "../../substrate/primitives/consensus/babe" } -sp-consensus-beefy = { path = "../../substrate/primitives/consensus/beefy" } -sc-chain-spec = { path = "../../substrate/client/chain-spec" } -sc-rpc = { path = "../../substrate/client/rpc" } -sc-rpc-spec-v2 = { path = "../../substrate/client/rpc-spec-v2" } -sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } -sc-consensus-babe-rpc = { path = "../../substrate/client/consensus/babe/rpc" } -sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } -sc-consensus-beefy-rpc = { path = "../../substrate/client/consensus/beefy/rpc" } -sc-consensus-epochs = { path = "../../substrate/client/consensus/epochs" } -sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } -sc-consensus-grandpa-rpc = { path = "../../substrate/client/consensus/grandpa/rpc" } -sc-sync-state-rpc = { path = "../../substrate/client/sync-state-rpc" } -sc-transaction-pool-api = { path = "../../substrate/client/transaction-pool/api" } -substrate-frame-rpc-system = { path = "../../substrate/utils/frame/rpc/system" } -mmr-rpc = { path = "../../substrate/client/merkle-mountain-range/rpc" } -pallet-transaction-payment-rpc = { path = "../../substrate/frame/transaction-payment/rpc" } -sp-block-builder = { path = "../../substrate/primitives/block-builder" } -substrate-state-trie-migration-rpc = { path = "../../substrate/utils/frame/rpc/state-trie-migration-rpc" } +jsonrpsee = { features = ["server"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-babe-rpc = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-beefy-rpc = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, 
default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index da89bd2251acf..cda6f3240dd2e 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -10,66 +10,65 @@ license.workspace = true workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +impl-trait-for-tuples = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +rustc-hex = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc"], workspace = true } serde_derive = { workspace = true } -static_assertions = "1.1.0" +static_assertions = { workspace = true, default-features = true } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features = ["serde"] } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-npos-elections = { features = ["serde"], workspace = true } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } -pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-fn = { path = 
"../../../substrate/frame/staking/reward-fn", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false, optional = true } -pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-broker = { workspace = true } +pallet-fast-unstake = { workspace = true } +pallet-identity = { workspace = true } +pallet-session = { workspace = true } +frame-support = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-fn = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-vesting = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-treasury = { workspace = true } +pallet-asset-rate = { optional = true, workspace = true } +pallet-election-provider-multi-phase = { workspace = true } +frame-election-provider-support = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-babe = { optional = true, workspace = true } -polkadot-primitives = { path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.7.0", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } +polkadot-primitives = { workspace = true } +libsecp256k1 = { workspace = true } +polkadot-runtime-parachains = { workspace = true } -slot-range-helper = { path = "slot_range_helper", default-features = false } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false, optional = true } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +slot-range-helper = { workspace = true } +xcm = { workspace = true } +xcm-executor = { optional = true, workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -frame-support-test = { path = "../../../substrate/frame/support/test" } -pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-treasury = { path = "../../../substrate/frame/treasury" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } +hex-literal = { workspace = true, default-features = true } +frame-support-test = { workspace = true } +pallet-babe = { workspace = true, default-features = true } +pallet-treasury = { workspace = true, default-features = true } +sp-keystore = { workspace = 
true, default-features = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -libsecp256k1 = "0.7.0" -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } +libsecp256k1 = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } [features] default = ["std"] @@ -111,7 +110,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 47e8fea240025..02810b75283f8 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -10,12 +10,11 @@ description = "Helper crate for generating slot ranges for the Polkadot runtime. workspace = true [dependencies] -paste = "1.0" -enumn = "0.1.12" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +paste = { workspace = true, default-features = true } +enumn = { workspace = true } +codec = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] -std = ["codec/std", "sp-runtime/std", "sp-std/std"] +std = ["codec/std", "sp-runtime/std"] diff --git a/polkadot/runtime/common/slot_range_helper/src/lib.rs b/polkadot/runtime/common/slot_range_helper/src/lib.rs index f907390bc91b5..0dd893a284f3c 100644 --- a/polkadot/runtime/common/slot_range_helper/src/lib.rs +++ b/polkadot/runtime/common/slot_range_helper/src/lib.rs @@ -19,10 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] pub use codec::{Decode, Encode}; +pub use core::{ops::Add, result}; pub use enumn::N; pub use paste; pub use sp_runtime::traits::CheckedSub; -pub use sp_std::{ops::Add, result}; /// This macro generates a `SlotRange` enum of arbitrary length for use in the Slot Auction /// mechanism on Polkadot. 
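// A hedged sketch (hypothetical variant names, not the real macro expansion) of the shape of
// enum that `slot-range-helper` generates: one variant per contiguous `(first, last)` window of
// lease periods, plus small helpers such as the window length used by the auction logic.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DemoSlotRange {
    ZeroZero, // covers lease periods [0, 0]
    ZeroOne,  // covers lease periods [0, 1]
    OneOne,   // covers lease periods [1, 1]
}

impl DemoSlotRange {
    /// Number of lease periods covered by this range.
    fn len(self) -> u32 {
        match self {
            DemoSlotRange::ZeroZero | DemoSlotRange::OneOne => 1,
            DemoSlotRange::ZeroOne => 2,
        }
    }
}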
diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index b52509bbdf498..c13ee0c572dd6 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -18,13 +18,13 @@ use super::{Config, MaxPermanentSlots, MaxTemporarySlots, Pallet, LOG_TARGET}; use frame_support::traits::{Get, GetStorageVersion, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] -use frame_support::ensure; +use alloc::vec::Vec; #[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; +use frame_support::ensure; pub mod v1 { use super::*; - pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 368708f256403..dd39789e10cfd 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -30,6 +30,7 @@ use crate::{ slots::{self, Pallet as Slots, WeightInfo as SlotsWeightInfo}, traits::{LeaseError, Leaser, Registrar}, }; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{pallet_prelude::*, traits::Currency}; use frame_system::pallet_prelude::*; @@ -41,7 +42,6 @@ use polkadot_runtime_parachains::{ }; use scale_info::TypeInfo; use sp_runtime::traits::{One, Saturating, Zero}; -use sp_std::prelude::*; const LOG_TARGET: &str = "runtime::assigned_slots"; @@ -698,24 +698,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl parachains_configuration::Config for Test { diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 199b18fba51dc..78f20d918bab5 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -22,7 +22,9 @@ use crate::{ slot_range::SlotRange, traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, }; +use alloc::{vec, vec::Vec}; use codec::Decode; +use core::mem::swap; use frame_support::{ dispatch::DispatchResult, ensure, @@ -33,7 +35,6 @@ use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; use polkadot_primitives::Id as ParaId; use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; -use sp_std::{mem::swap, prelude::*}; type CurrencyOf = <::Leaser as Leaser>>::Currency; type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< @@ -674,7 +675,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, parameter_types, - traits::{ConstU32, EitherOfDiverse, OnFinalize, OnInitialize}, + traits::{EitherOfDiverse, OnFinalize, OnInitialize}, }; use frame_system::{EnsureRoot, EnsureSignedBy}; use pallet_balances; @@ -725,25 +726,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaxReserves: u32 = 50; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] @@ -1426,7 +1411,8 @@ mod tests { #[test] fn initialize_winners_in_ending_period_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); run_to_block(1); assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); let para_1 = ParaId::from(1_u32); @@ -1539,7 +1525,8 @@ mod tests { #[test] fn less_winning_samples_work() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); EndingPeriod::set(30); SampleLength::set(10); diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 54208e7fd1351..162bf01c38432 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -16,7 +16,11 @@ //! Pallet to process claims from Ethereum addresses. 
+#[cfg(not(feature = "std"))] +use alloc::{format, string::String}; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; +use core::fmt::Debug; use frame_support::{ ensure, traits::{Currency, Get, IsSubType, VestingSchedule}, @@ -35,9 +39,6 @@ use sp_runtime::{ }, RuntimeDebug, }; -#[cfg(not(feature = "std"))] -use sp_std::alloc::{format, string::String}; -use sp_std::{fmt::Debug, prelude::*}; type CurrencyOf = <::VestingSchedule as VestingSchedule< ::AccountId, @@ -150,8 +151,8 @@ impl PartialEq for EcdsaSignature { } } -impl sp_std::fmt::Debug for EcdsaSignature { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for EcdsaSignature { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "EcdsaSignature({:?})", &self.0[..]) } } @@ -596,12 +597,12 @@ where ::RuntimeCall: IsSubType>, { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "PrevalidateAttests") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -612,7 +613,7 @@ where { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { - Self(sp_std::marker::PhantomData) + Self(core::marker::PhantomData) } } @@ -708,7 +709,7 @@ mod tests { assert_err, assert_noop, assert_ok, derive_impl, dispatch::{GetDispatchInfo, Pays}, ord_parameter_types, parameter_types, - traits::{ConstU32, ExistenceRequirement, WithdrawReasons}, + traits::{ExistenceRequirement, WithdrawReasons}, }; use pallet_balances; use sp_runtime::{ @@ -738,24 +739,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! 
{ diff --git a/polkadot/runtime/common/src/crowdloan/migration.rs b/polkadot/runtime/common/src/crowdloan/migration.rs index 3afd6b3fbc94b..0ee3872a366ea 100644 --- a/polkadot/runtime/common/src/crowdloan/migration.rs +++ b/polkadot/runtime/common/src/crowdloan/migration.rs @@ -21,7 +21,7 @@ use frame_support::{ Twox64Concat, }; -pub struct MigrateToTrackInactiveV2(sp_std::marker::PhantomData); +pub struct MigrateToTrackInactiveV2(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToTrackInactiveV2 { fn on_runtime_upgrade() -> Weight { let on_chain_version = Pallet::::on_chain_storage_version(); diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 1dbba363de566..8cf288197e3dd 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -55,6 +55,7 @@ use crate::{ slot_range::SlotRange, traits::{Auctioneer, Registrar}, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ ensure, @@ -77,7 +78,6 @@ use sp_runtime::{ }, MultiSignature, MultiSigner, RuntimeDebug, }; -use sp_std::vec::Vec; type CurrencyOf = <::Auctioneer as Auctioneer>>::Currency; type LeasePeriodOf = <::Auctioneer as Auctioneer>>::LeasePeriod; @@ -832,16 +832,16 @@ impl Pallet { impl crate::traits::OnSwap for Pallet { fn on_swap(one: ParaId, other: ParaId) { - Funds::::mutate(one, |x| Funds::::mutate(other, |y| sp_std::mem::swap(x, y))) + Funds::::mutate(one, |x| Funds::::mutate(other, |y| core::mem::swap(x, y))) } } #[cfg(any(feature = "runtime-benchmarks", test))] mod crypto { + use alloc::vec::Vec; use sp_core::ed25519; use sp_io::crypto::{ed25519_generate, ed25519_sign}; use sp_runtime::{MultiSignature, MultiSigner}; - use sp_std::vec::Vec; pub fn create_ed25519_pubkey(seed: Vec) -> MultiSigner { ed25519_generate(0.into(), Some(seed)).into() @@ -860,7 +860,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + traits::{OnFinalize, OnInitialize}, }; use polkadot_primitives::Id as ParaId; use sp_core::H256; @@ -918,24 +918,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -980,7 +965,7 @@ mod tests { let fund = Funds::::get(para).unwrap(); let account_id = Crowdloan::fund_account_id(fund.fund_index); if winner { - let ed = ::ExistentialDeposit::get(); + let ed: u64 = ::ExistentialDeposit::get(); let free_balance = Balances::free_balance(&account_id); Balances::reserve(&account_id, free_balance - ed) .expect("should be able to reserve free balance minus ED"); @@ -1815,7 +1800,8 @@ mod tests { #[test] fn withdraw_from_finished_works() { new_test_ext().execute_with(|| { - assert_eq!(::ExistentialDeposit::get(), 1); + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); let para = new_para(); let index = NextFundIndex::::get(); let account_id = Crowdloan::fund_account_id(index); @@ -1982,7 +1968,6 @@ mod benchmarking { use polkadot_runtime_parachains::paras; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::{Bounded, CheckedSub}; - use sp_std::prelude::*; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs index 7d02e24b53681..126c886280e6e 100644 --- a/polkadot/runtime/common/src/identity_migrator.rs +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -172,6 +172,7 @@ impl OnReapIdentity for () { #[benchmarks] mod benchmarks { use super::*; + use alloc::{boxed::Box, vec, vec::Vec}; use codec::Encode; use frame_support::traits::EnsureOrigin; use frame_system::RawOrigin; @@ -180,7 +181,6 @@ mod benchmarks { traits::{Bounded, Hash, StaticLookup}, Saturating, }; - use sp_std::{boxed::Box, vec::Vec, *}; const SEED: u32 = 0; diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index ac2288c906a53..9d61cd018731f 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -28,7 +28,7 @@ use sp_runtime::{traits::TryConvert, Perquintill, RuntimeDebug}; use xcm::VersionedLocation; /// Logic for the author to get a portion of fees. 
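// A hedged, standalone sketch (toy numbers and an illustrative ratio, not the runtime's actual
// configuration) of the fee-handling idea behind the adapters below: the imbalance collected
// from fees and tips is split between the treasury and the block author.
fn demo_split_fees(fees: u128, tips: u128) -> (u128, u128) {
    let to_author_from_fees = fees / 5; // an illustrative 20% share for the author
    let to_treasury = fees - to_author_from_fees; // the remaining 80% to the treasury
    // In this sketch, tips go entirely to the block author.
    (to_treasury, to_author_from_fees + tips)
}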
-pub struct ToAuthor(sp_std::marker::PhantomData); +pub struct ToAuthor(core::marker::PhantomData); impl OnUnbalanced>> for ToAuthor where R: pallet_balances::Config + pallet_authorship::Config, @@ -44,7 +44,7 @@ where } } -pub struct DealWithFees(sp_std::marker::PhantomData); +pub struct DealWithFees(core::marker::PhantomData); impl OnUnbalanced>> for DealWithFees where R: pallet_balances::Config + pallet_authorship::Config + pallet_treasury::Config, @@ -249,7 +249,7 @@ mod tests { parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, - ConstU32, FindAuthor, + FindAuthor, }, weights::Weight, PalletId, @@ -315,20 +315,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { @@ -339,13 +328,8 @@ mod tests { impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = (); - type ProposalBondMinimum = (); - type ProposalBondMaximum = (); type SpendPeriod = (); type Burn = (); type BurnDestination = (); diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index e77035b3f6b41..7a689a517eaa2 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -24,6 +24,7 @@ use crate::{ slots, traits::{AuctionStatus, Auctioneer, Leaser, Registrar as RegistrarT}, }; +use alloc::sync::Arc; use codec::Encode; use frame_support::{ assert_noop, assert_ok, derive_impl, parameter_types, @@ -50,7 +51,6 @@ use sp_runtime::{ transaction_validity::TransactionPriority, AccountId32, BuildStorage, MultiSignature, }; -use sp_std::sync::Arc; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlockU32; @@ -173,23 +173,12 @@ impl pallet_timestamp::Config for Test { parameter_types! 
{ pub static ExistentialDeposit: Balance = 1; - pub const MaxReserves: u32 = 50; } - +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl configuration::Config for Test { diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 6e50384f68c9c..41e1cdbab8011 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -41,6 +41,8 @@ mod integration_tests; #[cfg(test)] mod mock; +extern crate alloc; + use frame_support::{ parameter_types, traits::{ConstU32, Currency, OneSessionHandler}, @@ -169,7 +171,7 @@ static_assertions::assert_eq_size!(polkadot_primitives::Balance, u128); /// A placeholder since there is currently no provided session key handler for parachain validator /// keys. -pub struct ParachainSessionKeyPlaceholder(sp_std::marker::PhantomData); +pub struct ParachainSessionKeyPlaceholder(core::marker::PhantomData); impl sp_runtime::BoundToRuntimeAppPublic for ParachainSessionKeyPlaceholder { type Public = ValidatorId; } @@ -198,7 +200,7 @@ impl OneSessionHandler /// A placeholder since there is currently no provided session key handler for parachain validator /// keys. -pub struct AssignmentSessionKeyPlaceholder(sp_std::marker::PhantomData); +pub struct AssignmentSessionKeyPlaceholder(core::marker::PhantomData); impl sp_runtime::BoundToRuntimeAppPublic for AssignmentSessionKeyPlaceholder { type Public = AssignmentId; } diff --git a/polkadot/runtime/common/src/mock.rs b/polkadot/runtime/common/src/mock.rs index 6534110cc2104..54170b07fa62c 100644 --- a/polkadot/runtime/common/src/mock.rs +++ b/polkadot/runtime/common/src/mock.rs @@ -37,7 +37,7 @@ thread_local! 
{ static MANAGERS: RefCell>> = RefCell::new(HashMap::new()); } -pub struct TestRegistrar(sp_std::marker::PhantomData); +pub struct TestRegistrar(core::marker::PhantomData); impl Registrar for TestRegistrar { type AccountId = T::AccountId; diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index 18bb6bbfb559a..6b110d2ff5d5e 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -25,7 +25,7 @@ pub struct ParaInfoV1 { } pub struct VersionUncheckedMigrateToV1( - sp_std::marker::PhantomData<(T, UnlockParaIds)>, + core::marker::PhantomData<(T, UnlockParaIds)>, ); impl> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 9bbb152f855f2..07f02e9265612 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -19,6 +19,8 @@ pub mod migration; +use alloc::{vec, vec::Vec}; +use core::result; use frame_support::{ dispatch::DispatchResult, ensure, @@ -34,7 +36,6 @@ use polkadot_runtime_parachains::{ paras::{self, ParaGenesisArgs, UpgradeStrategy}, Origin, ParaLifecycle, }; -use sp_std::{prelude::*, result}; use crate::traits::{OnSwap, Registrar}; use codec::{Decode, Encode}; @@ -210,7 +211,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, pub next_free_para_id: ParaId, } @@ -717,11 +718,10 @@ mod tests { use crate::{ mock::conclude_pvf_checking, paras_registrar, traits::Registrar as RegistrarTrait, }; + use alloc::collections::btree_map::BTreeMap; use frame_support::{ - assert_noop, assert_ok, derive_impl, - error::BadOrigin, - parameter_types, - traits::{ConstU32, OnFinalize, OnInitialize}, + assert_noop, assert_ok, derive_impl, parameter_types, + traits::{OnFinalize, OnInitialize}, }; use frame_system::limits; use pallet_balances::Error as BalancesError; @@ -731,11 +731,10 @@ mod tests { use sp_io::TestExternalities; use sp_keyring::Sr25519Keyring; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, transaction_validity::TransactionPriority, BuildStorage, Perbill, }; - use sp_std::collections::btree_map::BTreeMap; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlockU32; @@ -799,20 +798,11 @@ mod tests { pub const ExistentialDeposit: Balance = 1; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; + type Balance = Balance; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } impl shared::Config for Test { diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index 3ff8d4ac08e15..af93c70b4783f 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -16,6 +16,7 
@@ //! A simple wrapper allowing `Sudo` to call into `paras` routines. +use alloc::boxed::Box; use codec::Encode; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -26,7 +27,6 @@ use polkadot_runtime_parachains::{ paras::{self, AssignCoretime, ParaGenesisArgs}, ParaLifecycle, }; -use sp_std::boxed::Box; #[frame_support::pallet] pub mod pallet { diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index 5ae6b422618e0..d650548b8ac39 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -16,6 +16,7 @@ //! Pallet to process purchase of DOTs. +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{ pallet_prelude::*, @@ -29,7 +30,6 @@ use sp_runtime::{ traits::{CheckedAdd, Saturating, Verify, Zero}, AnySignature, DispatchError, DispatchResult, Permill, RuntimeDebug, }; -use sp_std::prelude::*; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -534,24 +534,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 900e04eaff188..333f14c6608ac 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -25,6 +25,7 @@ pub mod migration; use crate::traits::{LeaseError, Leaser, Registrar}; +use alloc::{vec, vec::Vec}; use frame_support::{ pallet_prelude::*, traits::{Currency, ReservableCurrency}, @@ -34,7 +35,6 @@ use frame_system::pallet_prelude::*; pub use pallet::*; use polkadot_primitives::Id as ParaId; use sp_runtime::traits::{CheckedConversion, CheckedSub, Saturating, Zero}; -use sp_std::prelude::*; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -309,7 +309,7 @@ impl Pallet { // Useful when trying to clean up a parachain leases, as this would tell // you all the balances you need to unreserve. fn all_deposits_held(para: ParaId) -> Vec<(T::AccountId, BalanceOf)> { - let mut tracker = sp_std::collections::btree_map::BTreeMap::new(); + let mut tracker = alloc::collections::btree_map::BTreeMap::new(); Leases::::get(para).into_iter().for_each(|lease| match lease { Some((who, amount)) => match tracker.get(&who) { Some(prev_amount) => @@ -329,7 +329,7 @@ impl Pallet { impl crate::traits::OnSwap for Pallet { fn on_swap(one: ParaId, other: ParaId) { - Leases::::mutate(one, |x| Leases::::mutate(other, |y| sp_std::mem::swap(x, y))) + Leases::::mutate(one, |x| Leases::::mutate(other, |y| core::mem::swap(x, y))) } } @@ -551,24 +551,9 @@ mod tests { type MaxConsumers = frame_support::traits::ConstU32<16>; } - parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - } - + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; } parameter_types! { diff --git a/polkadot/runtime/common/src/traits.rs b/polkadot/runtime/common/src/traits.rs index 2ed1fb8af9bea..6e49abcee98b2 100644 --- a/polkadot/runtime/common/src/traits.rs +++ b/polkadot/runtime/common/src/traits.rs @@ -16,12 +16,12 @@ //! Traits used across pallets for Polkadot. +use alloc::vec::*; use frame_support::{ dispatch::DispatchResult, traits::{Currency, ReservableCurrency}, }; use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode}; -use sp_std::vec::*; /// Parachain registration API. pub trait Registrar { @@ -56,7 +56,7 @@ pub trait Registrar { /// Remove any lock on the para registration. fn remove_lock(id: ParaId); - /// Register a Para ID under control of `who`. Registration may be be + /// Register a Para ID under control of `who`. Registration may be /// delayed by session rotation. fn register( who: Self::AccountId, diff --git a/polkadot/runtime/common/src/try_runtime.rs b/polkadot/runtime/common/src/try_runtime.rs index 81aa34317bfd7..b22e170329206 100644 --- a/polkadot/runtime/common/src/try_runtime.rs +++ b/polkadot/runtime/common/src/try_runtime.rs @@ -16,13 +16,13 @@ //! Common try-runtime only tests for runtimes. +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use frame_support::{ dispatch::RawOrigin, traits::{Get, Hooks}, }; use pallet_fast_unstake::{Pallet as FastUnstake, *}; use pallet_staking::*; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; /// register all inactive nominators for fast-unstake, and progress until they have all been /// processed. diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 5858a0ac3ca76..dace785a535b9 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -16,7 +16,9 @@ //! XCM sender for relay chain. +use alloc::vec::Vec; use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::traits::Get; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::Id as ParaId; @@ -25,7 +27,6 @@ use polkadot_runtime_parachains::{ dmp, FeeTracker, }; use sp_runtime::FixedPointNumber; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::prelude::*; use xcm_builder::InspectMessageQueues; use SendError::*; @@ -56,7 +57,7 @@ impl PriceForMessageDelivery for NoPriceForMessageDelivery { } /// Implementation of [`PriceForMessageDelivery`] which returns a fixed price. -pub struct ConstantPrice(sp_std::marker::PhantomData); +pub struct ConstantPrice(core::marker::PhantomData); impl> PriceForMessageDelivery for ConstantPrice { type Id = (); @@ -79,7 +80,7 @@ impl> PriceForMessageDelivery for ConstantPrice { /// - `B`: The base fee to pay for message delivery. /// - `M`: The fee to pay for each and every byte of the message after encoding it. /// - `F`: A fee factor multiplier. It can be understood as the exponent term in the formula. 
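// Illustrative only: with the parameters documented above, the delivery price works out to
// roughly `fee_factor * (B + M * encoded_len)`. A standalone sketch with hypothetical constants,
// assuming the factor reported by `F` is a `FixedU128` multiplier:
fn demo_delivery_price(encoded_len: u128) -> u128 {
    use sp_runtime::{FixedPointNumber, FixedU128};
    let base: u128 = 1_000_000; // hypothetical `B`
    let per_byte: u128 = 1_000; // hypothetical `M`
    let factor = FixedU128::from_rational(3, 2); // hypothetical fee factor taken from `F`
    factor.saturating_mul_int(base.saturating_add(per_byte.saturating_mul(encoded_len)))
}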
-pub struct ExponentialPrice(sp_std::marker::PhantomData<(A, B, M, F)>); +pub struct ExponentialPrice(core::marker::PhantomData<(A, B, M, F)>); impl, B: Get, M: Get, F: FeeTracker> PriceForMessageDelivery for ExponentialPrice { @@ -169,7 +170,7 @@ pub struct ToParachainDeliveryHelper< ParaId, ToParaIdHelper, >( - sp_std::marker::PhantomData<( + core::marker::PhantomData<( XcmConfig, ExistentialDeposit, PriceForDelivery, @@ -223,7 +224,7 @@ impl< } // overestimate delivery fee - let overestimated_xcm = vec![ClearOrigin; 128].into(); + let overestimated_xcm = alloc::vec![ClearOrigin; 128].into(); let overestimated_fees = PriceForDelivery::price_for_delivery(Parachain::get(), &overestimated_xcm); @@ -258,6 +259,7 @@ impl EnsureForParachain for () { mod tests { use super::*; use crate::integration_tests::new_test_ext; + use alloc::vec; use frame_support::{assert_ok, parameter_types}; use polkadot_runtime_parachains::FeeTracker; use sp_runtime::FixedU128; diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 342c5a8850338..3709e1eb697ea 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -10,13 +10,12 @@ description = "Runtime metric interface for the Polkadot node" workspace = true [dependencies] -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +sp-tracing = { workspace = true } +codec = { workspace = true } +polkadot-primitives = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } -bs58 = { version = "0.5.0", default-features = false, features = ["alloc"] } +bs58 = { features = ["alloc"], workspace = true } [features] default = ["std"] @@ -25,7 +24,6 @@ std = [ "codec/std", "frame-benchmarking?/std", "polkadot-primitives/std", - "sp-std/std", "sp-tracing/std", ] runtime-metrics = ["frame-benchmarking", "sp-tracing/with-tracing"] diff --git a/polkadot/runtime/metrics/src/lib.rs b/polkadot/runtime/metrics/src/lib.rs index 6164d71f112a4..479ec7a69c3aa 100644 --- a/polkadot/runtime/metrics/src/lib.rs +++ b/polkadot/runtime/metrics/src/lib.rs @@ -22,6 +22,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-metrics")] mod with_runtime_metrics; #[cfg(feature = "runtime-metrics")] diff --git a/polkadot/runtime/metrics/src/with_runtime_metrics.rs b/polkadot/runtime/metrics/src/with_runtime_metrics.rs index 1339df9ff6879..979d5eda9afc6 100644 --- a/polkadot/runtime/metrics/src/with_runtime_metrics.rs +++ b/polkadot/runtime/metrics/src/with_runtime_metrics.rs @@ -22,14 +22,13 @@ const TRACING_TARGET: &'static str = "metrics"; +use alloc::vec::Vec; use codec::Encode; use polkadot_primitives::{ metric_definitions::{CounterDefinition, CounterVecDefinition, HistogramDefinition}, RuntimeMetricLabelValues, RuntimeMetricOp, RuntimeMetricUpdate, }; -use sp_std::prelude::*; - /// Holds a set of counters that have different values for their labels, /// like Prometheus `CounterVec`. 
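// A minimal standalone sketch (not the runtime implementation) of the "counter with labels"
// idea mentioned above: one logical counter keeps an independent value per label combination,
// just like a Prometheus `CounterVec`.
fn demo_counter_vec() -> alloc::collections::BTreeMap<alloc::vec::Vec<&'static str>, u64> {
    let mut counter = alloc::collections::BTreeMap::new();
    *counter.entry(alloc::vec!["parachain=1000"]).or_insert(0) += 1;
    *counter.entry(alloc::vec!["parachain=2000"]).or_insert(0) += 1;
    *counter.entry(alloc::vec!["parachain=1000"]).or_insert(0) += 1; // this label set is now 2
    counter
}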
pub struct CounterVec { diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 250fee65beefe..7afdf49fe5516 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -10,66 +10,64 @@ license.workspace = true workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +impl-trait-for-tuples = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -derive_more = "0.99.17" -bitflags = "1.3.2" +derive_more = { workspace = true, default-features = true } +bitflags = { workspace = true } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true, default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-application-crypto = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } +sp-arithmetic = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-broker = { path = "../../../substrate/frame/broker", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", 
default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-babe = { workspace = true } +pallet-broker = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-session = { workspace = true } +pallet-staking = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-vesting = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +polkadot-primitives = { workspace = true } -rand = { version = "0.8.5", default-features = false } -rand_chacha = { version = "0.3.1", default-features = false } -static_assertions = { version = "1.1.0", optional = true } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-metrics = { path = "../metrics", default-features = false } -polkadot-core-primitives = { path = "../../core-primitives", default-features = false } +rand = { workspace = true } +rand_chacha = { workspace = true } +static_assertions = { optional = true, workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-metrics = { workspace = true } +polkadot-core-primitives = { workspace = true } [dev-dependencies] -futures = "0.3.30" -hex-literal = "0.4.1" -sp-keyring = { path = "../../../substrate/primitives/keyring" } -frame-support-test = { path = "../../../substrate/frame/support/test" } -sc-keystore = { path = "../../../substrate/client/keystore" } -polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } -thousands = "0.2.0" -assert_matches = "1" -rstest = "0.18.2" +futures = { workspace = true } +hex-literal = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +frame-support-test = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +thousands = { workspace = true } +assert_matches = { workspace = true } +rstest = { workspace = true } serde_json = { workspace = true, default-features = true } [features] @@ -98,7 +96,6 @@ std = [ "polkadot-runtime-metrics/std", "rand/std", "rand_chacha/std", - "rustc-hex/std", "scale-info/std", 
"serde/std", "sp-api/std", @@ -112,7 +109,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", "xcm-executor/std", "xcm/std", ] diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index e68ac2664b898..9ed007919b81b 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -34,14 +34,13 @@ use crate::{ ParaId, }; +use alloc::{vec, vec::Vec}; use frame_support::{defensive, pallet_prelude::*}; use frame_system::pallet_prelude::*; use pallet_broker::CoreAssignment; use polkadot_primitives::CoreIndex; use sp_runtime::traits::{One, Saturating}; -use sp_std::prelude::*; - pub use pallet::*; /// Fraction expressed as a nominator with an assumed denominator of 57,600. diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 81a0988ea67cd..9b0cbcb2d7d69 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -26,10 +26,10 @@ use crate::{ paras::{ParaGenesisArgs, ParaKind}, scheduler::common::Assignment, }; +use alloc::collections::btree_map::BTreeMap; use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; use pallet_broker::TaskId; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; -use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { let validation_code: ValidationCode = vec![1, 2, 3].into(); @@ -74,6 +74,9 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); + // Update the spot traffic and revenue on every block. + OnDemandAssigner::on_initialize(b + 1); + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. 
Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index ba6951a146921..b0ebfe77a9665 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -25,6 +25,7 @@ use crate::{ shared::Pallet as ParasShared, }; +use alloc::vec; use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 314be11adbeb7..03f63d7333b65 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -23,7 +23,7 @@ use frame_support::{ mod v0 { use super::*; - use sp_std::collections::vec_deque::VecDeque; + use alloc::collections::vec_deque::VecDeque; #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] pub(super) struct EnqueuedOrder { @@ -50,7 +50,7 @@ mod v1 { use crate::assigner_on_demand::LOG_TARGET; /// Migration to V1 - pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); + pub struct UncheckedMigrateToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -88,7 +88,7 @@ mod v1 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let n: u32 = v0::OnDemandQueue::::get().len() as u32; log::info!( @@ -100,7 +100,7 @@ mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + fn post_upgrade(state: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { log::info!(target: LOG_TARGET, "Running post_upgrade()"); ensure!( diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 043a36d99c497..f045e957a6907 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -31,38 +31,39 @@ //! occupying multiple cores in on-demand, we will likely add a separate order type, where the //! intent can be made explicit. 
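// A minimal illustrative sketch of the try-runtime round trip used by the
// migration hunk above: `pre_upgrade` hands back the pre-migration state as
// SCALE-encoded bytes and `post_upgrade` decodes those bytes again to check
// invariants once `on_runtime_upgrade` has run. The function names and the
// placeholder counts are hypothetical; only the signatures mirror
// `UncheckedOnRuntimeUpgrade`.
#[cfg(feature = "try-runtime")]
fn pre_upgrade_sketch() -> Result<alloc::vec::Vec<u8>, sp_runtime::TryRuntimeError> {
	use codec::Encode;
	// E.g. the number of queued on-demand orders before migrating.
	let queued_before: u32 = 0;
	Ok(queued_before.encode())
}

#[cfg(feature = "try-runtime")]
fn post_upgrade_sketch(state: alloc::vec::Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
	use codec::Decode;
	use frame_support::ensure;
	let queued_before: u32 =
		Decode::decode(&mut &state[..]).map_err(|_| "pre_upgrade state corrupted")?;
	// E.g. the number of orders found in the new queue layout after migrating.
	let queued_after: u32 = 0;
	ensure!(queued_before == queued_after, "migration must not lose queued orders");
	Ok(())
}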
+use sp_runtime::traits::Zero; mod benchmarking; pub mod migration; mod mock_helpers; +mod types; extern crate alloc; #[cfg(test)] mod tests; -use core::mem::take; - use crate::{configuration, paras, scheduler::common::Assignment}; - +use alloc::collections::BinaryHeap; +use core::mem::take; use frame_support::{ pallet_prelude::*, traits::{ + defensive_prelude::*, Currency, ExistenceRequirement::{self, AllowDeath, KeepAlive}, WithdrawReasons, }, + PalletId, }; -use frame_system::pallet_prelude::*; -use polkadot_primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use frame_system::{pallet_prelude::*, Pallet as System}; +use polkadot_primitives::{CoreIndex, Id as ParaId}; use sp_runtime::{ - traits::{One, SaturatedConversion}, + traits::{AccountIdConversion, One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; - -use alloc::collections::BinaryHeap; -use sp_std::{ - cmp::{Ord, Ordering, PartialOrd}, - prelude::*, +use types::{ + BalanceOf, CoreAffinityCount, EnqueuedOrder, QueuePushDirection, QueueStatusType, + SpotTrafficCalculationErr, }; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -87,217 +88,6 @@ impl WeightInfo for TestWeightInfo { } } -/// Meta data for full queue. -/// -/// This includes elements with affinity and free entries. -/// -/// The actual queue is implemented via multiple priority queues. One for each core, for entries -/// which currently have a core affinity and one free queue, with entries without any affinity yet. -/// -/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). -/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the -/// total complexity of all operations in the block should maintain above complexities. In -/// particular O(N) stays O(N), it should never be O(N*cores). -/// -/// More concrete rundown on complexity: -/// -/// - insert: O(1) for placing an order, O(log(N)) for push backs. -/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core -/// is already less work. -/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again -/// this divides per core. -/// -/// Reads still exist, also improved slightly, but worst case we fetch all entries. -#[derive(Encode, Decode, TypeInfo)] -struct QueueStatusType { - /// Last calculated traffic value. - traffic: FixedU128, - /// The next index to use. - next_index: QueueIndex, - /// Smallest index still in use. - /// - /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index - /// == 0`. - smallest_index: QueueIndex, - /// Indices that have been freed already. - /// - /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary - /// heap is roughly bounded in the number of on demand cores: - /// - /// For a single core, elements will always be processed in order. With each core added, a - /// level of out of order execution is added. - freed_indices: BinaryHeap, -} - -impl Default for QueueStatusType { - fn default() -> QueueStatusType { - QueueStatusType { - traffic: FixedU128::default(), - next_index: QueueIndex(0), - smallest_index: QueueIndex(0), - freed_indices: BinaryHeap::new(), - } - } -} - -impl QueueStatusType { - /// How many orders are queued in total? - /// - /// This includes entries which have core affinity. 
- fn size(&self) -> u32 { - self.next_index - .0 - .overflowing_sub(self.smallest_index.0) - .0 - .saturating_sub(self.freed_indices.len() as u32) - } - - /// Get current next index - /// - /// to use for an element newly pushed to the back of the queue. - fn push_back(&mut self) -> QueueIndex { - let QueueIndex(next_index) = self.next_index; - self.next_index = QueueIndex(next_index.overflowing_add(1).0); - QueueIndex(next_index) - } - - /// Push something to the front of the queue - fn push_front(&mut self) -> QueueIndex { - self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); - self.smallest_index - } - - /// The given index is no longer part of the queue. - /// - /// This updates `smallest_index` if need be. - fn consume_index(&mut self, removed_index: QueueIndex) { - if removed_index != self.smallest_index { - self.freed_indices.push(removed_index.reverse()); - return; - } - let mut index = self.smallest_index.0.overflowing_add(1).0; - // Even more to advance? - while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { - index = index.overflowing_add(1).0; - self.freed_indices.pop(); - } - self.smallest_index = QueueIndex(index); - } -} - -/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a -/// specific `ParaId`. -#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] -#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] -struct CoreAffinityCount { - core_index: CoreIndex, - count: u32, -} - -/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. -#[cfg_attr(test, derive(RuntimeDebug))] -enum QueuePushDirection { - Back, - Front, -} - -/// Shorthand for the Balance type the runtime is using. -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -/// Errors that can happen during spot traffic calculation. -#[derive(PartialEq, RuntimeDebug)] -enum SpotTrafficCalculationErr { - /// The order queue capacity is at 0. - QueueCapacityIsZero, - /// The queue size is larger than the queue capacity. - QueueSizeLargerThanCapacity, - /// Arithmetic error during division, either division by 0 or over/underflow. - Division, -} - -/// Type used for priority indices. -// NOTE: The `Ord` implementation for this type is unsound in the general case. -// Do not use it for anything but it's intended purpose. -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] -struct QueueIndex(u32); - -/// QueueIndex with reverse ordering. -/// -/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] -struct ReverseQueueIndex(u32); - -impl QueueIndex { - fn reverse(self) -> ReverseQueueIndex { - ReverseQueueIndex(self.0) - } -} - -impl Ord for QueueIndex { - fn cmp(&self, other: &Self) -> Ordering { - let diff = self.0.overflowing_sub(other.0).0; - if diff == 0 { - Ordering::Equal - } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { - Ordering::Greater - } else { - Ordering::Less - } - } -} - -impl PartialOrd for QueueIndex { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for ReverseQueueIndex { - fn cmp(&self, other: &Self) -> Ordering { - QueueIndex(other.0).cmp(&QueueIndex(self.0)) - } -} -impl PartialOrd for ReverseQueueIndex { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(&other)) - } -} - -/// Internal representation of an order after it has been enqueued already. 
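// A minimal illustrative sketch of the wrap-around comparison that the
// `Ord` impl for `QueueIndex` above relies on: indices are compared through
// `overflowing_sub`, so an index that has wrapped past `u32::MAX` still
// sorts as newer while the live window stays within the queue bound.
// `wraparound_newer` and `queue_bound` are hypothetical stand-ins for that
// impl and for `ON_DEMAND_MAX_QUEUE_MAX_SIZE`.
fn wraparound_newer(a: u32, b: u32, queue_bound: u32) -> bool {
	// `a` sorts after (is newer than) `b` when the wrapping distance from
	// `b` to `a` is non-zero and no larger than the queue bound.
	let diff = a.overflowing_sub(b).0;
	diff != 0 && diff <= queue_bound
}

// Example: with a bound of 10, index 2 sorts after `u32::MAX - 1` even
// though it is numerically smaller, because only four pushes separate them.
// assert!(wraparound_newer(2, u32::MAX - 1, 10));
// assert!(!wraparound_newer(u32::MAX - 1, 2, 10));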
-/// -/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards -/// to its elements) -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] -struct EnqueuedOrder { - para_id: ParaId, - idx: QueueIndex, -} - -impl EnqueuedOrder { - fn new(idx: QueueIndex, para_id: ParaId) -> Self { - Self { idx, para_id } - } -} - -impl PartialOrd for EnqueuedOrder { - fn partial_cmp(&self, other: &Self) -> Option { - match other.idx.partial_cmp(&self.idx) { - Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), - o => o, - } - } -} - -impl Ord for EnqueuedOrder { - fn cmp(&self, other: &Self) -> Ordering { - match other.idx.cmp(&self.idx) { - Ordering::Equal => other.para_id.cmp(&self.para_id), - o => o, - } - } -} - #[frame_support::pallet] pub mod pallet { @@ -324,6 +114,15 @@ pub mod pallet { /// The default value for the spot traffic multiplier. #[pallet::constant] type TrafficDefaultValue: Get; + + /// The maximum number of blocks some historical revenue + /// information stored for. + #[pallet::constant] + type MaxHistoricalRevenue: Get; + + /// Identifier for the internal revenue balance. + #[pallet::constant] + type PalletId: Get; } /// Creates an empty queue status for an empty queue with initial traffic value. @@ -365,6 +164,11 @@ pub mod pallet { EntriesOnEmpty, >; + /// Keeps track of accumulated revenue from on demand order sales. + #[pallet::storage] + pub type Revenue = + StorageValue<_, BoundedVec, T::MaxHistoricalRevenue>, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -386,6 +190,19 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { + // Update revenue information storage. + Revenue::::mutate(|revenue| { + if let Some(overdue) = + revenue.force_insert_keep_left(0, 0u32.into()).defensive_unwrap_or(None) + { + // We have some overdue revenue not claimed by the Coretime Chain, let's + // accumulate it at the oldest stored block + if let Some(last) = revenue.last_mut() { + *last = last.saturating_add(overdue); + } + } + }); + let config = configuration::ActiveConfig::::get(); // We need to update the spot traffic on block initialize in order to account for idle // blocks. @@ -393,8 +210,9 @@ pub mod pallet { Self::update_spot_traffic(&config, queue_status); }); - // 2 reads in config and queuestatus, at maximum 1 write to queuestatus. - T::DbWeight::get().reads_writes(2, 1) + // Reads: `Revenue`, `ActiveConfig`, `QueueStatus` + // Writes: `Revenue`, `QueueStatus` + T::DbWeight::get().reads_writes(3, 2) } } @@ -527,7 +345,8 @@ where } /// Helper function for `place_order_*` calls. Used to differentiate between placing orders - /// with a keep alive check or to allow the account to be reaped. + /// with a keep alive check or to allow the account to be reaped. The amount charged is + /// stored to the pallet account to be later paid out as revenue. /// /// Parameters: /// - `sender`: The sender of the call, funds will be withdrawn from this account. @@ -562,18 +381,40 @@ where // Is the current price higher than `max_amount` ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); - // Charge the sending account the spot price - let _ = T::Currency::withdraw( + ensure!( + queue_status.size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); + + // Charge the sending account the spot price. 
The amount will be teleported to the + // broker chain once it requests revenue information. + let amt = T::Currency::withdraw( &sender, spot_price, WithdrawReasons::FEE, existence_requirement, )?; - ensure!( - queue_status.size() < config.scheduler_params.on_demand_queue_max_size, - Error::::QueueFull - ); + // Consume the negative imbalance and deposit it into the pallet account. Make sure the + // account preserves even without the existential deposit. + let pot = Self::account_id(); + if !System::::account_exists(&pot) { + System::::inc_providers(&pot); + } + T::Currency::resolve_creating(&pot, amt); + + // Add the amount to the current block's (index 0) revenue information. + Revenue::::mutate(|bounded_revenue| { + if let Some(current_block) = bounded_revenue.get_mut(0) { + *current_block = current_block.saturating_add(spot_price); + } else { + // Revenue has already been claimed in the same block, including the block + // itself. It shouldn't normally happen as revenue claims in the future are + // not allowed. + bounded_revenue.try_push(spot_price).defensive_ok(); + } + }); + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, @@ -790,6 +631,29 @@ where }) } + /// Collect the revenue from the `when` blockheight + pub fn claim_revenue_until(when: BlockNumberFor) -> BalanceOf { + let now = >::block_number(); + let mut amount: BalanceOf = BalanceOf::::zero(); + Revenue::::mutate(|revenue| { + while !revenue.is_empty() { + let index = (revenue.len() - 1) as u32; + if when > now.saturating_sub(index.into()) { + amount = amount.saturating_add(revenue.pop().defensive_unwrap_or(0u32.into())); + } else { + break + } + } + }); + + amount + } + + /// Account of the pallet pot, where the funds from instantaneous coretime sale are accumulated. + pub fn account_id() -> T::AccountId { + T::PalletId::get().into_account_truncating() + } + /// Getter for the affinity tracker. 
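// A minimal illustrative model of the `Revenue` bookkeeping above, using a
// plain `Vec` instead of the runtime storage: `on_initialize` keeps index 0
// pointing at the current block by pushing a fresh zero every block, so the
// entry at index `i` always holds the revenue of block `now - i`, and
// `claim_revenue_until(when)` drains everything strictly older than `when`
// from the back. `claim_until` is a hypothetical stand-in, not pallet API.
fn claim_until(revenue: &mut Vec<u128>, now: u32, when: u32) -> u128 {
	let mut amount = 0u128;
	while !revenue.is_empty() {
		let index = (revenue.len() - 1) as u32;
		// The oldest stored entry belongs to block `now - index`; it is only
		// claimable while that block is strictly below `when`.
		if when > now.saturating_sub(index) {
			amount += revenue.pop().unwrap_or(0);
		} else {
			break;
		}
	}
	amount
}

// With `now = 15` and `revenue = [r15, r14, r13, r12, r11]`,
// `claim_until(&mut revenue, 15, 14)` pops and sums r11, r12 and r13, while
// r14 and r15 stay behind for a later claim, matching the "non-inclusive
// until" behaviour exercised in the tests below.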
#[cfg(test)] fn get_affinity_map(para_id: ParaId) -> Option { @@ -831,4 +695,9 @@ where fn get_traffic_default_value() -> FixedU128 { ::TrafficDefaultValue::get() } + + #[cfg(test)] + fn get_revenue() -> Vec> { + Revenue::::get().to_vec() + } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 5747413e71478..0bad4346cfd9d 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -17,7 +17,12 @@ use super::*; use crate::{ - assigner_on_demand::{mock_helpers::GenesisConfigBuilder, Error}, + assigner_on_demand::{ + self, + mock_helpers::GenesisConfigBuilder, + types::{QueueIndex, ReverseQueueIndex}, + Error, + }, initializer::SessionChangeNotification, mock::{ new_test_ext, Balances, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, @@ -25,10 +30,14 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, }; -use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use alloc::collections::btree_map::BTreeMap; +use core::cmp::{Ord, Ordering}; +use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; -use polkadot_primitives::{BlockNumber, SessionIndex, ValidationCode}; -use sp_std::collections::btree_map::BTreeMap; +use polkadot_primitives::{ + BlockNumber, SessionIndex, ValidationCode, ON_DEMAND_MAX_QUEUE_MAX_SIZE, +}; +use sp_runtime::traits::BadOrigin; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { let validation_code: ValidationCode = vec![1, 2, 3].into(); @@ -73,7 +82,7 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); - // We need to update the spot traffic on every block. + // Update the spot traffic and revenue on every block. OnDemandAssigner::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. @@ -81,16 +90,26 @@ fn run_to_block( } } -fn place_order(para_id: ParaId) { +fn place_order_run_to_blocknumber(para_id: ParaId, blocknumber: Option) { let alice = 100u64; let amt = 10_000_000u128; Balances::make_free_balance_be(&alice, amt); - run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + if let Some(bn) = blocknumber { + run_to_block(bn, |n| if n == bn { Some(Default::default()) } else { None }); + } OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id).unwrap() } +fn place_order_run_to_101(para_id: ParaId) { + place_order_run_to_blocknumber(para_id, Some(101)); +} + +fn place_order(para_id: ParaId) { + place_order_run_to_blocknumber(para_id, None); +} + #[test] fn spot_traffic_capacity_zero_returns_none() { match OnDemandAssigner::calculate_spot_traffic( @@ -377,8 +396,8 @@ fn push_back_assignment_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); // Add enough assignments to the order queue. - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Pop order a assert_eq!( @@ -424,9 +443,9 @@ fn affinity_prohibits_parallel_scheduling() { assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); // Add 2 assignments for para_a for every para_b. - place_order(para_a); - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Approximate having 1 core. 
for _ in 0..3 { @@ -448,9 +467,9 @@ fn affinity_prohibits_parallel_scheduling() { OnDemandAssigner::report_processed(para_b, 0.into()); // Add 2 assignments for para_a for every para_b. - place_order(para_a); - place_order(para_a); - place_order(para_b); + place_order_run_to_101(para_a); + place_order_run_to_101(para_a); + place_order_run_to_101(para_b); // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment for _ in 0..3 { @@ -490,7 +509,7 @@ fn affinity_changes_work() { // Add enough assignments to the order queue. for _ in 0..10 { - place_order(para_a); + place_order_run_to_101(para_a); } // There should be no affinity before the scheduler pops. @@ -554,7 +573,7 @@ fn new_affinity_for_a_core_must_come_from_free_entries() { // Place orders for all chains. parachains.iter().for_each(|chain| { - place_order(*chain); + place_order_run_to_101(*chain); }); // There are 4 entries in free_entries. @@ -679,8 +698,8 @@ fn queue_status_size_fn_works() { // Place orders for all chains. parachains.iter().for_each(|chain| { // 2 per chain for a total of 6 - place_order(*chain); - place_order(*chain); + place_order_run_to_101(*chain); + place_order_run_to_101(*chain); }); // 6 orders in free entries @@ -707,3 +726,112 @@ fn queue_status_size_fn_works() { assert_eq!(OnDemandAssigner::get_queue_status().size(), 4) }); } + +#[test] +fn revenue_information_fetching_works() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + schedule_blank_para(para_a, ParaKind::Parathread); + // Mock assigner sets max revenue history to 10. + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + let revenue = OnDemandAssigner::claim_revenue_until(10); + + // No revenue should be recorded. + assert_eq!(revenue, 0); + + // Place one order + place_order_run_to_blocknumber(para_a, Some(11)); + let revenue = OnDemandAssigner::get_revenue(); + let claim = OnDemandAssigner::claim_revenue_until(11); + + // Revenue until the current block is still zero as "until" is non-inclusive + assert_eq!(claim, 0); + + run_to_block(12, |n| if n == 12 { Some(Default::default()) } else { None }); + let claim = OnDemandAssigner::claim_revenue_until(12); + + // Revenue for a single order should be recorded and shouldn't have been pruned by the + // previous call + assert_eq!(claim, revenue[0]); + + // Place many orders + place_order(para_a); + place_order(para_a); + + run_to_block(13, |n| if n == 13 { Some(Default::default()) } else { None }); + + place_order(para_a); + + run_to_block(15, |n| if n == 14 { Some(Default::default()) } else { None }); + + let revenue = OnDemandAssigner::claim_revenue_until(15); + + // All 3 orders should be accounted for. 
+ assert_eq!(revenue, 30_000); + + // Place one order + place_order_run_to_blocknumber(para_a, Some(16)); + + let revenue = OnDemandAssigner::claim_revenue_until(15); + + // Order is not in range of the revenue_until call + assert_eq!(revenue, 0); + + run_to_block(21, |n| if n == 20 { Some(Default::default()) } else { None }); + let revenue = OnDemandAssigner::claim_revenue_until(21); + assert_eq!(revenue, 10_000); + + // Make sure overdue revenue is accumulated + for i in 21..=35 { + run_to_block(i, |n| if n % 10 == 0 { Some(Default::default()) } else { None }); + place_order(para_a); + } + run_to_block(36, |_| None); + let revenue = OnDemandAssigner::claim_revenue_until(36); + assert_eq!(revenue, 150_000); + }); +} + +#[test] +fn pot_account_is_immortal() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let pot = OnDemandAssigner::account_id(); + assert!(!System::account_exists(&pot)); + schedule_blank_para(para_a, ParaKind::Parathread); + // Mock assigner sets max revenue history to 10. + + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + place_order_run_to_blocknumber(para_a, Some(12)); + let purchase_revenue = Balances::free_balance(&pot); + assert!(purchase_revenue > 0); + + run_to_block(15, |_| None); + let _imb = ::Currency::withdraw( + &pot, + purchase_revenue, + WithdrawReasons::FEE, + ExistenceRequirement::AllowDeath, + ); + assert_eq!(Balances::free_balance(&pot), 0); + assert!(System::account_exists(&pot)); + assert_eq!(System::providers(&pot), 1); + + // One more cycle to make sure providers are not increased on every transition from zero + run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); + place_order_run_to_blocknumber(para_a, Some(22)); + let purchase_revenue = Balances::free_balance(&pot); + assert!(purchase_revenue > 0); + + run_to_block(25, |_| None); + let _imb = ::Currency::withdraw( + &pot, + purchase_revenue, + WithdrawReasons::FEE, + ExistenceRequirement::AllowDeath, + ); + assert_eq!(Balances::free_balance(&pot), 0); + assert!(System::account_exists(&pot)); + assert_eq!(System::providers(&pot), 1); + }); +} diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/types.rs b/polkadot/runtime/parachains/src/assigner_on_demand/types.rs new file mode 100644 index 0000000000000..96367b971fed5 --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_on_demand/types.rs @@ -0,0 +1,238 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! On demand module types. 
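// A minimal illustrative sketch of why the pot survives being drained in the
// `pot_account_is_immortal` test above: a provider reference keeps an account
// alive in `frame_system` even when its free balance drops to zero, so the
// pallet adds that reference exactly once, the first time the pot is used.
// `ensure_pot_exists` is a hypothetical helper, not pallet API.
fn ensure_pot_exists<T: frame_system::Config>(pot: &T::AccountId) {
	if !frame_system::Pallet::<T>::account_exists(pot) {
		// One provider reference is enough; it is intentionally never removed,
		// so the transition back to zero balance never reaps the account.
		let _ = frame_system::Pallet::<T>::inc_providers(pot);
	}
}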
+ +use super::{alloc, pallet::Config}; +use alloc::collections::BinaryHeap; +use core::cmp::{Ord, Ordering, PartialOrd}; +use frame_support::{ + pallet_prelude::{Decode, Encode, RuntimeDebug, TypeInfo}, + traits::Currency, +}; +use polkadot_primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use sp_runtime::FixedU128; + +/// Shorthand for the Balance type the runtime is using. +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// Meta data for full queue. +/// +/// This includes elements with affinity and free entries. +/// +/// The actual queue is implemented via multiple priority queues. One for each core, for entries +/// which currently have a core affinity and one free queue, with entries without any affinity yet. +/// +/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). +/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the +/// total complexity of all operations in the block should maintain above complexities. In +/// particular O(N) stays O(N), it should never be O(N*cores). +/// +/// More concrete rundown on complexity: +/// +/// - insert: O(1) for placing an order, O(log(N)) for push backs. +/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core +/// is already less work. +/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again +/// this divides per core. +/// +/// Reads still exist, also improved slightly, but worst case we fetch all entries. +#[derive(Encode, Decode, TypeInfo)] +pub struct QueueStatusType { + /// Last calculated traffic value. + pub traffic: FixedU128, + /// The next index to use. + pub next_index: QueueIndex, + /// Smallest index still in use. + /// + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index + /// == 0`. + pub smallest_index: QueueIndex, + /// Indices that have been freed already. + /// + /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary + /// heap is roughly bounded in the number of on demand cores: + /// + /// For a single core, elements will always be processed in order. With each core added, a + /// level of out of order execution is added. + pub freed_indices: BinaryHeap, +} + +impl Default for QueueStatusType { + fn default() -> QueueStatusType { + QueueStatusType { + traffic: FixedU128::default(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } + } +} + +impl QueueStatusType { + /// How many orders are queued in total? + /// + /// This includes entries which have core affinity. + pub fn size(&self) -> u32 { + self.next_index + .0 + .overflowing_sub(self.smallest_index.0) + .0 + .saturating_sub(self.freed_indices.len() as u32) + } + + /// Get current next index + /// + /// to use for an element newly pushed to the back of the queue. + pub fn push_back(&mut self) -> QueueIndex { + let QueueIndex(next_index) = self.next_index; + self.next_index = QueueIndex(next_index.overflowing_add(1).0); + QueueIndex(next_index) + } + + /// Push something to the front of the queue + pub fn push_front(&mut self) -> QueueIndex { + self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); + self.smallest_index + } + + /// The given index is no longer part of the queue. + /// + /// This updates `smallest_index` if need be. 
+ pub fn consume_index(&mut self, removed_index: QueueIndex) { + if removed_index != self.smallest_index { + self.freed_indices.push(removed_index.reverse()); + return; + } + let mut index = self.smallest_index.0.overflowing_add(1).0; + // Even more to advance? + while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { + index = index.overflowing_add(1).0; + self.freed_indices.pop(); + } + self.smallest_index = QueueIndex(index); + } +} + +/// Type used for priority indices. +// NOTE: The `Ord` implementation for this type is unsound in the general case. +// Do not use it for anything but it's intended purpose. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +pub struct QueueIndex(pub u32); + +/// QueueIndex with reverse ordering. +/// +/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +pub struct ReverseQueueIndex(pub u32); + +impl QueueIndex { + fn reverse(self) -> ReverseQueueIndex { + ReverseQueueIndex(self.0) + } +} + +impl Ord for QueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + let diff = self.0.overflowing_sub(other.0).0; + if diff == 0 { + Ordering::Equal + } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { + Ordering::Greater + } else { + Ordering::Less + } + } +} + +impl PartialOrd for QueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ReverseQueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + QueueIndex(other.0).cmp(&QueueIndex(self.0)) + } +} +impl PartialOrd for ReverseQueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + +/// Internal representation of an order after it has been enqueued already. +/// +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards +/// to its elements) +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +pub struct EnqueuedOrder { + pub para_id: ParaId, + pub idx: QueueIndex, +} + +impl EnqueuedOrder { + pub fn new(idx: QueueIndex, para_id: ParaId) -> Self { + Self { idx, para_id } + } +} + +impl PartialOrd for EnqueuedOrder { + fn partial_cmp(&self, other: &Self) -> Option { + match other.idx.partial_cmp(&self.idx) { + Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), + o => o, + } + } +} + +impl Ord for EnqueuedOrder { + fn cmp(&self, other: &Self) -> Ordering { + match other.idx.cmp(&self.idx) { + Ordering::Equal => other.para_id.cmp(&self.para_id), + o => o, + } + } +} + +/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a +/// specific `ParaId`. +#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, RuntimeDebug))] +pub struct CoreAffinityCount { + pub core_index: CoreIndex, + pub count: u32, +} + +/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. +#[cfg_attr(test, derive(RuntimeDebug))] +pub enum QueuePushDirection { + Back, + Front, +} + +/// Errors that can happen during spot traffic calculation. +#[derive(PartialEq, RuntimeDebug)] +pub enum SpotTrafficCalculationErr { + /// The order queue capacity is at 0. + QueueCapacityIsZero, + /// The queue size is larger than the queue capacity. + QueueSizeLargerThanCapacity, + /// Arithmetic error during division, either division by 0 or over/underflow. 
+ Division, +} diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs index 14cb1a8978602..817e43a7138dd 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs @@ -23,9 +23,9 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, }; +use alloc::collections::btree_map::BTreeMap; use frame_support::{assert_ok, pallet_prelude::*}; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; -use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { let validation_code: ValidationCode = vec![1, 2, 3].into(); diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index c046526ba372b..ec07cca2107e9 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -21,6 +21,11 @@ use crate::{ scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry}, session_info, shared, }; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, + vec, + vec::Vec, +}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -39,11 +44,6 @@ use sp_runtime::{ traits::{Header as HeaderT, One, TrailingZeroInput, Zero}, RuntimeAppPublic, }; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, - prelude::Vec, - vec, -}; fn mock_validation_code() -> ValidationCode { ValidationCode(vec![1, 2, 3]) @@ -112,7 +112,7 @@ pub(crate) struct BenchBuilder { fill_claimqueue: bool, /// Cores which should not be available when being populated with pending candidates. unavailable_cores: Vec, - _phantom: sp_std::marker::PhantomData, + _phantom: core::marker::PhantomData, } /// Paras inherent `enter` benchmark scenario. @@ -143,7 +143,7 @@ impl BenchBuilder { code_upgrade: None, fill_claimqueue: true, unavailable_cores: vec![], - _phantom: sp_std::marker::PhantomData::, + _phantom: core::marker::PhantomData::, } } diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index bffeab4a0d21b..d09962ef2b441 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -19,6 +19,7 @@ //! Configuration can change only at session boundaries and is buffered until then. use crate::{inclusion::MAX_UPWARD_MESSAGE_SIZE_BOUND, shared}; +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, DefaultNoBound}; use frame_system::pallet_prelude::*; @@ -31,7 +32,6 @@ use polkadot_primitives::{ MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill, Percent}; -use sp_std::prelude::*; #[cfg(test)] mod tests; @@ -345,7 +345,7 @@ pub enum InconsistentError { impl HostConfiguration where - BlockNumber: Zero + PartialOrd + sp_std::fmt::Debug + Clone + From, + BlockNumber: Zero + PartialOrd + core::fmt::Debug + Clone + From, { /// Checks that this instance is consistent with the requirements on each individual member. /// @@ -1469,7 +1469,7 @@ impl Pallet { /// The implementation of `Get<(u32, u32)>` which reads `ActiveConfig` and returns `P` percent of /// `hrmp_channel_max_message_size` / `hrmp_channel_max_capacity`. 
-pub struct ActiveConfigHrmpChannelSizeAndCapacityRatio(sp_std::marker::PhantomData<(T, P)>); +pub struct ActiveConfigHrmpChannelSizeAndCapacityRatio(core::marker::PhantomData<(T, P)>); impl> Get<(u32, u32)> for ActiveConfigHrmpChannelSizeAndCapacityRatio { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index c53f58faaf03a..9375af88306fd 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. use crate::configuration::{Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{Defensive, UncheckedOnRuntimeUpgrade}, @@ -28,7 +29,6 @@ use polkadot_primitives::{ LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; -use sp_std::vec::Vec; use super::v9::V9HostConfiguration; // All configuration of the runtime with respect to paras. @@ -164,7 +164,7 @@ mod v10 { >; } -pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); +pub struct VersionUncheckedMigrateToV10(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV10 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index 4d1bfc26196ca..4dce48fe52b0e 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. use crate::configuration::{self, Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, @@ -28,7 +29,6 @@ use polkadot_primitives::{ ApprovalVotingParams, AsyncBackingParams, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; -use sp_std::vec::Vec; use polkadot_core_primitives::Balance; use sp_arithmetic::Perbill; @@ -177,7 +177,7 @@ pub type MigrateToV11 = VersionedMigration< ::DbWeight, >; -pub struct UncheckedMigrateToV11(sp_std::marker::PhantomData); +pub struct UncheckedMigrateToV11(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV11 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 126597ed84544..6b77655687f0d 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. 
use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, @@ -26,7 +27,6 @@ use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::vstaging::SchedulerParams; use sp_core::Get; use sp_staking::SessionIndex; -use sp_std::vec::Vec; type V12HostConfiguration = configuration::HostConfiguration; @@ -68,7 +68,7 @@ pub type MigrateToV12 = VersionedMigration< ::DbWeight, >; -pub struct UncheckedMigrateToV12(sp_std::marker::PhantomData); +pub struct UncheckedMigrateToV12(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV12 { #[cfg(feature = "try-runtime")] diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs index bec41d3ea0dc5..468bf78692a12 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs @@ -17,13 +17,11 @@ //! Contains the V6 storage definition of the host configuration. use crate::configuration::{Config, Pallet}; +use alloc::vec::Vec; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; -use sp_std::vec::Vec; use polkadot_primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; -#[cfg(feature = "try-runtime")] -use sp_std::prelude::*; #[derive(codec::Encode, codec::Decode, Debug, Clone)] pub struct V6HostConfiguration { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs index 8fe4087cf9b17..9acd28d0f764e 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. use crate::configuration::{self, Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{Defensive, StorageVersion}, @@ -24,7 +25,6 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; -use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; @@ -154,7 +154,7 @@ mod v7 { >; } -pub struct MigrateToV7(sp_std::marker::PhantomData); +pub struct MigrateToV7(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV7 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index 0aa7f550b102a..81ced74bebb97 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. 
use crate::configuration::{self, Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{Defensive, StorageVersion}, @@ -27,7 +28,6 @@ use polkadot_primitives::{ AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; -use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; @@ -161,7 +161,7 @@ mod v8 { >; } -pub struct MigrateToV8(sp_std::marker::PhantomData); +pub struct MigrateToV8(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV8 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index 6afdd3cec29ef..dff5fdb17a697 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. use crate::configuration::{self, Config, Pallet}; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{Defensive, StorageVersion}, @@ -28,7 +29,6 @@ use polkadot_primitives::{ ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; -use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; @@ -164,7 +164,7 @@ mod v9 { >; } -pub struct MigrateToV9(sp_std::marker::PhantomData); +pub struct MigrateToV9(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV9 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index d1ac71f580ee0..028250e188ee9 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! On demand assigner pallet benchmarking. +//! Coretime pallet benchmarking. 
#![cfg(feature = "runtime-benchmarks")] @@ -28,6 +28,30 @@ mod benchmarks { use super::*; use assigner_coretime::PartsOf57600; + #[benchmark] + fn request_revenue_at() { + let root_origin = ::RuntimeOrigin::root(); + let mhr = ::MaxHistoricalRevenue::get(); + frame_system::Pallet::::set_block_number((mhr + 2).into()); + let minimum_balance = ::Currency::minimum_balance(); + let rev: BoundedVec< + <::Currency as frame_support::traits::Currency< + T::AccountId, + >>::Balance, + T::MaxHistoricalRevenue, + > = BoundedVec::try_from((1..=mhr).map(|v| minimum_balance * v.into()).collect::>()) + .unwrap(); + assigner_on_demand::Revenue::::put(rev); + + ::Currency::make_free_balance_be( + &>::account_id(), + minimum_balance * (mhr * (mhr + 1)).into(), + ); + + #[extrinsic_call] + _(root_origin as ::RuntimeOrigin, mhr + 1) + } + #[benchmark] fn request_core_count() { // Setup diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 3f82472da8aa4..4e75088675590 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -26,10 +26,12 @@ mod v_coretime { coretime::{mk_coretime_call, Config, PartsOf57600, WeightInfo}, paras, }; + use alloc::{vec, vec::Vec}; #[cfg(feature = "try-runtime")] use codec::Decode; #[cfg(feature = "try-runtime")] use codec::Encode; + use core::{iter, result}; #[cfg(feature = "try-runtime")] use frame_support::ensure; use frame_support::{ @@ -43,9 +45,6 @@ mod v_coretime { use sp_arithmetic::traits::SaturatedConversion; use sp_core::Get; use sp_runtime::BoundedVec; - #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; - use sp_std::{iter, prelude::*, result}; use xcm::prelude::{send_xcm, Instruction, Junction, Location, SendError, WeightLimit, Xcm}; /// Return information about a legacy lease of a parachain. @@ -59,7 +58,7 @@ mod v_coretime { /// This assumes that the `Coretime` and the `AssignerCoretime` pallets are added at the same /// time to a runtime. pub struct MigrateToCoretime( - sp_std::marker::PhantomData<(T, SendXcm, LegacyLease)>, + core::marker::PhantomData<(T, SendXcm, LegacyLease)>, ); impl>> diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index dedffb733d33e..1c38b3989232b 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -18,20 +18,37 @@ //! //! 
-use sp_std::{prelude::*, result}; - -use frame_support::{pallet_prelude::*, traits::Currency}; +use alloc::{vec, vec::Vec}; +use core::result; +use frame_support::{ + pallet_prelude::*, + traits::{defensive_prelude::*, Currency}, +}; use frame_system::pallet_prelude::*; pub use pallet::*; use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; -use polkadot_primitives::{CoreIndex, Id as ParaId}; +use polkadot_primitives::{Balance, BlockNumber, CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; -use xcm::prelude::{ - send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm, +use sp_runtime::traits::TryConvert; +use xcm::{ + prelude::{send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm}, + v4::{ + Asset, + AssetFilter::Wild, + AssetId, Assets, Error as XcmError, + Fungibility::Fungible, + Instruction::{DepositAsset, ReceiveTeleportedAsset}, + Junctions::Here, + Reanchorable, + WildAsset::AllCounted, + XcmContext, + }, }; +use xcm_executor::traits::TransactAsset; use crate::{ assigner_coretime::{self, PartsOf57600}, + assigner_on_demand, initializer::{OnNewSession, SessionChangeNotification}, origin::{ensure_parachain, Origin}, }; @@ -39,9 +56,11 @@ use crate::{ mod benchmarking; pub mod migration; +const LOG_TARGET: &str = "runtime::parachains::coretime"; + pub trait WeightInfo { fn request_core_count() -> Weight; - //fn request_revenue_info_at() -> Weight; + fn request_revenue_at() -> Weight; //fn credit_account() -> Weight; fn assign_core(s: u32) -> Weight; } @@ -53,19 +72,23 @@ impl WeightInfo for TestWeightInfo { fn request_core_count() -> Weight { Weight::MAX } - // TODO: Add real benchmarking functionality for each of these to - // benchmarking.rs, then uncomment here and in trait definition. - /*fn request_revenue_info_at() -> Weight { + fn request_revenue_at() -> Weight { Weight::MAX } - fn credit_account() -> Weight { - Weight::MAX - }*/ + // TODO: Add real benchmarking functionality for each of these to + // benchmarking.rs, then uncomment here and in trait definition. + //fn credit_account() -> Weight { + // Weight::MAX + //} fn assign_core(_s: u32) -> Weight { Weight::MAX } } +/// Shorthand for the Balance type the runtime is using. +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + /// Broker pallet index on the coretime chain. Used to /// /// construct remote calls. The codec index must correspond to the index of `Broker` in the @@ -85,6 +108,8 @@ enum CoretimeCalls { SetLease(pallet_broker::TaskId, pallet_broker::Timeslice), #[codec(index = 19)] NotifyCoreCount(u16), + #[codec(index = 20)] + NotifyRevenue((BlockNumber, Balance)), #[codec(index = 99)] SwapLeases(ParaId, ParaId), } @@ -92,6 +117,9 @@ enum CoretimeCalls { #[frame_support::pallet] pub mod pallet { use crate::configuration; + use sp_runtime::traits::TryConvert; + use xcm::v4::InteriorLocation; + use xcm_executor::traits::TransactAsset; use super::*; @@ -100,7 +128,9 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + assigner_coretime::Config { + pub trait Config: + frame_system::Config + assigner_coretime::Config + assigner_on_demand::Config + { type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -109,9 +139,17 @@ pub mod pallet { /// The ParaId of the coretime chain. #[pallet::constant] type BrokerId: Get; + /// The coretime chain pot location. 
+ #[pallet::constant] + type BrokerPotLocation: Get; /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; + /// The XCM sender. type SendXcm: SendXcm; + /// The asset transactor. + type AssetTransactor: TransactAsset; + /// AccountId to Location converter + type AccountToLocation: for<'a> TryConvert<&'a Self::AccountId, Location>; /// Maximum weight for any XCM transact call that should be executed on the coretime chain. /// @@ -132,6 +170,11 @@ pub mod pallet { pub enum Error { /// The paraid making the call is not the coretime brokerage system parachain. NotBroker, + /// Requested revenue information `when` parameter was in the future from the current + /// block height. + RequestedFutureRevenue, + /// Failed to transfer assets to the coretime chain + AssetTransferFailed, } #[pallet::hooks] @@ -154,17 +197,17 @@ pub mod pallet { configuration::Pallet::::set_coretime_cores_unchecked(u32::from(count)) } - //// TODO Impl me! - ////#[pallet::weight(::WeightInfo::request_revenue_info_at())] - //#[pallet::call_index(2)] - //pub fn request_revenue_info_at( - // origin: OriginFor, - // _when: BlockNumberFor, - //) -> DispatchResult { - // // Ignore requests not coming from the coretime chain or root. - // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; - // Ok(()) - //} + /// Request to claim the instantaneous coretime sales revenue starting from the block it was + /// last claimed until and up to the block specified. The claimed amount value is sent back + /// to the Coretime chain in a `notify_revenue` message. At the same time, the amount is + /// teleported to the Coretime chain. + #[pallet::weight(::WeightInfo::request_revenue_at())] + #[pallet::call_index(2)] + pub fn request_revenue_at(origin: OriginFor, when: BlockNumber) -> DispatchResult { + // Ignore requests not coming from the Coretime Chain or Root. + Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + Self::notify_revenue(when) + } //// TODO Impl me! ////#[pallet::weight(::WeightInfo::credit_account())] @@ -244,11 +287,43 @@ impl Pallet { Location::new(0, [Junction::Parachain(T::BrokerId::get())]), message, ) { - log::error!("Sending `NotifyCoreCount` to coretime chain failed: {:?}", err); + log::error!(target: LOG_TARGET, "Sending `NotifyCoreCount` to coretime chain failed: {:?}", err); } } } + /// Provide the amount of revenue accumulated from Instantaneous Coretime Sales from Relay-chain + /// block number last_until to until, not including until itself. last_until is defined as being + /// the until argument of the last notify_revenue message sent, or zero for the first call. If + /// revenue is None, this indicates that the information is no longer available. This explicitly + /// disregards the possibility of multiple parachains requesting and being notified of revenue + /// information. + /// + /// The Relay-chain must be configured to ensure that only a single revenue information + /// destination exists. + pub fn notify_revenue(until: BlockNumber) -> DispatchResult { + let now = >::block_number(); + let until_bnf: BlockNumberFor = until.into(); + + // When cannot be in the future. 
+ ensure!(until_bnf <= now, Error::::RequestedFutureRevenue); + + let amount = >::claim_revenue_until(until_bnf); + log::debug!(target: LOG_TARGET, "Revenue info requested: {:?}", amount); + + let raw_revenue: Balance = amount.try_into().map_err(|_| { + log::error!(target: LOG_TARGET, "Converting on demand revenue for `NotifyRevenue` failed"); + Error::::AssetTransferFailed + })?; + + do_notify_revenue::(until, raw_revenue).map_err(|err| { + log::error!(target: LOG_TARGET, "notify_revenue failed: {err:?}"); + Error::::AssetTransferFailed + })?; + + Ok(()) + } + // Handle legacy swaps in coretime. Notifies coretime chain that a lease swap has occurred via // XCM message. This function is meant to be used in an implementation of `OnSwap` trait. pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { @@ -263,7 +338,7 @@ impl Pallet { Location::new(0, [Junction::Parachain(T::BrokerId::get())]), message, ) { - log::error!("Sending `SwapLeases` to coretime chain failed: {:?}", err); + log::error!(target: LOG_TARGET, "Sending `SwapLeases` to coretime chain failed: {:?}", err); } } } @@ -281,3 +356,55 @@ fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruct call: BrokerRuntimePallets::Broker(call).encode().into(), } } + +fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Result<(), XcmError> { + let dest = Junction::Parachain(T::BrokerId::get()).into_location(); + let mut message = Vec::new(); + let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) }; + let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; + + if raw_revenue > 0 { + let on_demand_pot = + T::AccountToLocation::try_convert(&>::account_id()) + .map_err(|err| { + log::error!( + target: LOG_TARGET, + "Failed to convert on-demand pot account to XCM location: {err:?}", + ); + XcmError::InvalidLocation + })?; + + let withdrawn = T::AssetTransactor::withdraw_asset(&asset, &on_demand_pot, None)?; + + T::AssetTransactor::can_check_out(&dest, &asset, &dummy_xcm_context)?; + + let assets_reanchored = Into::::into(withdrawn) + .reanchored(&dest, &Here.into()) + .defensive_map_err(|_| XcmError::ReanchorFailed)?; + + message.extend( + [ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + ReceiveTeleportedAsset(assets_reanchored), + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: T::BrokerPotLocation::get().into_location(), + }, + ] + .into_iter(), + ); + } + + message.push(mk_coretime_call::(CoretimeCalls::NotifyRevenue((when, raw_revenue)))); + + send_xcm::(dest.clone(), Xcm(message))?; + + if raw_revenue > 0 { + T::AssetTransactor::check_out(&dest, &asset, &dummy_xcm_context); + } + + Ok(()) +} diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index 4a0f2390b45dc..f86573dadf562 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -19,8 +19,10 @@ use crate::{ configuration, initializer::SessionChangeNotification, metrics::METRICS, session_info, }; +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0}; use codec::{Decode, Encode}; +use core::cmp::Ordering; use frame_support::{ensure, weights::Weight}; use frame_system::pallet_prelude::*; use polkadot_primitives::{ @@ -36,7 +38,6 @@ use sp_runtime::{ traits::{AppVerify, One, Saturating, Zero}, DispatchError, RuntimeDebug, SaturatedConversion, }; -use sp_std::{cmp::Ordering, 
collections::btree_set::BTreeSet, prelude::*}; #[cfg(test)] #[allow(unused_imports)] diff --git a/polkadot/runtime/parachains/src/disputes/migration.rs b/polkadot/runtime/parachains/src/disputes/migration.rs index e12edffb51b38..dd32340c9f64f 100644 --- a/polkadot/runtime/parachains/src/disputes/migration.rs +++ b/polkadot/runtime/parachains/src/disputes/migration.rs @@ -21,16 +21,16 @@ use frame_support::traits::StorageVersion; pub mod v1 { use super::*; use crate::disputes::{Config, Pallet}; + use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, }; use polkadot_primitives::SessionIndex; - use sp_std::prelude::*; #[storage_alias] type SpamSlots = StorageMap, Twox64Concat, SessionIndex, Vec>; - pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index b50853ecc696c..4b76fb47e1f8d 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -50,6 +50,12 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; +use alloc::{ + boxed::Box, + collections::{btree_map::Entry, btree_set::BTreeSet}, + vec, + vec::Vec, +}; use polkadot_primitives::{ slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, CandidateHash, SessionIndex, ValidatorId, ValidatorIndex, @@ -65,10 +71,6 @@ use sp_runtime::{ }; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::offence::{Kind, Offence, OffenceError, ReportOffence}; -use sp_std::{ - collections::{btree_map::Entry, btree_set::BTreeSet}, - prelude::*, -}; const LOG_TARGET: &str = "runtime::parachains::slashing"; @@ -158,7 +160,7 @@ impl SlashingOffence { /// This type implements `SlashingHandler`. pub struct SlashValidatorsForDisputes { - _phantom: sp_std::marker::PhantomData, + _phantom: core::marker::PhantomData, } impl Default for SlashValidatorsForDisputes { @@ -640,7 +642,7 @@ fn is_known_offence( /// When configured properly, should be instantiated with /// `T::KeyOwnerIdentification, Offences, ReportLongevity` parameters. 
pub struct SlashingReportHandler { - _phantom: sp_std::marker::PhantomData<(I, R, L)>, + _phantom: core::marker::PhantomData<(I, R, L)>, } impl Default for SlashingReportHandler { diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index c0e1635ba1692..54e112d1b8b44 100644 --- a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -46,6 +46,8 @@ use crate::{ configuration::{self, HostConfiguration}, initializer, FeeTracker, }; +use alloc::vec::Vec; +use core::fmt; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::{DownwardMessage, Hash, Id as ParaId, InboundDownwardMessage}; @@ -54,7 +56,6 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash as HashT, SaturatedConversion}, FixedU128, Saturating, }; -use sp_std::{fmt, prelude::*}; use xcm::latest::SendError; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index e34e4a03e711c..8b01a755c3c7b 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -18,7 +18,13 @@ use crate::{ configuration::{self, HostConfiguration}, dmp, ensure_parachain, initializer, paras, }; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec, + vec::Vec, +}; use codec::{Decode, Encode}; +use core::{fmt, mem}; use frame_support::{pallet_prelude::*, traits::ReservableCurrency, DefaultNoBound}; use frame_system::pallet_prelude::*; use polkadot_parachain_primitives::primitives::{HorizontalMessages, IsSystem}; @@ -31,11 +37,6 @@ use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, Hash as HashT, UniqueSaturatedInto, Zero}, ArithmeticError, }; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - fmt, mem, - prelude::*, -}; pub use pallet::*; @@ -487,7 +488,7 @@ pub mod pallet { #[derive(DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - _config: sp_std::marker::PhantomData, + _config: core::marker::PhantomData, preopen_hrmp_channels: Vec<(ParaId, ParaId, u32, u32)>, } diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 4fcbc69e98ad8..52db932c7962b 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -27,8 +27,9 @@ use crate::{ }, shared, }; -use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use frame_support::{assert_noop, assert_ok}; use polkadot_primitives::{BlockNumber, InboundDownwardMessage}; +use sp_runtime::traits::BadOrigin; use std::collections::BTreeMap; pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index a340d52643e05..36a810d341c65 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -73,9 +73,9 @@ mod v1 { CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, PendingAvailability as V1PendingAvailability, }; + use alloc::{collections::vec_deque::VecDeque, vec::Vec}; use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; use sp_core::Get; - use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; #[cfg(feature = "try-runtime")] use codec::{Decode, Encode}; @@ -85,7 +85,7 @@ mod v1 { traits::{GetStorageVersion, StorageVersion}, }; - pub struct 
VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 88162cdd238f6..8511689719560 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -27,8 +27,15 @@ use crate::{ shared::{self, AllowedRelayParentsTracker}, util::make_persisted_validation_data_with_parent, }; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, + vec, + vec::Vec, +}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use codec::{Decode, Encode}; +#[cfg(feature = "std")] +use core::fmt; use frame_support::{ defensive, pallet_prelude::*, @@ -46,12 +53,6 @@ use polkadot_primitives::{ }; use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; -#[cfg(feature = "std")] -use sp_std::fmt; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, - prelude::*, -}; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index fd0f1c3c06511..340f727097b58 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -25,6 +25,7 @@ use crate::{ disputes::{self, DisputesHandler as _, SlashingHandler as _}, dmp, hrmp, inclusion, paras, scheduler, session_info, shared, }; +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{ traits::{OneSessionHandler, Randomness}, @@ -33,7 +34,6 @@ use frame_support::{ use frame_system::limits::BlockWeights; use polkadot_primitives::{BlockNumber, ConsensusLog, SessionIndex, ValidatorId}; use scale_info::TypeInfo; -use sp_std::prelude::*; #[cfg(test)] mod tests; @@ -249,7 +249,7 @@ impl Pallet { // TODO: audit usage of randomness API // https://github.com/paritytech/polkadot/issues/2601 let (random_hash, _) = T::Randomness::random(&b"paras"[..]); - let len = sp_std::cmp::min(32, random_hash.as_ref().len()); + let len = core::cmp::min(32, random_hash.as_ref().len()); buf[..len].copy_from_slice(&random_hash.as_ref()[..len]); buf }; diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 51110e89416c3..f2995d770e716 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -53,6 +53,8 @@ mod mock; #[cfg(test)] mod ump_tests; +extern crate alloc; + pub use origin::{ensure_parachain, Origin}; pub use paras::{ParaLifecycle, UpgradeStrategy}; use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode}; diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 943f7451395e7..6bb933683de1d 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -36,6 +36,7 @@ use frame_support::{ Currency, ProcessMessage, ProcessMessageError, ValidatorSet, ValidatorSetWithIdentification, }, weights::{Weight, WeightMeter}, + PalletId, }; use frame_support_test::TestRandomness; use frame_system::limits; @@ -50,14 +51,13 @@ use sp_runtime::{ transaction_validity::TransactionPriority, BuildStorage, FixedU128, Perbill, Permill, }; -use sp_std::{ +use std::{ cell::RefCell, - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + 
collections::{btree_map::BTreeMap, vec_deque::VecDeque, HashMap}, }; -use std::collections::HashMap; use xcm::{ prelude::XcmVersion, - v4::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, + v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, IntoVersion, VersionedXcm, WrapVersion, }; @@ -143,20 +143,11 @@ parameter_types! { pub static ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! { @@ -404,17 +395,23 @@ impl pallet_message_queue::Config for Test { type IdleMaxServiceWeight = (); } +impl assigner_parachains::Config for Test {} + parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Production chains should keep this number around twice the + // defined Timeslice for Coretime. + pub const MaxHistoricalRevenue: BlockNumber = 2 * 5; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } -impl assigner_parachains::Config for Test {} - impl assigner_on_demand::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = crate::assigner_on_demand::TestWeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl assigner_coretime::Config for Test {} @@ -424,6 +421,13 @@ parameter_types! { pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + unimplemented!() + } +} + impl coretime::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; @@ -432,6 +436,9 @@ impl coretime::Config for Test { type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; type MaxXcmTransactWeight = MaxXcmTransactWeight; + type BrokerPotLocation = BrokerPot; + type AssetTransactor = (); + type AccountToLocation = (); } pub struct DummyXcmSender; @@ -673,7 +680,7 @@ impl inclusion::RewardValidators for TestRewardValidators { /// Create a new set of test externalities. pub fn new_test_ext(state: MockGenesisConfig) -> TestExternalities { use sp_keystore::{testing::MemoryKeystore, KeystoreExt, KeystorePtr}; - use sp_std::sync::Arc; + use std::sync::Arc; sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/parachains/src/origin.rs b/polkadot/runtime/parachains/src/origin.rs index 5202cba232d20..fd22929b08ff5 100644 --- a/polkadot/runtime/parachains/src/origin.rs +++ b/polkadot/runtime/parachains/src/origin.rs @@ -16,9 +16,9 @@ //! Declaration of the parachain specific origin and a pallet that hosts it. 
+use core::result; use polkadot_primitives::Id as ParaId; use sp_runtime::traits::BadOrigin; -use sp_std::result; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 0f3318612a77c..630b86132ab88 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -16,6 +16,7 @@ use super::*; use crate::configuration::HostConfiguration; +use alloc::vec; use frame_benchmarking::benchmarks; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use polkadot_primitives::{ diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index 0bf5fe783a0e7..80443c7626e2b 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -17,6 +17,7 @@ //! This module focuses on the benchmarking of the `include_pvf_check_statement` dispatchable. use crate::{configuration, paras::*, shared::Pallet as ParasShared}; +use alloc::{vec, vec::Vec}; use frame_support::assert_ok; use frame_system::RawOrigin; use polkadot_primitives::{HeadData, Id as ParaId, ValidationCode, ValidatorId, ValidatorIndex}; diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 8cffcbbbb024f..3f0b8659b1599 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -113,8 +113,10 @@ use crate::{ initializer::SessionChangeNotification, shared, }; +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use codec::{Decode, Encode}; +use core::{cmp, mem}; use frame_support::{pallet_prelude::*, traits::EstimateNextSessionRotation, DefaultNoBound}; use frame_system::pallet_prelude::*; use polkadot_primitives::{ @@ -127,7 +129,6 @@ use sp_runtime::{ traits::{AppVerify, One, Saturating}, DispatchResult, SaturatedConversion, }; -use sp_std::{cmp, collections::btree_set::BTreeSet, mem, prelude::*}; use serde::{Deserialize, Serialize}; @@ -863,7 +864,7 @@ pub mod pallet { #[derive(DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, pub paras: Vec<(ParaId, ParaGenesisArgs)>, } diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 4c8b093451ed5..c5284ba1dd1f8 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -16,9 +16,10 @@ use super::*; use crate::{inclusion, ParaId}; +use alloc::collections::btree_map::BTreeMap; +use core::cmp::min; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; -use sp_std::{cmp::min, collections::btree_map::BTreeMap}; use polkadot_primitives::v7::GroupIndex; diff --git a/polkadot/runtime/parachains/src/paras_inherent/misc.rs b/polkadot/runtime/parachains/src/paras_inherent/misc.rs index dac9e6e256d0e..2858c3f95de26 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/misc.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/misc.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use sp_std::{cmp::Ordering, vec::Vec}; +use alloc::vec::Vec; +use core::cmp::Ordering; /// A helper trait to allow calling retain while getting access /// to the index of the item in the `vec`. diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index a3c3af3a18c6f..3e71dc1d8eb49 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -32,6 +32,11 @@ use crate::{ shared::{self, AllowedRelayParentsTracker}, ParaId, }; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec, + vec::Vec, +}; use bitvec::prelude::BitVec; use frame_support::{ defensive, @@ -53,11 +58,6 @@ use polkadot_primitives::{ use rand::{seq::SliceRandom, SeedableRng}; use scale_info::TypeInfo; use sp_runtime::traits::{Header as HeaderT, One}; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - prelude::*, - vec::Vec, -}; mod misc; mod weights; @@ -295,7 +295,7 @@ impl Pallet { fn process_inherent_data( data: ParachainsInherentData>, context: ProcessInherentDataContext, - ) -> sp_std::result::Result< + ) -> core::result::Result< (ParachainsInherentData>, PostDispatchInfo), DispatchErrorWithPostInfo, > { @@ -783,7 +783,7 @@ pub(crate) fn apply_weight_limit( let mut chained_candidates: Vec> = Vec::new(); let mut current_para_id = None; - for candidate in sp_std::mem::take(candidates).into_iter() { + for candidate in core::mem::take(candidates).into_iter() { let candidate_para_id = candidate.descriptor().para_id; if Some(candidate_para_id) == current_para_id { let chain = chained_candidates diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 4de7263abe4b2..be11691eb1155 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -53,13 +53,13 @@ mod enter { }, session_info, }; + use alloc::collections::btree_map::BTreeMap; use assert_matches::assert_matches; use core::panic; use frame_support::assert_ok; use frame_system::limits; use polkadot_primitives::{vstaging::SchedulerParams, AvailabilityBitfield, UncheckedSigned}; use sp_runtime::Perbill; - use sp_std::collections::btree_map::BTreeMap; struct TestConfig { dispute_statements: BTreeMap, @@ -1795,8 +1795,8 @@ mod sanitizers { scheduler::{common::Assignment, ParasEntry}, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; + use alloc::collections::vec_deque::VecDeque; use polkadot_primitives::ValidationCode; - use sp_std::collections::vec_deque::VecDeque; use super::*; diff --git a/polkadot/runtime/parachains/src/reward_points.rs b/polkadot/runtime/parachains/src/reward_points.rs index 5f45445b0ba2a..69ef2db756c21 100644 --- a/polkadot/runtime/parachains/src/reward_points.rs +++ b/polkadot/runtime/parachains/src/reward_points.rs @@ -22,9 +22,9 @@ //! for the time being, although we will build schemes to do so in the future. use crate::{session_info, shared}; +use alloc::collections::btree_set::BTreeSet; use frame_support::traits::{Defensive, ValidatorSet}; use polkadot_primitives::{SessionIndex, ValidatorIndex}; -use sp_std::collections::btree_set::BTreeSet; /// The amount of era points given by backing a candidate that is included. 
pub const BACKING_POINTS: u32 = 20; @@ -32,7 +32,7 @@ pub const BACKING_POINTS: u32 = 20; pub const DISPUTE_STATEMENT_POINTS: u32 = 20; /// Rewards validators for participating in parachains with era points in pallet-staking. -pub struct RewardValidatorsWithEraPoints(sp_std::marker::PhantomData); +pub struct RewardValidatorsWithEraPoints(core::marker::PhantomData); impl RewardValidatorsWithEraPoints where diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 4417ec75abd67..6978902322113 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -22,6 +22,7 @@ use crate::{ scheduler::{self, CoreOccupied}, session_info, shared, }; +use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; use polkadot_primitives::{ @@ -37,7 +38,6 @@ use polkadot_primitives::{ ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; /// Implementation for the `validators` function of the runtime API. pub fn validators() -> Vec { diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 62e96e9fbb051..4aa381e33b1bc 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -17,21 +17,21 @@ //! Put implementations of functions from staging APIs here. use crate::{configuration, inclusion, initializer, scheduler}; -use polkadot_primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; -use sp_runtime::traits::One; -use sp_std::{ +use alloc::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, vec::Vec, }; +use polkadot_primitives::{CommittedCandidateReceipt, CoreIndex, Id as ParaId}; +use sp_runtime::traits::One; /// Returns the claimqueue from the scheduler pub fn claim_queue() -> BTreeMap> { let now = >::block_number() + One::one(); - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. + // This is needed so that the claim queue always has the right size (equal to + // scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the + // previous candidate is included, the claim queue will have already pop()-ed the next item + // from the queue and the length would be `scheduling_lookahead - 1`. 
>::free_cores_and_fill_claim_queue(Vec::new(), now); let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 33b4d849c490f..445583d929aba 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -39,6 +39,13 @@ use core::iter::Peekable; use crate::{configuration, initializer::SessionChangeNotification, paras}; +use alloc::{ + collections::{ + btree_map::{self, BTreeMap}, + vec_deque::VecDeque, + }, + vec::Vec, +}; use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; pub use polkadot_core_primitives::v2::BlockNumber; @@ -46,13 +53,6 @@ use polkadot_primitives::{ CoreIndex, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, ValidatorIndex, }; use sp_runtime::traits::One; -use sp_std::{ - collections::{ - btree_map::{self, BTreeMap}, - vec_deque::VecDeque, - }, - prelude::*, -}; pub mod common; @@ -314,10 +314,8 @@ impl Pallet { .into_iter() .filter(|(freed_index, _)| (freed_index.0 as usize) < c_len) .for_each(|(freed_index, freed_reason)| { - match sp_std::mem::replace( - &mut cores[freed_index.0 as usize], - CoreOccupied::Free, - ) { + match core::mem::replace(&mut cores[freed_index.0 as usize], CoreOccupied::Free) + { CoreOccupied::Free => {}, CoreOccupied::Paras(entry) => { match freed_reason { @@ -351,6 +349,9 @@ impl Pallet { } /// Note that the given cores have become occupied. Update the claim queue accordingly. + /// This will not push a new entry onto the claim queue, so the length after this call will be + /// the expected length - 1. The claim_queue runtime API will take care of adding another entry + /// here, to ensure the right lookahead. pub(crate) fn occupied( now_occupied: BTreeMap, ) -> BTreeMap { @@ -566,7 +567,7 @@ impl Pallet { fn push_occupied_cores_to_assignment_provider() { AvailabilityCores::::mutate(|cores| { for core in cores.iter_mut() { - match sp_std::mem::replace(core, CoreOccupied::Free) { + match core::mem::replace(core, CoreOccupied::Free) { CoreOccupied::Free => continue, CoreOccupied::Paras(entry) => { Self::maybe_push_assignment(entry); diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 84d7d4b567102..125f105ef7066 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. 
use super::*; +use alloc::vec::Vec; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, traits::UncheckedOnRuntimeUpgrade, weights::Weight, @@ -164,7 +165,7 @@ mod v1 { } /// Migration to V1 - pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); + pub struct UncheckedMigrateToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -301,7 +302,7 @@ mod v2 { } /// Migration to V2 - pub struct UncheckedMigrateToV2(sp_std::marker::PhantomData); + pub struct UncheckedMigrateToV2(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV2 { fn on_runtime_upgrade() -> Weight { diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 32811241e171c..f3866146e8112 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -16,12 +16,12 @@ use super::*; +use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use frame_support::assert_ok; use polkadot_primitives::{ vstaging::SchedulerParams, BlockNumber, SessionIndex, ValidationCode, ValidatorId, }; use sp_keyring::Sr25519Keyring; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ configuration::HostConfiguration, diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index ff032f7e34d5e..ea05c1aacaa94 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -24,6 +24,7 @@ use crate::{ configuration, paras, scheduler, shared, util::{take_active_subset, take_active_subset_and_inactive}, }; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification}, @@ -32,7 +33,6 @@ use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::{ AssignmentId, AuthorityDiscoveryId, ExecutorParams, SessionIndex, SessionInfo, }; -use sp_std::vec::Vec; pub use pallet::*; diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs index 417de1fa3fb0d..154b7cfefc3ac 100644 --- a/polkadot/runtime/parachains/src/shared.rs +++ b/polkadot/runtime/parachains/src/shared.rs @@ -19,14 +19,14 @@ //! To avoid cyclic dependencies, it is important that this pallet is not //! dependent on any of the other pallets. 
+use alloc::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec::Vec, +}; use frame_support::{pallet_prelude::*, traits::DisabledValidators}; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::{SessionIndex, ValidatorId, ValidatorIndex}; use sp_runtime::traits::AtLeast32BitUnsigned; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - vec::Vec, -}; use rand::{seq::SliceRandom, SeedableRng}; use rand_chacha::ChaCha20Rng; diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index 4d6da8c9e3c1b..d914bf8b66612 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -34,7 +34,6 @@ use frame_support::{ use polkadot_primitives::{well_known_keys, Id as ParaId, UpwardMessage}; use sp_crypto_hashing::{blake2_256, twox_64}; use sp_runtime::traits::Bounded; -use sp_std::prelude::*; pub(super) struct GenesisConfigBuilder { max_upward_message_size: u32, diff --git a/polkadot/runtime/parachains/src/util.rs b/polkadot/runtime/parachains/src/util.rs index cb2deffd7f659..3588e494438d0 100644 --- a/polkadot/runtime/parachains/src/util.rs +++ b/polkadot/runtime/parachains/src/util.rs @@ -17,9 +17,9 @@ //! Utilities that don't belong to any particular module but may draw //! on all modules. +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use frame_system::pallet_prelude::BlockNumberFor; use polkadot_primitives::{HeadData, Id as ParaId, PersistedValidationData, ValidatorIndex}; -use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; use crate::{configuration, hrmp, paras}; @@ -118,7 +118,7 @@ pub fn take_active_subset(active: &[ValidatorIndex], set: &[T]) -> Vec #[cfg(test)] mod tests { - use sp_std::vec::Vec; + use alloc::vec::Vec; use crate::util::{split_active_subset, take_active_subset}; use polkadot_primitives::ValidatorIndex; diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index d342926d3c5a0..f93a3ad65754c 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,117 +11,116 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } serde_json = { features = ["alloc"], workspace = true } -static_assertions = "1.1.0" -smallvec = "1.8.0" -bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } +static_assertions = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } +bitvec = { features = ["alloc"], workspace = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false } -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", 
default-features = false } -rococo-runtime-constants = { package = "rococo-runtime-constants", path = "constants", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-consensus-grandpa = { workspace = true } +binary-merkle-tree = { workspace = true } +rococo-runtime-constants = { workspace = true } +sp-api = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-beefy = { path = "../../../substrate/frame/beefy", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-bounties = { path = "../../../substrate/frame/bounties", default-features = false } -pallet-child-bounties = { path = "../../../substrate/frame/child-bounties", default-features = false } -pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = 
"../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } -pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } -pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } -pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } -pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } -pallet-nis = { path = "../../../substrate/frame/nis", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-parameters = { path = "../../../substrate/frame/parameters", default-features = false } -pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } -pallet-ranked-collective = { path = "../../../substrate/frame/ranked-collective", default-features = false } -pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } -pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } -pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-society = { path = "../../../substrate/frame/society", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["tuples-96"] } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-tips = { path = "../../../substrate/frame/tips", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", 
default-features = false, optional = true } -pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-beefy = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-bounties = { workspace = true } +pallet-child-bounties = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-collective = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-democracy = { workspace = true } +pallet-elections-phragmen = { workspace = true } +pallet-asset-rate = { workspace = true } +frame-executive = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-identity = { workspace = true } +pallet-indices = { workspace = true } +pallet-membership = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-mmr = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nis = { workspace = true } +pallet-offences = { workspace = true } +pallet-parameters = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-recovery = { workspace = true } +pallet-referenda = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-society = { workspace = true } +pallet-sudo = { workspace = true } +frame-support = { features = ["tuples-96"], workspace = true } +pallet-staking = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-tips = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +pallet-root-testing = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -hex-literal = { version = "0.4.1" } +frame-benchmarking = { optional = true, workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +hex-literal = { workspace = true, default-features = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +polkadot-primitives = { workspace = true } 
+polkadot-parachain-primitives = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } [dev-dependencies] -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } -sp-trie = { path = "../../../substrate/primitives/trie" } -separator = "0.4.1" +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } +remote-externalities = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +separator = { workspace = true } serde_json = { workspace = true, default-features = true } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -tokio = { version = "1.24.2", features = ["macros"] } +sp-tracing = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -209,7 +208,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", "sp-storage/std", "sp-tracing/std", "sp-transaction-pool/std", @@ -217,7 +215,7 @@ std = [ "substrate-wasm-builder", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -270,7 +268,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 2c49488077e6f..b67c36d71fd87 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -polkadot-runtime-common = { path = "../../common", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace 
= true } +sp-core = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 1dcafdcbc4d9a..47b50bf6c1069 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -121,6 +121,17 @@ pub mod system_parachain { /// All system parachains of Rococo. pub type SystemParachains = IsChildSystemParachain; + + /// Coretime constants + pub mod coretime { + /// Coretime timeslice period in blocks + /// WARNING: This constant is used across chains, so additional care should be taken + /// when changing it. + #[cfg(feature = "fast-runtime")] + pub const TIMESLICE_PERIOD: u32 = 20; + #[cfg(not(feature = "fast-runtime"))] + pub const TIMESLICE_PERIOD: u32 = 80; + } } /// Rococo Treasury pallet instance. diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs index 1c70c94ce0484..67dcd6cd7a510 100644 --- a/polkadot/runtime/rococo/src/genesis_config_presets.rs +++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs @@ -17,6 +17,9 @@ //! Genesis configs presets for the Rococo runtime use crate::{SessionKeys, BABE_GENESIS_EPOCH_CONFIG}; +#[cfg(not(feature = "std"))] +use alloc::format; +use alloc::vec::Vec; use polkadot_primitives::{ vstaging::SchedulerParams, AccountId, AccountPublic, AssignmentId, ValidatorId, }; @@ -27,9 +30,6 @@ use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::IdentifyAccount; -#[cfg(not(feature = "std"))] -use sp_std::alloc::format; -use sp_std::vec::Vec; /// Helper function to generate a crypto pair from seed fn get_from_seed(seed: &str) -> ::Public { @@ -530,7 +530,7 @@ fn wococo_local_testnet_genesis() -> serde_json::Value { } /// Provides the JSON representation of predefined genesis config for given `id`. -pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option> { +pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option> { let patch = match id.try_into() { Ok("local_testnet") => rococo_local_testnet_genesis(), Ok("development") => rococo_development_config_genesis(), diff --git a/polkadot/runtime/rococo/src/governance/fellowship.rs b/polkadot/runtime/rococo/src/governance/fellowship.rs index a589b768afde2..27a58a0eebd18 100644 --- a/polkadot/runtime/rococo/src/governance/fellowship.rs +++ b/polkadot/runtime/rococo/src/governance/fellowship.rs @@ -356,6 +356,7 @@ impl pallet_ranked_collective::Config for Runtime type MinRankOfClass = sp_runtime::traits::Identity; type MemberSwappedHandler = (); type VoteWeight = pallet_ranked_collective::Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (); } diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index 7b5c7b1fb4aca..a4440a1c6e0b9 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -15,13 +15,14 @@ // along with Polkadot. If not, see . 
use crate::xcm_config; +use alloc::{boxed::Box, vec}; use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; use rococo_runtime_constants::currency::*; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 91ca5eb5e31d1..5adffbd7422f9 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -20,7 +20,15 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] +extern crate alloc; + +use alloc::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec, + vec::Vec, +}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::cmp::Ordering; use frame_support::{ dynamic_params::{dynamic_pallet_params, dynamic_params}, traits::FromContains, @@ -60,7 +68,7 @@ use polkadot_runtime_parachains::{ scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; -use rococo_runtime_constants::system_parachain::BROKER_ID; +use rococo_runtime_constants::system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID}; use scale_info::TypeInfo; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_beefy::{ @@ -68,11 +76,6 @@ use sp_consensus_beefy::{ mmr::{BeefyDataProvider, MmrLeafVersion}, }; use sp_genesis_builder::PresetId; -use sp_std::{ - cmp::Ordering, - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::*, -}; use frame_support::{ construct_runtime, derive_impl, @@ -92,12 +95,13 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; -use sp_core::{ConstU128, ConstU8, OpaqueMetadata, H256}; +use sp_core::{ConstU128, ConstU8, Get, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, + Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, + Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, @@ -131,7 +135,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -162,7 +166,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, @@ -476,9 +480,6 @@ parameter_types! { } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 2000 * CENTS; - pub const ProposalBondMaximum: Balance = 1 * GRAND; pub const SpendPeriod: BlockNumber = 6 * DAYS; pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); @@ -501,13 +502,8 @@ parameter_types! { impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = EitherOfDiverse, Treasurer>; type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; - type OnSlash = Treasury; - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = ProposalBondMaximum; type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = Society; @@ -566,6 +562,7 @@ impl pallet_bounties::Config for Runtime { type RuntimeEvent = RuntimeEvent; type MaximumReasonLength = MaximumReasonLength; type WeightInfo = weights::pallet_bounties::WeightInfo; + type OnSlash = Treasury; } parameter_types! { @@ -1064,21 +1061,39 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + Junction::AccountId32 { network: None, id: BrokerPalletId::get().into_account_truncating() } + .into() + } +} + impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BrokerId = BrokerId; + type BrokerPotLocation = BrokerPot; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type AssetTransactor = crate::xcm_config::LocalAssetTransactor; + type AccountToLocation = xcm_builder::AliasesIntoAccountId32< + xcm_config::ThisNetwork, + ::AccountId, + >; type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * TIMESLICE_PERIOD; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_assigner_on_demand::Config for Runtime { @@ -1086,6 +1101,8 @@ impl parachains_assigner_on_demand::Config for Runtime { type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl parachains_assigner_coretime::Config for Runtime {} @@ -1269,6 +1286,7 @@ impl pallet_beefy::Config for Runtime { type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = MmrLeaf; + type AncestryHelper = MmrLeaf; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -1703,13 +1721,13 @@ mod benches { // that the path resolves correctly in the generated file. 
[polkadot_runtime_common::assigned_slots, AssignedSlots] [polkadot_runtime_common::auctions, Auctions] - [polkadot_runtime_common::coretime, Coretime] [polkadot_runtime_common::crowdloan, Crowdloan] [polkadot_runtime_common::claims, Claims] [polkadot_runtime_common::identity_migrator, IdentityMigrator] [polkadot_runtime_common::slots, Slots] [polkadot_runtime_common::paras_registrar, Registrar] [polkadot_runtime_parachains::configuration, Configuration] + [polkadot_runtime_parachains::coretime, Coretime] [polkadot_runtime_parachains::hrmp, Hrmp] [polkadot_runtime_parachains::disputes, ParasDisputes] [polkadot_runtime_parachains::inclusion, ParaInclusion] @@ -1767,7 +1785,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -1780,11 +1798,11 @@ sp_api::impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -1799,7 +1817,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { XcmPallet::dry_run_call::(origin, call) } @@ -1809,6 +1827,18 @@ sp_api::impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationConverter, + >::convert_location(location) + } + } + impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) @@ -1818,7 +1848,7 @@ sp_api::impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -2026,7 +2056,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -2036,7 +2066,7 @@ sp_api::impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -2046,7 +2076,7 @@ sp_api::impl_runtime_apis! 
{ ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) @@ -2362,7 +2392,7 @@ sp_api::impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(Assets, u32, Location, Box)> { + ) -> Option<(Assets, u32, Location, alloc::boxed::Box)> { // Relay supports only native token, either reserve transfer it to non-system parachains, // or teleport it to system parachain. Use the teleport case for benchmarking as it's // slightly heavier. diff --git a/polkadot/runtime/rococo/src/validator_manager.rs b/polkadot/runtime/rococo/src/validator_manager.rs index 0677ba7fbb2b2..ecfbff4fa0688 100644 --- a/polkadot/runtime/rococo/src/validator_manager.rs +++ b/polkadot/runtime/rococo/src/validator_manager.rs @@ -16,8 +16,8 @@ //! A pallet for managing validators on Rococo. +use alloc::vec::Vec; use sp_staking::SessionIndex; -use sp_std::vec::Vec; pub use pallet::*; diff --git a/polkadot/runtime/rococo/src/weights/pallet_session.rs b/polkadot/runtime/rococo/src/weights/pallet_session.rs index dbeca534add82..7f573d4e3952e 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_session.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_session.rs @@ -38,7 +38,7 @@ #![allow(unused_imports)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `pallet_session`. pub struct WeightInfo(PhantomData); diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs index 144e9d5b87238..06246ada72f16 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. 
- Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index 9f275e7b8cdc2..abcc1893c29b1 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,6 +50,10 @@ pub struct WeightInfo(PhantomData); impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -57,19 +61,23 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ±0)` - // Estimated: `3681 + s * (8 ±0)` - // Minimum execution time: 21_053_000 picoseconds.
- Weight::from_parts(17_291_897, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 104 - .saturating_add(Weight::from_parts(18_779, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(28_146_882, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 140 + .saturating_add(Weight::from_parts(21_283, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -77,15 +85,15 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ±0)` - // Estimated: `3681 + s * (8 ±0)` - // Minimum execution time: 20_843_000 picoseconds. - Weight::from_parts(16_881_986, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 104 - .saturating_add(Weight::from_parts(18_788, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 28_680_000 picoseconds. + Weight::from_parts(31_024_579, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 119 + .saturating_add(Weight::from_parts(20_989, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs index 0ad32996c4959..b2329c098cead 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_coretime.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Autogenerated weights for `runtime_parachains::coretime` +//! Autogenerated weights for `runtime_common::coretime` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//!
HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -45,28 +45,61 @@ use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; -use polkadot_runtime_parachains::configuration::{self, WeightInfo as ConfigWeightInfo}; - /// Weight functions for `runtime_common::coretime`. pub struct WeightInfo(PhantomData); -impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { +impl polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn request_revenue_at() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `6428` + // Minimum execution time: 36_613_000 picoseconds. + Weight::from_parts(37_637_000, 0) + .saturating_add(Weight::from_parts(0, 6428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn request_core_count() -> Weight { - ::WeightInfo::set_config_with_u32() + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_527_000 picoseconds. 
+ Weight::from_parts(7_784_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `CoreTimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoreTimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CoreTimeAssignmentProvider::CoreSchedules` (r:0 w:1) - /// Proof: `CoreTimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 100]`. fn assign_core(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3541` - // Minimum execution time: 6_275_000 picoseconds. - Weight::from_parts(6_883_543, 0) - .saturating_add(Weight::from_parts(0, 3541)) - // Standard Error: 202 - .saturating_add(Weight::from_parts(15_028, 0).saturating_mul(s.into())) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_220_000 picoseconds. + Weight::from_parts(9_905_773, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 257 + .saturating_add(Weight::from_parts(12_400, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index 12f3df897b1ee..bd2b0fbb8c061 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -18,8 +18,8 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::Runtime; +use alloc::vec::Vec; use frame_support::weights::Weight; -use sp_std::prelude::*; use xcm::{latest::prelude::*, DoubleEncoded}; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index c4d78b1081a62..ac379b69e3f2c 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,68 +11,67 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-std = { path = 
"../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-session = { workspace = true } +sp-version = { workspace = true } +frame-election-provider-support = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-curve = { path = "../../../substrate/frame/staking/reward-curve" } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -test-runtime-constants = { package = "test-runtime-constants", path = "constants", default-features = false } -pallet-timestamp = { path = 
"../../../substrate/frame/timestamp", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +frame-executive = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-indices = { workspace = true } +pallet-offences = { workspace = true } +pallet-session = { workspace = true } +frame-support = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +test-runtime-constants = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-sudo = { workspace = true } +pallet-vesting = { workspace = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } +pallet-xcm = { workspace = true } +polkadot-runtime-parachains = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-trie = { path = "../../../substrate/primitives/trie" } +hex-literal = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -125,7 +124,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "test-runtime-constants/std", diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index ed10ece54f67c..807774be71369 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -10,11 +10,11 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -sp-runtime = { path = 
"../../../../substrate/primitives/runtime", default-features = false } +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 8178639946f8d..a8a369a68e669 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -20,12 +20,15 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit = "256"] -use codec::Encode; -use pallet_transaction_payment::FungibleAdapter; -use sp_std::{ +extern crate alloc; + +use alloc::{ collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::*, + vec, + vec::Vec, }; +use codec::Encode; +use pallet_transaction_payment::FungibleAdapter; use polkadot_runtime_parachains::{ assigner_parachains as parachains_assigner_parachains, @@ -53,6 +56,7 @@ use frame_support::{ }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; +use pallet_timestamp::Now; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_primitives::{ slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, @@ -792,7 +796,7 @@ sp_api::impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } @@ -1014,7 +1018,7 @@ sp_api::impl_runtime_apis! { None } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( _equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -1186,7 +1190,7 @@ sp_api::impl_runtime_apis! { impl crate::GetLastTimestamp for Runtime { fn get_last_timestamp() -> u64 { - Timestamp::now() + Now::::get() } } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index b1d86ff9a85e9..b424b9a3ee55b 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -54,7 +54,7 @@ pub type LocalOriginToLocation = ( /// This implementation ensures that messages with non-reanchored assets return higher /// prices than messages with reanchored assets. /// Useful for `deposit_reserve_asset_works_for_any_xcm_sender` integration test. 
-pub struct TestDeliveryPrice(sp_std::marker::PhantomData<(A, F)>); +pub struct TestDeliveryPrice(core::marker::PhantomData<(A, F)>); impl, F: FeeTracker> PriceForMessageDelivery for TestDeliveryPrice { type Id = F::Id; diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index ccb8a02b981cc..9e739f4c7fe6c 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -11,123 +11,121 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } serde = { workspace = true } serde_derive = { optional = true, workspace = true } -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe", default-features = false } -sp-consensus-beefy = { path = "../../../substrate/primitives/consensus/beefy", default-features = false } -binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false } +sp-authority-discovery = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } 
+binary-merkle-tree = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-version = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-block-builder = { workspace = true } +sp-npos-elections = { workspace = true } -frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } -pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } -pallet-bags-list = { path = "../../../substrate/frame/bags-list", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-beefy = { path = "../../../substrate/frame/beefy", default-features = false } -pallet-beefy-mmr = { path = "../../../substrate/frame/beefy-mmr", default-features = false } -pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } -pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } -pallet-elections-phragmen = { package = "pallet-elections-phragmen", path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-election-provider-multi-phase = { path = "../../../substrate/frame/election-provider-multi-phase", default-features = false } -pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } -pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } -pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } -pallet-nomination-pools = { path = "../../../substrate/frame/nomination-pools", 
default-features = false } -pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } -pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } -pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } -pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } -pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } -pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-society = { path = "../../../substrate/frame/society", default-features = false } -pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } -pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", path = "../../../substrate/frame/staking/reward-curve" } -pallet-staking-runtime-api = { path = "../../../substrate/frame/staking/runtime-api", default-features = false } -pallet-delegated-staking = { path = "../../../substrate/frame/delegated-staking", default-features = false } -pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-nomination-pools-runtime-api = { path = "../../../substrate/frame/nomination-pools/runtime-api", default-features = false } -pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } -pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } -pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } -pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } -pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } +frame-election-provider-support = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { features = ["experimental", "tuples-96"], workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +westend-runtime-constants = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-bags-list = { workspace = true } +pallet-balances = { workspace = true } +pallet-beefy = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-collective = { workspace = true } +pallet-democracy = { workspace = true } +pallet-elections-phragmen = { workspace = true } +pallet-election-provider-multi-phase = { workspace = true } 
+pallet-fast-unstake = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-identity = { workspace = true } +pallet-indices = { workspace = true } +pallet-membership = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-mmr = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nomination-pools = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-offences = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-recovery = { workspace = true } +pallet-referenda = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-society = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-staking-runtime-api = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +pallet-root-testing = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -pallet-election-provider-support-benchmarking = { path = "../../../substrate/frame/election-provider-support/benchmarking", default-features = false, optional = true } -pallet-nomination-pools-benchmarking = { path = "../../../substrate/frame/nomination-pools/benchmarking", default-features = false, optional = true } -pallet-offences-benchmarking = { path = "../../../substrate/frame/offences/benchmarking", default-features = false, optional = true } -pallet-session-benchmarking = { path = "../../../substrate/frame/session/benchmarking", default-features = false, optional = true } -hex-literal = { version = "0.4.1", optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +pallet-election-provider-support-benchmarking = { optional = true, workspace = true } +pallet-nomination-pools-benchmarking = { optional = true, workspace = true } +pallet-offences-benchmarking = { optional = true, workspace = true } +pallet-session-benchmarking = { optional = true, workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } -polkadot-runtime-common = { path = "../common", default-features = false } -polkadot-primitives = { path = "../../primitives", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-parachains = { path = "../parachains", default-features = false } +polkadot-runtime-common = { workspace = true } +polkadot-primitives = { workspace = true } 
+polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-parachains = { workspace = true } -xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } [dev-dependencies] -hex-literal = "0.4.1" -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -sp-keyring = { path = "../../../substrate/primitives/keyring" } +hex-literal = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } +sp-keyring = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } -tokio = { version = "1.24.2", features = ["macros"] } -sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } +remote-externalities = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +sp-tracing = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] @@ -201,7 +199,6 @@ std = [ "polkadot-primitives/std", "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", - "rustc-hex/std", "scale-info/std", "serde/std", "serde_derive", @@ -222,7 +219,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", "sp-storage/std", "sp-tracing/std", "sp-transaction-pool/std", @@ -230,7 +226,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -288,7 +284,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index d50b168fac52e..f9b99ea5284d3 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -10,17 +10,17 @@ license.workspace = true workspace = true [dependencies] -smallvec = "1.8.0" +smallvec = { workspace = true, default-features = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -polkadot-primitives = { path = "../../../primitives", default-features = false } -polkadot-runtime-common = { path = "../../common", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +frame-support = { workspace = true } 
+polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +sp-core = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [features] default = ["std"] @@ -34,3 +34,6 @@ std = [ "xcm-builder/std", "xcm/std", ] + +# Set timing constants (e.g. session period) to faster versions to speed up testing. +fast-runtime = [] diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 58048272e791a..8d66ac2868d0b 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -116,6 +116,17 @@ pub mod system_parachain { /// All system parachains of Westend. pub type SystemParachains = IsChildSystemParachain; + + /// Coretime constants + pub mod coretime { + /// Coretime timeslice period in blocks + /// WARNING: This constant is used accross chains, so additional care should be taken + /// when changing it. + #[cfg(feature = "fast-runtime")] + pub const TIMESLICE_PERIOD: u32 = 20; + #[cfg(not(feature = "fast-runtime"))] + pub const TIMESLICE_PERIOD: u32 = 80; + } } /// Westend Treasury pallet instance. diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index d7ca677a7620e..11665953bd8e1 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -15,12 +15,13 @@ // along with Polkadot. If not, see . use crate::xcm_config; +use alloc::{boxed::Box, vec}; use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::pallet_prelude::DispatchResult; use frame_system::RawOrigin; use polkadot_primitives::Balance; use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; -use sp_std::{marker::PhantomData, prelude::*}; use westend_runtime_constants::currency::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; use xcm_executor::traits::TransactAsset; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 77262a98a94c8..f0b16e731d9e5 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -20,6 +20,13 @@ // `#[frame_support::runtime]!` does a lot of recursion and requires us to increase the limit. 
#![recursion_limit = "512"] +extern crate alloc; + +use alloc::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec, + vec::Vec, +}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ @@ -29,7 +36,7 @@ use frame_support::{ traits::{ fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, FromContains, InstanceFilter, KeyOwnerProofSystem, - LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, + LinearStoragePrice, ProcessMessage, ProcessMessageError, VariantCountOf, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -88,24 +95,20 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, IdentityLookup, - Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, + IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::*, -}; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; use xcm_builder::PayOverXcm; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -122,7 +125,12 @@ use sp_runtime::traits::Get; pub use sp_runtime::BuildStorage; /// Constant values used within the runtime. -use westend_runtime_constants::{currency::*, fee::*, system_parachain::BROKER_ID, time::*}; +use westend_runtime_constants::{ + currency::*, + fee::*, + system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID}, + time::*, +}; mod bag_thresholds; mod weights; @@ -154,7 +162,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_012_000, + spec_version: 1_014_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, @@ -310,7 +318,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -323,6 +331,7 @@ impl pallet_beefy::Config for Runtime { type MaxNominators = MaxNominators; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = BeefyMmrLeaf; + type AncestryHelper = BeefyMmrLeaf; type WeightInfo = (); type KeyOwnerProof = sp_session::MembershipProof; type EquivocationReportSystem = @@ -661,9 +670,6 @@ impl pallet_fast_unstake::Config for Runtime { } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 2000 * CENTS; - pub const ProposalBondMaximum: Balance = 1 * GRAND; pub const SpendPeriod: BlockNumber = 6 * DAYS; pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); @@ -686,13 +692,8 @@ parameter_types! { impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = EitherOfDiverse, Treasurer>; type RejectOrigin = EitherOfDiverse, Treasurer>; type RuntimeEvent = RuntimeEvent; - type OnSlash = Treasury; - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = ProposalBondMaximum; type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = (); @@ -1193,21 +1194,39 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + Junction::AccountId32 { network: None, id: BrokerPalletId::get().into_account_truncating() } + .into() + } +} + impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BrokerId = BrokerId; + type BrokerPotLocation = BrokerPot; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type AssetTransactor = crate::xcm_config::LocalAssetTransactor; + type AccountToLocation = xcm_builder::AliasesIntoAccountId32< + xcm_config::ThisNetwork, + ::AccountId, + >; type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * TIMESLICE_PERIOD; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_assigner_on_demand::Config for Runtime { @@ -1215,6 +1234,8 @@ impl parachains_assigner_on_demand::Config for Runtime { type Currency = Balances; type TrafficDefaultValue = OnDemandTrafficDefaultValue; type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; } impl parachains_assigner_coretime::Config for Runtime {} @@ -1784,7 +1805,7 @@ sp_api::impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -1992,6 +2013,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -2001,7 +2023,7 @@ sp_api::impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -2011,7 +2033,7 @@ sp_api::impl_runtime_apis! 
{ ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) @@ -2229,7 +2251,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) @@ -2242,11 +2264,11 @@ sp_api::impl_runtime_apis! { Ok(WeightToFee::weight_to_fee(&weight)) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) }, Err(_) => { - log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); Err(XcmPaymentApiError::VersionedConversionFailed) } } @@ -2261,7 +2283,7 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { XcmPallet::dry_run_call::(origin, call) } @@ -2271,6 +2293,18 @@ sp_api::impl_runtime_apis! { } } + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationConverter, + >::convert_location(location) + } + } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< Block, AccountId, @@ -2389,6 +2423,8 @@ sp_api::impl_runtime_apis! { use xcm_config::{AssetHub, TokenLocation}; + use alloc::boxed::Box; + parameter_types! { pub ExistentialDepositAsset: Option = Some(( TokenLocation::get(), diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs index 144e9d5b87238..06246ada72f16 100644 --- a/polkadot/runtime/westend/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs @@ -63,51 +63,6 @@ impl pallet_treasury::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `143` - // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. 
- Weight::from_parts(376_000_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` - // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(121_184_402, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } /// Storage: Treasury Approvals (r:1 w:1) /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index 8b046f5d34ad7..1bd9fa31b81b2 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,6 +50,10 @@ pub struct WeightInfo(PhantomData); impl polkadot_runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -57,19 +61,23 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ±0)` - // Estimated: `3681 + s * (8 ±0)` - // Minimum execution time: 21_396_000 picoseconds. - Weight::from_parts(20_585_695, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 127 - .saturating_add(Weight::from_parts(20_951, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 29_427_000 picoseconds. + Weight::from_parts(26_756_913, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 121 + .saturating_add(Weight::from_parts(20_849, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) @@ -77,15 +85,15 @@ impl polkadot_runtime_parachains::assigner_on_demand::W /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `218 + s * (8 ±0)` - // Estimated: `3681 + s * (8 ±0)` - // Minimum execution time: 21_412_000 picoseconds. - Weight::from_parts(19_731_554, 0) - .saturating_add(Weight::from_parts(0, 3681)) - // Standard Error: 128 - .saturating_add(Weight::from_parts(21_055, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 29_329_000 picoseconds.
+        Weight::from_parts(26_415_340, 0)
+            .saturating_add(Weight::from_parts(0, 3733))
+            // Standard Error: 129
+            .saturating_add(Weight::from_parts(20_909, 0).saturating_mul(s.into()))
+            .saturating_add(T::DbWeight::get().reads(5))
+            .saturating_add(T::DbWeight::get().writes(3))
             .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into()))
     }
 }
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs
index 443651a6fda44..9df382875f5f1 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_coretime.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `runtime_parachains::coretime`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-06-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024

 // Executed Command:
@@ -48,6 +48,28 @@ use core::marker::PhantomData;
 /// Weight functions for `runtime_parachains::coretime`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo<T> {
+    /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1)
+    /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+    /// Storage: `System::Account` (r:1 w:0)
+    /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+    /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+    /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+    /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+    /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+    /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+    fn request_revenue_at() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `2930`
+        // Estimated: `6395`
+        // Minimum execution time: 34_947_000 picoseconds.
+        Weight::from_parts(35_550_000, 0)
+            .saturating_add(Weight::from_parts(0, 6395))
+            .saturating_add(T::DbWeight::get().reads(6))
+            .saturating_add(T::DbWeight::get().writes(3))
+    }
     /// Storage: `Configuration::PendingConfigs` (r:1 w:1)
     /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
     /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0)
@@ -58,8 +80,8 @@ impl<T: frame_system::Config> polkadot_runtime_parachains::coretime::WeightInfo
         // Proof Size summary in bytes:
         // Measured: `151`
         // Estimated: `1636`
-        // Minimum execution time: 7_486_000 picoseconds.
-        Weight::from_parts(7_889_000, 0)
+        // Minimum execution time: 7_519_000 picoseconds.
+ Weight::from_parts(7_803_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -73,11 +95,11 @@ impl polkadot_runtime_parachains::coretime::WeightInfo // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 9_409_000 picoseconds. - Weight::from_parts(10_177_115, 0) + // Minimum execution time: 9_697_000 picoseconds. + Weight::from_parts(10_610_219, 0) .saturating_add(Weight::from_parts(0, 3612)) - // Standard Error: 259 - .saturating_add(Weight::from_parts(13_932, 0).saturating_mul(s.into())) + // Standard Error: 732 + .saturating_add(Weight::from_parts(10_364, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 09e883a9f7af5..cb5894ea51e3c 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -18,8 +18,8 @@ mod pallet_xcm_benchmarks_fungible; mod pallet_xcm_benchmarks_generic; use crate::Runtime; +use alloc::vec::Vec; use frame_support::weights::Weight; -use sp_std::prelude::*; use xcm::{ latest::{prelude::*, QueryResponseInfo}, DoubleEncoded, diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 7181afd9989ec..53ea0b74463bc 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -10,7 +10,7 @@ description = "Stores messages other authorities issue about candidates in Polka workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-core = { path = "../../substrate/primitives/core" } -polkadot-primitives = { path = "../primitives" } -gum = { package = "tracing-gum", path = "../node/gum" } +codec = { features = ["derive"], workspace = true } +sp-core = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index ad6d7259d2483..16205b0f51f57 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -10,9 +10,9 @@ description = "CLI to generate voter bags for Polkadot runtimes" workspace = true [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } -generate-bags = { path = "../../../substrate/utils/frame/generate-bags" } -sp-io = { path = "../../../substrate/primitives/io" } +generate-bags = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } -westend-runtime = { path = "../../runtime/westend" } +westend-runtime = { workspace = true } diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index 20e4130f888bc..206ca8cf19a90 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -10,14 +10,14 @@ license.workspace = true workspace = true [dependencies] -westend-runtime = { path = "../../../runtime/westend" } -westend-runtime-constants = { path = "../../../runtime/westend/constants" } +westend-runtime = { workspace = true } +westend-runtime-constants = 
{ workspace = true, default-features = true } -pallet-bags-list-remote-tests = { path = "../../../../substrate/frame/bags-list/remote-tests" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -frame-system = { path = "../../../../substrate/frame/system" } -sp-core = { path = "../../../../substrate/primitives/core" } +pallet-bags-list-remote-tests = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 690fb377dad78..72174bda2340c 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -10,23 +10,23 @@ license.workspace = true workspace = true [dependencies] -array-bytes = "6.2.2" -bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] } -derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } -impl-trait-for-tuples = "0.2.2" +array-bytes = { workspace = true, default-features = true } +bounded-collections = { features = ["serde"], workspace = true } +derivative = { features = ["use_core"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-weights = { features = ["serde"], workspace = true } serde = { features = ["alloc", "derive", "rc"], workspace = true } -schemars = { version = "0.8.13", default-features = true, optional = true } -xcm-procedural = { path = "procedural" } -environmental = { version = "1.1.4", default-features = false } +schemars = { default-features = true, optional = true, workspace = true } +xcm-procedural = { workspace = true, default-features = true } +environmental = { workspace = true } [dev-dependencies] -sp-io = { path = "../../substrate/primitives/io" } -hex = "0.4.3" -hex-literal = "0.4.1" +sp-io = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml index 9820bd36dc0b1..9d8f4c0a6430b 100644 --- a/polkadot/xcm/docs/Cargo.toml +++ b/polkadot/xcm/docs/Cargo.toml @@ -10,30 +10,30 @@ publish = false [dependencies] # For XCM stuff -xcm = { path = "../../xcm", package = "staging-xcm" } -xcm-executor = { path = "../../xcm/xcm-executor", package = "staging-xcm-executor" } -xcm-builder = { path = "../../xcm/xcm-builder", package = "staging-xcm-builder" } -xcm-simulator = { path = "../../xcm/xcm-simulator" } -pallet-xcm = { path = "../../xcm/pallet-xcm" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = 
true } +xcm-builder = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } # For building FRAME runtimes -frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = ["experimental", "runtime"] } -codec = { package = "parity-scale-codec", version = "3.6.9" } -scale-info = { version = "2.6.0", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } -polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" } -polkadot-primitives = { path = "../../../polkadot/primitives" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-io = { path = "../../../substrate/primitives/io" } +frame = { features = ["experimental", "runtime"], workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +scale-info = { workspace = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } # Some pallets -pallet-message-queue = { path = "../../../substrate/frame/message-queue" } -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-message-queue = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } # For building docs simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } -docify = "0.2.6" +docify = { workspace = true } [dev-dependencies] -test-log = "0.2.14" +test-log = { workspace = true } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs index e3fdda2e73337..23d6664bdafcb 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs @@ -16,7 +16,7 @@ //! # Runtime -use frame::{deps::frame_system, prelude::*, runtime::prelude::*, traits::IdentityLookup}; +use frame::{deps::frame_system, runtime::prelude::*, traits::IdentityLookup}; use xcm_executor::XcmExecutor; use xcm_simulator::mock_message_queue; @@ -36,7 +36,7 @@ construct_runtime! 
{ } } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountId = AccountId; @@ -49,8 +49,7 @@ impl mock_message_queue::Config for Runtime { type XcmExecutor = XcmExecutor; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; type AccountStore = System; } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs index 25c35dd4aaa83..686f86b37b732 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -36,7 +36,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = IdentityLookup; @@ -44,7 +44,7 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type AccountStore = System; } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 8bf3b9abf6634..b07bdfdca3d19 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -13,29 +13,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +xcm-executor = { workspace = true } +frame-benchmarking = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } log = { workspace = true, default-features = true } [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-assets = { path = 
"../../../substrate/frame/assets" } -sp-tracing = { path = "../../../substrate/primitives/tracing" } -xcm = { package = "staging-xcm", path = ".." } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } # temp -pallet-xcm = { path = "../pallet-xcm" } -polkadot-runtime-common = { path = "../../runtime/common" } +pallet-xcm = { workspace = true, default-features = true } +polkadot-runtime-common = { workspace = true, default-features = true } # westend-runtime = { path = "../../runtime/westend", features = ["runtime-benchmarks"] } -polkadot-primitives = { path = "../../primitives" } +polkadot-primitives = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index d99da9184b5d8..6ce49074a6e2b 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -16,6 +16,7 @@ use super::*; use crate::{account_and_location, new_executor, AssetTransactorOf, EnsureDelivery, XcmCallOf}; +use alloc::{vec, vec::Vec}; use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError, BenchmarkResult}; use frame_support::{ pallet_prelude::Get, @@ -23,7 +24,6 @@ use frame_support::{ weights::Weight, }; use sp_runtime::traits::{Bounded, Zero}; -use sp_std::{prelude::*, vec}; use xcm::latest::{prelude::*, MAX_ITEMS_IN_ASSETS}; use xcm_executor::traits::{ConvertLocation, FeeReason, TransactAsset}; @@ -37,7 +37,7 @@ benchmarks_instance_pallet! 
{ >::Balance as TryInto - >::Error: sp_std::fmt::Debug, + >::Error: core::fmt::Debug, } withdraw_asset { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 760b21f93566e..40a7da58a687c 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -16,10 +16,10 @@ use super::*; use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; +use alloc::{vec, vec::Vec}; use codec::Encode; use frame_benchmarking::{benchmarks, BenchmarkError}; use frame_support::{dispatch::GetDispatchInfo, traits::fungible::Inspect}; -use sp_std::{prelude::*, vec}; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index a43f27bf47e72..4a12bb7f47c66 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -18,9 +18,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::Encode; use frame_benchmarking::{account, BenchmarkError}; -use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::EnsureDelivery; use xcm_executor::{traits::ConvertLocation, Config as XcmConfig}; diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 6f9b389ab6f12..ed4b441d7c33c 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -10,32 +10,31 @@ license.workspace = true workspace = true [dependencies] -bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bounded-collections = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } -xcm-fee-payment-runtime-api = { path = "../xcm-fee-payment-runtime-api", default-features = false } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-builder = { workspace = true } +xcm-runtime-apis = { workspace = true } # marked optional, used in benchmarking -frame-benchmarking = { path 
= "../../../substrate/frame/benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-balances = { optional = true, workspace = true } [dev-dependencies] -pallet-assets = { path = "../../../substrate/frame/assets" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../parachain" } +pallet-assets = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = ["std"] @@ -52,10 +51,9 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "xcm-builder/std", "xcm-executor/std", - "xcm-fee-payment-runtime-api/std", + "xcm-runtime-apis/std", "xcm/std", ] runtime-benchmarks = [ @@ -69,7 +67,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", - "xcm-fee-payment-runtime-api/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index da46a6a37c065..d09c81bf434e2 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -18,7 +18,6 @@ use super::*; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; -use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::EnsureDelivery; use xcm_executor::traits::FeeReason; diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8f67e6e7d9496..6451901279b16 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -27,7 +27,11 @@ mod tests; pub mod migration; +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::{marker::PhantomData, result::Result}; use frame_support::{ dispatch::{ DispatchErrorWithPostInfo, GetDispatchInfo, PostDispatchInfo, WithPostDispatchInfo, @@ -49,7 +53,6 @@ use sp_runtime::{ }, Either, RuntimeDebug, }; -use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_builder::{ ExecuteController, ExecuteControllerWeightInfo, InspectMessageQueues, QueryController, @@ -64,7 +67,7 @@ use xcm_executor::{ }, AssetsInHolding, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -792,7 +795,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, /// The default version to encode outgoing XCM messages with. pub safe_xcm_version: Option, } @@ -1376,7 +1379,7 @@ pub mod pallet { /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the /// fee on the `dest` (and possibly reserve) chains. /// - `assets_transfer_type`: The XCM `TransferType` used to transfer the `assets`. 
- /// - `remote_fees_id`: One of the included `assets` to be be used to pay fees. + /// - `remote_fees_id`: One of the included `assets` to be used to pay fees. /// - `fees_transfer_type`: The XCM `TransferType` used to transfer the `fees` assets. /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the /// transfer, which also determines what happens to the assets on the destination chain. @@ -1438,8 +1441,8 @@ enum FeesHandling { Separate { local_xcm: Xcm<::RuntimeCall>, remote_xcm: Xcm<()> }, } -impl sp_std::fmt::Debug for FeesHandling { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for FeesHandling { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Batched { fees } => write!(f, "FeesHandling::Batched({:?})", fees), Self::Separate { local_xcm, remote_xcm } => write!( @@ -1840,8 +1843,8 @@ impl Pallet { FeesHandling::Separate { local_xcm: mut local_fees, remote_xcm: mut remote_fees } => { // fees are handled by separate XCM instructions, prepend fees instructions (for // remote XCM they have to be prepended instead of appended to pass barriers). - sp_std::mem::swap(local, &mut local_fees); - sp_std::mem::swap(remote, &mut remote_fees); + core::mem::swap(local, &mut local_fees); + core::mem::swap(remote, &mut remote_fees); // these are now swapped so fees actually go first local.inner_mut().append(&mut local_fees.into_inner()); remote.inner_mut().append(&mut remote_fees.into_inner()); @@ -2442,7 +2445,7 @@ impl Pallet { /// /// Returns not only the call result and events, but also the local XCM, if any, /// and any XCMs forwarded to other locations. - /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + /// Meant to be used in the `xcm_runtime_apis::dry_run::DryRunApi` runtime API. pub fn dry_run_call( origin: OriginCaller, call: RuntimeCall, @@ -2474,7 +2477,7 @@ impl Pallet { /// Dry-runs `xcm` with the given `origin_location`. /// /// Returns execution result, events, and any forwarded XCMs to other locations. - /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + /// Meant to be used in the `xcm_runtime_apis::dry_run::DryRunApi` runtime API. pub fn dry_run_xcm( origin_location: VersionedLocation, xcm: VersionedXcm, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index b157e6b5c3d5f..0aec97ab41051 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -34,7 +34,7 @@ pub mod v1 { /// enacted on-chain. /// /// Use experimental [`MigrateToV1`] instead. - pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -81,7 +81,7 @@ pub mod v1 { /// `XCM_VERSION`. /// /// NOTE: This migration can be permanently added to the runtime migrations. 
-pub struct MigrateToLatestXcmVersion(sp_std::marker::PhantomData); +pub struct MigrateToLatestXcmVersion(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToLatestXcmVersion { fn on_runtime_upgrade() -> Weight { CurrentMigration::::put(VersionMigrationStage::default()); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index ead98e1d04600..3941d104b81c6 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use codec::Encode; +pub use core::cell::RefCell; use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -28,7 +29,6 @@ use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::origin; use sp_core::H256; use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; -pub use sp_std::cell::RefCell; use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -266,24 +266,13 @@ impl frame_system::Config for Test { parameter_types! { pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index ca9fb351bd3ca..a7db183bcdbf8 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -14,11 +14,11 @@ workspace = true proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } -Inflector = "0.11.4" +Inflector = { workspace = true } [dev-dependencies] -trybuild = { version = "1.0.88", features = ["diff"] } -xcm = { package = "staging-xcm", path = ".." } +trybuild = { features = ["diff"], workspace = true } +xcm = { workspace = true, default-features = true } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 0a33d52580fca..09ead1389d19d 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -233,6 +233,32 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum + .variants + .iter() + .filter(|variant| variant.ident == "ClearOrigin") + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => { + quote! 
{ + #(#docs)* + pub fn #method_name(mut self) -> XcmBuilder { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), + }; + Ok(method) + }) + .collect::, _>>()?; + // Then we require fees to be paid let buy_execution_method = data_enum .variants @@ -276,6 +302,7 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result XcmBuilder { + #(#allowed_after_load_holding_methods)* #buy_execution_method } }; diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 96b16fb7e4565..4202309bf3f71 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -79,3 +79,24 @@ fn default_builder_requires_buy_execution() { ]) ); } + +#[test] +fn default_builder_allows_clear_origin_before_buy_execution() { + let asset: Asset = (Here, 100u128).into(); + let beneficiary: Location = [0u8; 32].into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone()) + .clear_origin() + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone(), beneficiary.clone()) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + ClearOrigin, + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 38e55d0ea51e5..1afc120f500c6 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -62,7 +62,10 @@ use super::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{ConstU32, WeakBoundedVec}; -use codec::{self, Decode, Encode, MaxEncodedLen}; +use codec::{ + self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, + MaxEncodedLen, +}; use core::{fmt::Debug, result}; use derivative::Derivative; use scale_info::TypeInfo; @@ -237,7 +240,7 @@ pub enum BodyPart { #[codec(compact)] denom: u32, }, - /// More than than the given proportion of members of the body. + /// More than the given proportion of members of the body. MoreThanProportion { #[codec(compact)] nom: u32, @@ -278,7 +281,7 @@ pub const VERSION: super::Version = 2; pub type QueryId = u64; /// DEPRECATED. Please use XCMv3 or XCMv4 instead. -#[derive(Derivative, Default, Encode, Decode, TypeInfo)] +#[derive(Derivative, Default, Encode, TypeInfo)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] #[codec(decode_bound())] @@ -286,6 +289,31 @@ pub type QueryId = u64; #[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct Xcm(pub Vec>); +environmental::environmental!(instructions_count: u8); + +impl Decode for Xcm { + fn decode(input: &mut I) -> core::result::Result { + instructions_count::using_once(&mut 0, || { + let number_of_instructions: u32 = >::decode(input)?.into(); + instructions_count::with(|count| { + *count = count.saturating_add(number_of_instructions as u8); + if *count > MAX_INSTRUCTIONS_TO_DECODE { + return Err(CodecError::from("Max instructions exceeded")) + } + Ok(()) + }) + .unwrap_or(Ok(()))?; + let decoded_instructions = decode_vec_with_len(input, number_of_instructions as usize)?; + Ok(Self(decoded_instructions)) + }) + } +} + +/// The maximal number of instructions in an XCM before decoding fails. +/// +/// This is a deliberate limit - not a technical one. 
+pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; + impl Xcm { /// Create an empty instance. pub fn new() -> Self { @@ -1157,3 +1185,38 @@ impl TryFrom> for Instruction(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); + let encoded = max_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); + + let big_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]); + let encoded = big_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let nested_xcm = Xcm::<()>(vec![ + DepositReserveAsset { + assets: All.into(), + dest: Here.into(), + xcm: max_xcm, + max_assets: 1, + }; + (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize + ]); + let encoded = nested_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); + let encoded = even_more_nested_xcm.encode(); + assert_eq!(encoded.len(), 345730); + // This should not decode since the limit is 100 + assert_eq!(MAX_INSTRUCTIONS_TO_DECODE, 100, "precondition"); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + } +} diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index aea4e03725159..24348bf2e6721 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -241,7 +241,7 @@ pub enum BodyPart { #[codec(compact)] denom: u32, }, - /// More than than the given proportion of members of the body. + /// More than the given proportion of members of the body. MoreThanProportion { #[codec(compact)] nom: u32, diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 79c601b98b4fd..7702e2f9be07d 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -10,34 +10,33 @@ version = "7.0.0" workspace = true [dependencies] -impl-trait-for-tuples = "0.2.1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -xcm = { package = "staging-xcm", path = "..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +impl-trait-for-tuples = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } log = { workspace = true } # Polkadot dependencies 
-polkadot-parachain-primitives = { path = "../../parachain", default-features = false } +polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -primitive-types = "0.12.1" -pallet-balances = { path = "../../../substrate/frame/balances" } -pallet-xcm = { path = "../pallet-xcm" } -pallet-salary = { path = "../../../substrate/frame/salary" } -pallet-assets = { path = "../../../substrate/frame/assets" } -polkadot-primitives = { path = "../../primitives" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } -assert_matches = "1.5.0" -polkadot-test-runtime = { path = "../../runtime/test-runtime" } +primitive-types = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +pallet-salary = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +assert_matches = { workspace = true } +polkadot-test-runtime = { workspace = true } [features] default = ["std"] @@ -66,7 +65,6 @@ std = [ "sp-arithmetic/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-weights/std", "xcm-executor/std", "xcm/std", diff --git a/polkadot/xcm/xcm-builder/src/asset_conversion.rs b/polkadot/xcm/xcm-builder/src/asset_conversion.rs index 520ce87448ea4..16ae05c20795e 100644 --- a/polkadot/xcm/xcm-builder/src/asset_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/asset_conversion.rs @@ -16,9 +16,9 @@ //! Adapters to work with [`frame_support::traits::fungibles`] through XCM. +use core::{marker::PhantomData, result}; use frame_support::traits::{Contains, Get}; use sp_runtime::traits::MaybeEquivalence; -use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{Error as MatchError, MatchesFungibles, MatchesNonFungibles}; diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 11e9122f9a121..5d95005eb6630 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -17,12 +17,12 @@ //! Various implementations for `ShouldExecute`. use crate::{CreateMatcher, MatchXcm}; +use core::{cell::Cell, marker::PhantomData, ops::ControlFlow, result::Result}; use frame_support::{ ensure, traits::{Contains, Get, ProcessMessageError}, }; use polkadot_parachain_primitives::primitives::IsSystem; -use sp_std::{cell::Cell, marker::PhantomData, ops::ControlFlow, result::Result}; use xcm::prelude::*; use xcm_executor::traits::{CheckSuspension, OnResponse, Properties, ShouldExecute}; diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 04b19eaa58700..d4ce2ca5b353c 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -18,11 +18,11 @@ //! Controller traits defined in this module are high-level traits that will rely on other traits //! from `xcm-executor` to perform their tasks. 
+use alloc::boxed::Box; use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, }; -use sp_std::boxed::Box; use xcm::prelude::*; pub use xcm_executor::traits::QueryHandler; diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index 99a736d6ac1f9..355d6ad85388c 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -19,9 +19,9 @@ #![allow(deprecated)] use super::MintLocation; +use core::{marker::PhantomData, result}; use frame_support::traits::{ExistenceRequirement::AllowDeath, Get, WithdrawReasons}; use sp_runtime::traits::CheckedSub; -use sp_std::{marker::PhantomData, result}; use xcm::latest::{Asset, Error as XcmError, Location, Result, XcmContext}; use xcm_executor::{ traits::{ConvertLocation, MatchesFungible, TransactAsset}, diff --git a/polkadot/xcm/xcm-builder/src/filter_asset_location.rs b/polkadot/xcm/xcm-builder/src/filter_asset_location.rs index d80c5d70deea8..16b7be7f3ba98 100644 --- a/polkadot/xcm/xcm-builder/src/filter_asset_location.rs +++ b/polkadot/xcm/xcm-builder/src/filter_asset_location.rs @@ -17,8 +17,9 @@ //! Various implementations of `ContainsPair` or //! `Contains<(Location, Vec)>`. +use alloc::vec::Vec; +use core::marker::PhantomData; use frame_support::traits::{Contains, ContainsPair, Get}; -use sp_std::{marker::PhantomData, vec::Vec}; use xcm::latest::{Asset, AssetFilter, AssetId, Location, WildAsset}; /// Accepts an asset iff it is a native asset. @@ -44,7 +45,7 @@ impl> ContainsPair for Case /// implementation of the given `Location` and if every asset from `assets` matches at least one of /// the `AssetFilter` instances provided by the `Get` implementation of `AssetFilters`. pub struct LocationWithAssetFilters( - sp_std::marker::PhantomData<(LocationFilter, AssetFilters)>, + core::marker::PhantomData<(LocationFilter, AssetFilters)>, ); impl, AssetFilters: Get>> Contains<(Location, Vec)> for LocationWithAssetFilters @@ -75,7 +76,7 @@ impl, AssetFilters: Get>> pub struct AllAssets; impl Get> for AllAssets { fn get() -> Vec { - sp_std::vec![AssetFilter::Wild(WildAsset::All)] + alloc::vec![AssetFilter::Wild(WildAsset::All)] } } @@ -96,11 +97,11 @@ mod tests { pub AssetYLocation: Location = Location::new(1, [GeneralIndex(2222)]); pub AssetZLocation: Location = Location::new(1, [GeneralIndex(3333)]); - pub OnlyAssetXOrAssetY: sp_std::vec::Vec = sp_std::vec![ + pub OnlyAssetXOrAssetY: alloc::vec::Vec = alloc::vec![ Wild(AllOf { fun: WildFungible, id: AssetId(AssetXLocation::get()) }), Wild(AllOf { fun: WildFungible, id: AssetId(AssetYLocation::get()) }), ]; - pub OnlyAssetZ: sp_std::vec::Vec = sp_std::vec![ + pub OnlyAssetZ: alloc::vec::Vec = alloc::vec![ Wild(AllOf { fun: WildFungible, id: AssetId(AssetZLocation::get()) }) ]; } diff --git a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs index 45a0e2bdca286..25a705a39eb73 100644 --- a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs @@ -17,6 +17,7 @@ //! Adapters to work with [`frame_support::traits::fungible`] through XCM. 
use super::MintLocation; +use core::{marker::PhantomData, result}; use frame_support::traits::{ tokens::{ fungible, @@ -27,7 +28,6 @@ use frame_support::traits::{ }, Get, }; -use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::{ traits::{ConvertLocation, Error as MatchError, MatchesFungible, TransactAsset}, diff --git a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs index 88bbf01d9e1f8..a259afc6e6825 100644 --- a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs @@ -16,6 +16,7 @@ //! Adapters to work with [`frame_support::traits::fungibles`] through XCM. +use core::{marker::PhantomData, result}; use frame_support::traits::{ tokens::{ fungibles, @@ -26,7 +27,6 @@ use frame_support::traits::{ }, Contains, Get, }; -use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{ConvertLocation, Error as MatchError, MatchesFungibles, TransactAsset}; @@ -101,7 +101,7 @@ impl AssetChecking for NoChecking { /// Implementation of `AssetChecking` which subjects a given set of assets `T` to having their /// teleportations recorded with a `MintLocation::Local`. -pub struct LocalMint(sp_std::marker::PhantomData); +pub struct LocalMint(core::marker::PhantomData); impl> AssetChecking for LocalMint { fn asset_checking(asset: &AssetId) -> Option { match T::contains(asset) { @@ -113,7 +113,7 @@ impl> AssetChecking for LocalMint { /// Implementation of `AssetChecking` which subjects a given set of assets `T` to having their /// teleportations recorded with a `MintLocation::NonLocal`. -pub struct NonLocalMint(sp_std::marker::PhantomData); +pub struct NonLocalMint(core::marker::PhantomData); impl> AssetChecking for NonLocalMint { fn asset_checking(asset: &AssetId) -> Option { match T::contains(asset) { @@ -126,7 +126,7 @@ impl> AssetChecking for NonLocalMint { /// Implementation of `AssetChecking` which subjects a given set of assets `L` to having their /// teleportations recorded with a `MintLocation::Local` and a second set of assets `R` to having /// their teleportations recorded with a `MintLocation::NonLocal`. -pub struct DualMint(sp_std::marker::PhantomData<(L, R)>); +pub struct DualMint(core::marker::PhantomData<(L, R)>); impl, R: Contains> AssetChecking for DualMint { diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index cc06c298a418d..c3495601cd875 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -20,6 +20,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(test)] mod tests; diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index f95258492381b..1d840e9c0dde4 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -15,11 +15,12 @@ // along with Polkadot. If not, see . 
use crate::universal_exports::ensure_is_remote; +use alloc::vec::Vec; use codec::{Compact, Decode, Encode}; +use core::marker::PhantomData; use frame_support::traits::Get; use sp_io::hashing::blake2_256; use sp_runtime::traits::{AccountIdConversion, TrailingZeroInput, TryConvert}; -use sp_std::{marker::PhantomData, prelude::*}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -460,7 +461,9 @@ impl #[cfg(test)] mod tests { use super::*; + use alloc::vec; use polkadot_primitives::AccountId; + pub type ForeignChainAliasAccount = HashedDescription; diff --git a/polkadot/xcm/xcm-builder/src/matches_location.rs b/polkadot/xcm/xcm-builder/src/matches_location.rs index b6c2807e6b29d..71c5ec1efd6f9 100644 --- a/polkadot/xcm/xcm-builder/src/matches_location.rs +++ b/polkadot/xcm/xcm-builder/src/matches_location.rs @@ -17,14 +17,14 @@ //! Various implementations and utilities for matching and filtering `Location` and //! `InteriorLocation` types. +use core::marker::PhantomData; use frame_support::traits::{Contains, Get}; use sp_runtime::traits::MaybeEquivalence; -use sp_std::marker::PhantomData; use xcm::latest::{InteriorLocation, Location, NetworkId}; /// An implementation of `Contains` that checks for `Location` or /// `InteriorLocation` if starts with the provided type `T`. -pub struct StartsWith(sp_std::marker::PhantomData<(T, L)>); +pub struct StartsWith(core::marker::PhantomData<(T, L)>); impl, L: TryInto + Clone> Contains for StartsWith { fn contains(location: &L) -> bool { let latest_location: Location = @@ -42,7 +42,7 @@ impl> Contains for StartsWith { /// An implementation of `Contains` that checks for `Location` or /// `InteriorLocation` if starts with expected `GlobalConsensus(NetworkId)` provided as type /// `T`. -pub struct StartsWithExplicitGlobalConsensus(sp_std::marker::PhantomData); +pub struct StartsWithExplicitGlobalConsensus(core::marker::PhantomData); impl> Contains for StartsWithExplicitGlobalConsensus { fn contains(location: &Location) -> bool { matches!(location.interior().global_consensus(), Ok(requested_network) if requested_network.eq(&T::get())) diff --git a/polkadot/xcm/xcm-builder/src/matches_token.rs b/polkadot/xcm/xcm-builder/src/matches_token.rs index e49fd18f88d80..095c50a5a25b0 100644 --- a/polkadot/xcm/xcm-builder/src/matches_token.rs +++ b/polkadot/xcm/xcm-builder/src/matches_token.rs @@ -16,8 +16,8 @@ //! Various implementations for the `MatchesFungible` trait. +use core::marker::PhantomData; use frame_support::traits::Get; -use sp_std::marker::PhantomData; use xcm::latest::{ Asset, AssetId, AssetInstance, Fungibility::{Fungible, NonFungible}, diff --git a/polkadot/xcm/xcm-builder/src/nonfungible_adapter.rs b/polkadot/xcm/xcm-builder/src/nonfungible_adapter.rs index b69002eafc5b9..8e6232ea64d27 100644 --- a/polkadot/xcm/xcm-builder/src/nonfungible_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/nonfungible_adapter.rs @@ -17,11 +17,11 @@ //! Adapters to work with [`frame_support::traits::tokens::nonfungible`] through XCM. 
use crate::MintLocation; +use core::{marker::PhantomData, result}; use frame_support::{ ensure, traits::{tokens::nonfungible, Get}, }; -use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{ ConvertLocation, Error as MatchError, MatchesNonFungible, TransactAsset, diff --git a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs index 3fce953848ebd..b111a05a4f1fc 100644 --- a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs @@ -17,11 +17,11 @@ //! Adapters to work with [`frame_support::traits::tokens::nonfungibles`] through XCM. use crate::{AssetChecking, MintLocation}; +use core::{marker::PhantomData, result}; use frame_support::{ ensure, traits::{tokens::nonfungibles, Get}, }; -use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{ ConvertLocation, Error as MatchError, MatchesNonFungibles, TransactAsset, diff --git a/polkadot/xcm/xcm-builder/src/origin_aliases.rs b/polkadot/xcm/xcm-builder/src/origin_aliases.rs index bbf810463a7c5..d568adc3127ce 100644 --- a/polkadot/xcm/xcm-builder/src/origin_aliases.rs +++ b/polkadot/xcm/xcm-builder/src/origin_aliases.rs @@ -16,8 +16,8 @@ //! Implementation for `ContainsPair`. +use core::marker::PhantomData; use frame_support::traits::{Contains, ContainsPair}; -use sp_std::marker::PhantomData; use xcm::latest::prelude::*; /// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches diff --git a/polkadot/xcm/xcm-builder/src/origin_conversion.rs b/polkadot/xcm/xcm-builder/src/origin_conversion.rs index f64b5660f6674..6e73c0dae7b69 100644 --- a/polkadot/xcm/xcm-builder/src/origin_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/origin_conversion.rs @@ -16,11 +16,11 @@ //! Various implementations for `ConvertOrigin`. +use core::marker::PhantomData; use frame_support::traits::{EnsureOrigin, Get, GetBacking, OriginTrait}; use frame_system::RawOrigin as SystemRawOrigin; use polkadot_parachain_primitives::primitives::IsSystem; use sp_runtime::traits::TryConvert; -use sp_std::marker::PhantomData; use xcm::latest::{BodyId, BodyPart, Junction, Junctions::*, Location, NetworkId, OriginKind}; use xcm_executor::traits::{ConvertLocation, ConvertOrigin}; diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 35b624b041539..978c6870cdaf1 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -16,12 +16,13 @@ //! `PayOverXcm` struct for paying through XCM and getting the status back. +use alloc::vec; +use core::marker::PhantomData; use frame_support::traits::{ tokens::{Pay, PaymentStatus}, Get, }; use sp_runtime::traits::TryConvert; -use sp_std::{marker::PhantomData, vec}; use xcm::{opaque::lts::Weight, prelude::*}; use xcm_executor::traits::{QueryHandler, QueryResponseStatus}; @@ -199,7 +200,7 @@ pub struct LocatableAssetId { /// Adapter `struct` which implements a conversion from any `AssetKind` into a [`LocatableAssetId`] /// value using a fixed `Location` for the `location` field. 
-pub struct FixedLocation(sp_std::marker::PhantomData); +pub struct FixedLocation(core::marker::PhantomData); impl, AssetKind: Into> TryConvert for FixedLocation { diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index ef8c71fc24951..2e6f8c5fb5661 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -17,9 +17,9 @@ //! Implementation of `ProcessMessage` for an `ExecuteXcm` implementation. use codec::{Decode, FullCodec, MaxEncodedLen}; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::traits::{ProcessMessage, ProcessMessageError}; use scale_info::TypeInfo; -use sp_std::{fmt::Debug, marker::PhantomData}; use sp_weights::{Weight, WeightMeter}; use xcm::prelude::*; @@ -118,6 +118,7 @@ impl< #[cfg(test)] mod tests { use super::*; + use alloc::vec; use codec::Encode; use frame_support::{ assert_err, assert_ok, diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 543aef97c3409..03ef780ef0325 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -16,9 +16,10 @@ //! Various implementations for `SendXcm`. +use alloc::vec::Vec; use codec::Encode; +use core::{marker::PhantomData, result::Result}; use frame_system::unique; -use sp_std::{marker::PhantomData, result::Result, vec::Vec}; use xcm::prelude::*; use xcm_executor::{traits::FeeReason, FeesMode}; @@ -173,7 +174,7 @@ impl InspectMessageQueues for Tuple { /// `Inner::Ticket`. Therefore, this router aims to validate at least the passed `message`. /// /// NOTE: For use in mock runtimes which don't have the DMP/UMP/HRMP XCM validations. -pub struct EnsureDecodableXcm(sp_std::marker::PhantomData); +pub struct EnsureDecodableXcm(core::marker::PhantomData); impl SendXcm for EnsureDecodableXcm { type Ticket = Inner::Ticket; diff --git a/polkadot/xcm/xcm-builder/src/test_utils.rs b/polkadot/xcm/xcm-builder/src/test_utils.rs index 3131dece37570..37a49a1b3dc7e 100644 --- a/polkadot/xcm/xcm-builder/src/test_utils.rs +++ b/polkadot/xcm/xcm-builder/src/test_utils.rs @@ -16,11 +16,11 @@ // Shared test utilities and implementations for the XCM Builder. 
+use alloc::vec::Vec; use frame_support::{ parameter_types, traits::{Contains, CrateVersion, PalletInfoData, PalletsInfoAccess}, }; -use sp_std::vec::Vec; pub use xcm::latest::{prelude::*, Weight}; use xcm_executor::traits::{ClaimAssets, DropAssets, VersionChangeNotifier}; pub use xcm_executor::{ diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index f35c73bdb6857..ac43d217ff3ff 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -26,7 +26,12 @@ pub use crate::{ AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, FixedRateOfFungible, FixedWeightBounds, TakeWeightCredit, }; +pub use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; pub use codec::{Decode, Encode}; +pub use core::{ + cell::{Cell, RefCell}, + fmt::Debug, +}; use frame_support::traits::{ContainsPair, Everything}; pub use frame_support::{ dispatch::{DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, @@ -34,11 +39,6 @@ pub use frame_support::{ sp_runtime::{traits::Dispatchable, DispatchError, DispatchErrorWithPostInfo}, traits::{Contains, Get, IsInVec}, }; -pub use sp_std::{ - cell::{Cell, RefCell}, - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - fmt::Debug, -}; pub use xcm::latest::{prelude::*, QueryId, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; pub use xcm_executor::{ diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 16ce3d2cf8ffe..379baaf5e3767 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use super::{test_utils::*, *}; +use alloc::{vec, vec::Vec}; use frame_support::{ assert_err, traits::{ConstU32, ContainsPair, ProcessMessageError}, diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 10e9f4c6c0855..18bde3aab485a 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -299,6 +299,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { (1, TreasuryAccountId::get(), INITIAL_BALANCE), (100, TreasuryAccountId::get(), INITIAL_BALANCE), ], + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 9820d535f7efd..8aa9602fcc297 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -17,9 +17,10 @@ //! Traits and utilities to help with origin mutation and bridging. use crate::InspectMessageQueues; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; +use core::{convert::TryInto, marker::PhantomData}; use frame_support::{ensure, traits::Get}; -use sp_std::{convert::TryInto, marker::PhantomData, prelude::*}; use xcm::prelude::*; use xcm_executor::traits::{validate_export, ExportXcm}; use SendError::*; @@ -149,7 +150,7 @@ impl NetworkExportTableItem { /// An adapter for the implementation of `ExporterFor`, which attempts to find the /// `(bridge_location, payment)` for the requested `network` and `remote_location` in the provided /// `T` table containing various exporters. 
-pub struct NetworkExportTable(sp_std::marker::PhantomData); +pub struct NetworkExportTable(core::marker::PhantomData); impl>> ExporterFor for NetworkExportTable { fn exporter_for( network: &NetworkId, @@ -649,7 +650,7 @@ mod tests { pub PaymentForNetworkAAndParachain2000: Asset = (Location::parent(), 150).into(); - pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + pub BridgeTable: alloc::vec::Vec = alloc::vec![ // NetworkA allows `Parachain(1000)` as remote location WITHOUT payment. NetworkExportTableItem::new( NetworkA::get(), diff --git a/polkadot/xcm/xcm-builder/src/weight.rs b/polkadot/xcm/xcm-builder/src/weight.rs index 1efa42ce95601..7861fdcc2e579 100644 --- a/polkadot/xcm/xcm-builder/src/weight.rs +++ b/polkadot/xcm/xcm-builder/src/weight.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . use codec::Decode; +use core::{marker::PhantomData, result::Result}; use frame_support::{ dispatch::GetDispatchInfo, traits::{ @@ -27,7 +28,6 @@ use frame_support::{ }, }; use sp_runtime::traits::{SaturatedConversion, Saturating, Zero}; -use sp_std::{marker::PhantomData, result::Result}; use xcm::latest::{prelude::*, GetWeight, Weight}; use xcm_executor::{ traits::{WeightBounds, WeightTrader}, diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 62b448a9f430c..0468b0a5410c4 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -15,15 +15,15 @@ // along with Polkadot. If not, see . use codec::Encode; +use core::cell::RefCell; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing}, + traits::{Everything, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; use primitive_types::H256; use sp_runtime::{traits::IdentityLookup, AccountId32, BuildStorage}; -use sp_std::cell::RefCell; use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::{configuration, origin, shared}; @@ -102,24 +102,14 @@ impl frame_system::Config for Runtime { parameter_types! 
{ pub ExistentialDeposit: Balance = 1 * CENTS; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 3b30b4f13e2dd..cc966f91fe4db 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -10,20 +10,19 @@ version = "7.0.0" workspace = true [dependencies] -impl-trait-for-tuples = "0.2.2" -environmental = { version = "1.1.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -xcm = { package = "staging-xcm", path = "..", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -log = { workspace = true } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +impl-trait-for-tuples = { workspace = true } +environmental = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +xcm = { workspace = true } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +frame-support = { workspace = true } +tracing = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [features] default = ["std"] @@ -37,13 +36,12 @@ std = [ "environmental/std", "frame-benchmarking/std", "frame-support/std", - "log/std", "scale-info/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-weights/std", + "tracing/std", "xcm/std", ] diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 37c2117e7b06f..e669e5d2b2312 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -11,24 +11,24 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system" } -futures = "0.3.30" 
-pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-test-client = { path = "../../../node/test/client" } -polkadot-test-runtime = { path = "../../../runtime/test-runtime" } -polkadot-test-service = { path = "../../../node/test/service" } -polkadot-service = { path = "../../../node/service" } -sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } -sp-keyring = { path = "../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } -xcm = { package = "staging-xcm", path = "../..", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = ".." } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core" } +codec = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true, default-features = true } +futures = { workspace = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-test-client = { workspace = true } +polkadot-test-runtime = { workspace = true } +polkadot-test-service = { workspace = true } +polkadot-service = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-executor/src/assets.rs b/polkadot/xcm/xcm-executor/src/assets.rs index 4407752f70242..09e7535ebf81d 100644 --- a/polkadot/xcm/xcm-executor/src/assets.rs +++ b/polkadot/xcm/xcm-executor/src/assets.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
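The hunks above replace `sp_std` paths with their `core` and `alloc` equivalents as part of the move away from the `sp-std` crate. A minimal sketch of the resulting import pattern in a `no_std`-compatible library follows; the `Adapter` type and `collect` function are illustrative stand-ins, not items from this diff.

```rust
// lib.rs of a hypothetical no_std-compatible crate.
#![cfg_attr(not(feature = "std"), no_std)]

// `alloc` has to be declared explicitly once `sp_std::prelude::*` is gone.
extern crate alloc;

use alloc::{vec, vec::Vec}; // heap types formerly pulled in via `sp_std::prelude::*`
use core::marker::PhantomData; // language items formerly at `sp_std::marker::PhantomData`

/// Zero-sized adapter, generic over some configuration type `T`.
pub struct Adapter<T>(PhantomData<T>);

impl<T> Adapter<T> {
    /// Collects bytes into a `Vec` to show that `alloc` types work without `std`.
    pub fn collect(items: impl IntoIterator<Item = u8>) -> Vec<u8> {
        let mut out = vec![];
        out.extend(items);
        out
    }
}
```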
-use sp_runtime::{traits::Saturating, RuntimeDebug}; -use sp_std::{ +use alloc::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - mem, - prelude::*, + vec::Vec, }; +use core::mem; +use sp_runtime::{traits::Saturating, RuntimeDebug}; use xcm::latest::{ Asset, AssetFilter, AssetId, AssetInstance, Assets, Fungibility::{Fungible, NonFungible}, @@ -520,7 +520,9 @@ impl AssetsInHolding { #[cfg(test)] mod tests { use super::*; + use alloc::vec; use xcm::latest::prelude::*; + #[allow(non_snake_case)] /// Concrete fungible constructor fn CF(amount: u128) -> Asset { diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index da9de93ca0f6f..1daf5ae750cfb 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -16,7 +16,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ dispatch::GetDispatchInfo, ensure, @@ -24,7 +28,6 @@ use frame_support::{ }; use sp_core::defer; use sp_io::hashing::blake2_128; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use sp_weights::Weight; use xcm::latest::prelude::*; @@ -208,9 +211,12 @@ impl ExecuteXcm for XcmExecutor Outcome { let origin = origin.into(); - log::trace!( + tracing::trace!( target: "xcm::execute", - "origin: {origin:?}, message: {message:?}, weight_credit: {weight_credit:?}", + ?origin, + ?message, + ?weight_credit, + "Executing message", ); let mut properties = Properties { weight_credit, message_id: None }; @@ -226,10 +232,13 @@ impl ExecuteXcm for XcmExecutor ExecuteXcm for XcmExecutor for frame_benchmarking::BenchmarkError { fn from(error: ExecutorError) -> Self { - log::error!( - "XCM ERROR >> Index: {:?}, Error: {:?}, Weight: {:?}", - error.index, - error.xcm_error, - error.weight + tracing::error!( + index = ?error.index, + xcm_error = ?error.xcm_error, + weight = ?error.weight, + "XCM ERROR", ); Self::Stop("xcm executor error: see error logs") } @@ -326,10 +335,12 @@ impl XcmExecutor { let mut weight_used = xcm_weight.saturating_sub(self.total_surplus); if !self.holding.is_empty() { - log::trace!( + tracing::trace!( target: "xcm::post_process", - "Trapping assets in holding register: {:?}, context: {:?} (original_origin: {:?})", - self.holding, self.context, self.original_origin, + holding_register = ?self.holding, + context = ?self.context, + original_origin = ?self.original_origin, + "Trapping assets in holding register", ); let effective_origin = self.context.origin.as_ref().unwrap_or(&self.original_origin); let trap_weight = @@ -342,7 +353,13 @@ impl XcmExecutor { // TODO: #2841 #REALWEIGHT We should deduct the cost of any instructions following // the error which didn't end up being executed. 
Some((_i, e)) => { - log::trace!(target: "xcm::post_process", "Execution errored at {:?}: {:?} (original_origin: {:?})", _i, e, self.original_origin); + tracing::trace!( + target: "xcm::post_process", + instruction = ?_i, + error = ?e, + original_origin = ?self.original_origin, + "Execution failed", + ); Outcome::Incomplete { used: weight_used, error: e } }, } @@ -363,8 +380,12 @@ impl XcmExecutor { msg: Xcm<()>, reason: FeeReason, ) -> Result { - log::trace!( - target: "xcm::send", "Sending msg: {msg:?}, to destination: {dest:?}, (reason: {reason:?})" + tracing::trace!( + target: "xcm::send", + ?msg, + destination = ?dest, + reason = ?reason, + "Sending msg", ); let (ticket, fee) = validate_send::(dest, msg)?; self.take_fee(fee, reason)?; @@ -374,7 +395,7 @@ impl XcmExecutor { /// Remove the registered error handler and return it. Do not refund its weight. fn take_error_handler(&mut self) -> Xcm { let mut r = Xcm::(vec![]); - sp_std::mem::swap(&mut self.error_handler, &mut r); + core::mem::swap(&mut self.error_handler, &mut r); self.error_handler_weight = Weight::zero(); r } @@ -389,7 +410,7 @@ impl XcmExecutor { /// Remove the registered appendix and return it. fn take_appendix(&mut self) -> Xcm { let mut r = Xcm::(vec![]); - sp_std::mem::swap(&mut self.appendix, &mut r); + core::mem::swap(&mut self.appendix, &mut r); self.appendix_weight = Weight::zero(); r } @@ -400,7 +421,12 @@ impl XcmExecutor { // `holding_limit` items (which has a best case outcome of holding.len() == holding_limit), // then the operation is guaranteed to succeed. let worst_case_holding_len = self.holding.len() + assets_length; - log::trace!(target: "xcm::ensure_can_subsume_assets", "worst_case_holding_len: {:?}, holding_limit: {:?}", worst_case_holding_len, self.holding_limit); + tracing::trace!( + target: "xcm::ensure_can_subsume_assets", + ?worst_case_holding_len, + holding_limit = ?self.holding_limit, + "Ensuring subsume assets work", + ); ensure!(worst_case_holding_len <= self.holding_limit * 2, XcmError::HoldingWouldOverflow); Ok(()) } @@ -408,12 +434,12 @@ impl XcmExecutor { /// Refund any unused weight. 
fn refund_surplus(&mut self) -> Result<(), XcmError> { let current_surplus = self.total_surplus.saturating_sub(self.total_refunded); - log::trace!( + tracing::trace!( target: "xcm::refund_surplus", - "total_surplus: {:?}, total_refunded: {:?}, current_surplus: {:?}", - self.total_surplus, - self.total_refunded, - current_surplus, + total_surplus = ?self.total_surplus, + total_refunded = ?self.total_refunded, + ?current_surplus, + "Refunding surplus", ); if current_surplus.any_gt(Weight::zero()) { if let Some(w) = self.trader.refund_weight(current_surplus, &self.context) { @@ -426,7 +452,7 @@ impl XcmExecutor { .defensive_proof( "refund_weight returned an asset capable of buying weight; qed", ); - log::error!( + tracing::error!( target: "xcm::refund_surplus", "error: HoldingWouldOverflow", ); @@ -436,10 +462,9 @@ impl XcmExecutor { self.holding.subsume_assets(w.into()); } } - log::trace!( + tracing::trace!( target: "xcm::refund_surplus", - "total_refunded: {:?}", - self.total_refunded, + total_refunded = ?self.total_refunded, ); Ok(()) } @@ -448,13 +473,13 @@ impl XcmExecutor { if Config::FeeManager::is_waived(self.origin_ref(), reason.clone()) { return Ok(()) } - log::trace!( + tracing::trace!( target: "xcm::fees", - "taking fee: {:?} from origin_ref: {:?} in fees_mode: {:?} for a reason: {:?}", - fee, - self.origin_ref(), - self.fees_mode, - reason, + ?fee, + origin_ref = ?self.origin_ref(), + fees_mode = ?self.fees_mode, + ?reason, + "Taking fees", ); let paid = if self.fees_mode.jit_withdraw { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; @@ -507,7 +532,7 @@ impl XcmExecutor { let reanchor_context = Config::UniversalLocation::get(); let reanchored = reanchorable.reanchored(&destination, &reanchor_context).map_err(|error| { - log::error!(target: "xcm::reanchor", "Failed reanchoring with error {error:?}"); + tracing::error!(target: "xcm::reanchor", ?error, "Failed reanchoring with error"); XcmError::ReanchorFailed })?; Ok((reanchored, reanchor_context)) @@ -530,13 +555,12 @@ impl XcmExecutor { } fn process(&mut self, xcm: Xcm) -> Result<(), ExecutorError> { - log::trace!( + tracing::trace!( target: "xcm::process", - "origin: {:?}, total_surplus/refunded: {:?}/{:?}, error_handler_weight: {:?}", - self.origin_ref(), - self.total_surplus, - self.total_refunded, - self.error_handler_weight, + origin = ?self.origin_ref(), + total_surplus = ?self.total_surplus, + total_refunded = ?self.total_refunded, + error_handler_weight = ?self.error_handler_weight, ); let mut result = Ok(()); for (i, instr) in xcm.0.into_iter().enumerate() { @@ -566,7 +590,7 @@ impl XcmExecutor { self.process_instruction(instr) }); if let Err(e) = inst_res { - log::trace!(target: "xcm::execute", "!!! ERROR: {:?}", e); + tracing::trace!(target: "xcm::execute", "!!! ERROR: {:?}", e); *r = Err(ExecutorError { index: i as u32, xcm_error: e, @@ -588,11 +612,12 @@ impl XcmExecutor { &mut self, instr: Instruction, ) -> Result<(), XcmError> { - log::trace!( + tracing::trace!( target: "xcm::process_instruction", - "=== {:?}", - instr + instruction = ?instr, + "Processing instruction", ); + match instr { WithdrawAsset(assets) => { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; @@ -694,7 +719,7 @@ impl XcmExecutor { Transact { origin_kind, require_weight_at_most, mut call } => { // We assume that the Relay-chain is allowed to use transact on this parachain. 
let origin = self.cloned_origin().ok_or_else(|| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "No origin provided", ); @@ -704,7 +729,7 @@ impl XcmExecutor { // TODO: #2841 #TRANSACTFILTER allow the trait to issue filters for the relay-chain let message_call = call.take_decoded().map_err(|_| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "Failed to decode call", ); @@ -712,13 +737,14 @@ impl XcmExecutor { XcmError::FailedToDecode })?; - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Processing call: {message_call:?}", + ?call, + "Processing call", ); if !Config::SafeCallFilter::contains(&message_call) { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", "Call filtered by `SafeCallFilter`", ); @@ -729,26 +755,31 @@ impl XcmExecutor { let dispatch_origin = Config::OriginConverter::convert_origin(origin.clone(), origin_kind).map_err( |_| { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Failed to convert origin {origin:?} and origin kind {origin_kind:?} to a local origin." + ?origin, + ?origin_kind, + "Failed to convert origin to a local origin." ); XcmError::BadOrigin }, )?; - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatching with origin: {dispatch_origin:?}", + origin = ?dispatch_origin, + "Dispatching with origin", ); let weight = message_call.get_dispatch_info().weight; if !weight.all_lte(require_weight_at_most) { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Max {weight} bigger than require at most {require_weight_at_most}", + %weight, + %require_weight_at_most, + "Max weight bigger than require at most", ); return Err(XcmError::MaxWeightInvalid) @@ -757,17 +788,19 @@ impl XcmExecutor { let maybe_actual_weight = match Config::CallDispatcher::dispatch(message_call, dispatch_origin) { Ok(post_info) => { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatch successful: {post_info:?}" + ?post_info, + "Dispatch successful" ); self.transact_status = MaybeErrorCode::Success; post_info.actual_weight }, Err(error_and_info) => { - log::trace!( + tracing::trace!( target: "xcm::process_instruction::transact", - "Dispatch failed {error_and_info:?}" + ?error_and_info, + "Dispatch failed" ); self.transact_status = error_and_info.error.encode().into(); diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_lock.rs b/polkadot/xcm/xcm-executor/src/traits/asset_lock.rs index b6270c5294521..b4e9f32983fd5 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_lock.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_lock.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use sp_std::convert::Infallible; +use core::convert::Infallible; use xcm::prelude::*; #[derive(Debug)] diff --git a/polkadot/xcm/xcm-executor/src/traits/conversion.rs b/polkadot/xcm/xcm-executor/src/traits/conversion.rs index 9e2f4c83997ac..bc62ad6556697 100644 --- a/polkadot/xcm/xcm-executor/src/traits/conversion.rs +++ b/polkadot/xcm/xcm-executor/src/traits/conversion.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
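The executor hunks above also migrate diagnostics from `log` format strings to `tracing` events with named fields. Below is a minimal sketch of that logging style, assuming only the `tracing` and `tracing-subscriber` crates; `Origin` and the argument values are placeholders rather than executor types.

```rust
// Cargo deps assumed: tracing = "0.1", tracing-subscriber = "0.3".
use tracing::trace;

#[derive(Debug)]
struct Origin(u32); // stand-in for the executor's real origin `Location`

fn execute(origin: &Origin, weight_credit: u64) {
    // Previously: log::trace!(target: "...", "origin: {:?}, weight_credit: {:?}", origin, weight_credit);
    // Now each value is a named field: `?x` captures `x` via `Debug`, `%x` via `Display`.
    trace!(target: "xcm::execute", ?origin, weight_credit, "Executing message");
}

fn main() {
    // Install a simple subscriber so the structured fields are actually printed.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::TRACE)
        .init();

    execute(&Origin(42), 1_000);
}
```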
+use core::{marker::PhantomData, result::Result}; use frame_support::traits::{Contains, OriginTrait}; use sp_runtime::{traits::Dispatchable, DispatchErrorWithPostInfo}; -use sp_std::{marker::PhantomData, result::Result}; use xcm::latest::prelude::*; /// Means of converting a location into an account identifier. @@ -88,19 +88,45 @@ pub trait ConvertOrigin { #[impl_trait_for_tuples::impl_for_tuples(30)] impl ConvertOrigin for Tuple { fn convert_origin(origin: impl Into, kind: OriginKind) -> Result { + let origin = origin.into(); + + tracing::trace!( + target: "xcm::convert_origin", + ?origin, + ?kind, + "Converting origin", + ); + for_tuples!( #( + let convert_origin = core::any::type_name::(); + let origin = match Tuple::convert_origin(origin, kind) { - Err(o) => o, - r => return r + Err(o) => { + tracing::trace!( + target: "xcm::convert_origin", + %convert_origin, + "Convert origin step failed", + ); + + o + }, + Ok(o) => { + tracing::trace!( + target: "xcm::convert_origin", + %convert_origin, + "Convert origin step succeeded", + ); + + return Ok(o) + } }; )* ); - let origin = origin.into(); - log::trace!( + + tracing::trace!( target: "xcm::convert_origin", - "could not convert: origin: {:?}, kind: {:?}", - origin, - kind, + "Converting origin failed", ); + Err(origin) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 5d2412d613755..a4ed6014b4fc8 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -16,10 +16,9 @@ use crate::{Junctions::Here, Xcm}; use codec::{Decode, Encode}; -use core::result; +use core::{fmt::Debug, result}; use frame_support::{pallet_prelude::Get, parameter_types}; use sp_arithmetic::traits::Zero; -use sp_std::fmt::Debug; use xcm::latest::{ Error as XcmError, InteriorLocation, Location, QueryId, Response, Result as XcmResult, Weight, XcmContext, diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs index e76d56bfe6164..ec9ef70cc817e 100644 --- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs +++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use core::result::Result; use frame_support::traits::ProcessMessageError; -use sp_std::result::Result; use xcm::latest::{Instruction, Location, Weight, XcmHash}; /// Properties of an XCM message and its imminent execution. 
@@ -59,19 +59,35 @@ impl ShouldExecute for Tuple { properties: &mut Properties, ) -> Result<(), ProcessMessageError> { for_tuples!( #( - match Tuple::should_execute(origin, instructions, max_weight, properties) { - Ok(()) => return Ok(()), - _ => (), + let barrier = core::any::type_name::(); + match Tuple::should_execute(origin, instructions, max_weight, properties) { + Ok(()) => { + tracing::trace!( + target: "xcm::should_execute", + ?origin, + ?instructions, + ?max_weight, + ?properties, + %barrier, + "pass barrier", + ); + return Ok(()) + }, + Err(error) => { + tracing::trace!( + target: "xcm::should_execute", + ?origin, + ?instructions, + ?max_weight, + ?properties, + ?error, + %barrier, + "did not pass barrier", + ); + }, } )* ); - log::trace!( - target: "xcm::should_execute", - "did not pass barrier: origin: {:?}, instructions: {:?}, max_weight: {:?}, properties: {:?}", - origin, - instructions, - max_weight, - properties, - ); + Err(ProcessMessageError::Unsupported) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/token_matching.rs b/polkadot/xcm/xcm-executor/src/traits/token_matching.rs index e9a7e3ad845da..aa44aee4f9de3 100644 --- a/polkadot/xcm/xcm-executor/src/traits/token_matching.rs +++ b/polkadot/xcm/xcm-executor/src/traits/token_matching.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use sp_std::result; +use core::result; use xcm::latest::prelude::*; pub trait MatchesFungible { @@ -27,7 +27,7 @@ impl MatchesFungible for Tuple { for_tuples!( #( match Tuple::matches_fungible(a) { o @ Some(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_fungible", "did not match fungible asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_fungible", asset = ?a, "did not match fungible asset"); None } } @@ -42,7 +42,7 @@ impl MatchesNonFungible for Tuple { for_tuples!( #( match Tuple::matches_nonfungible(a) { o @ Some(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_non_fungible", "did not match non-fungible asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_non_fungible", asset = ?a, "did not match non-fungible asset"); None } } @@ -86,7 +86,7 @@ impl MatchesFungibles for Tuple { for_tuples!( #( match Tuple::matches_fungibles(a) { o @ Ok(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_fungibles", "did not match fungibles asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_fungibles", asset = ?a, "did not match fungibles asset"); Err(Error::AssetNotHandled) } } @@ -101,7 +101,7 @@ impl MatchesNonFungibles for Tuple { for_tuples!( #( match Tuple::matches_nonfungibles(a) { o @ Ok(_) => return o, _ => () } )* ); - log::trace!(target: "xcm::matches_non_fungibles", "did not match fungibles asset: {:?}", &a); + tracing::trace!(target: "xcm::matches_non_fungibles", asset = ?a, "did not match fungibles asset"); Err(Error::AssetNotHandled) } } diff --git a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs index e8a52d8256851..c2331f805b4bd 100644 --- a/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/polkadot/xcm/xcm-executor/src/traits/transact_asset.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::AssetsInHolding; -use sp_std::result::Result; +use core::result::Result; use xcm::latest::{Asset, Error as XcmError, Location, Result as XcmResult, XcmContext}; /// Facility for asset transacting. 
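The tuple implementations above (`ConvertOrigin` in `conversion.rs` and `ShouldExecute` in `should_execute.rs`) now report which tuple element handled the input by logging `core::any::type_name`. The sketch below reproduces that idea in isolation with a hypothetical `Filter` trait and `println!` in place of `tracing`; the real code generates its tuple impls with `impl_trait_for_tuples` rather than writing them by hand.

```rust
use core::any::type_name;

// Hypothetical stand-in for traits such as `ShouldExecute` or `ConvertOrigin`.
trait Filter {
    fn accepts(value: u32) -> bool;
}

struct Even;
impl Filter for Even {
    fn accepts(value: u32) -> bool { value % 2 == 0 }
}

struct Small;
impl Filter for Small {
    fn accepts(value: u32) -> bool { value < 10 }
}

// Manual two-element tuple impl; identifies each element by its type name.
impl<A: Filter, B: Filter> Filter for (A, B) {
    fn accepts(value: u32) -> bool {
        if A::accepts(value) {
            println!("passed: {}", type_name::<A>());
            return true;
        }
        println!("rejected by: {}", type_name::<A>());
        if B::accepts(value) {
            println!("passed: {}", type_name::<B>());
            return true;
        }
        println!("rejected by: {}", type_name::<B>());
        false
    }
}

fn main() {
    // Prints which element of the tuple let the value through or turned it away.
    assert!(<(Even, Small)>::accepts(3));   // rejected by Even, passed by Small
    assert!(!<(Even, Small)>::accepts(11)); // rejected by both
}
```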
@@ -148,12 +148,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::can_check_in", - "asset not found: what: {:?}, origin: {:?}, context: {:?}", - what, - origin, - context, + ?what, + ?origin, + ?context, + "asset not found", ); Err(XcmError::AssetNotFound) } @@ -171,12 +171,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::can_check_out", - "asset not found: what: {:?}, dest: {:?}, context: {:?}", - what, - dest, - context, + ?what, + ?dest, + ?context, + "asset not found", ); Err(XcmError::AssetNotFound) } @@ -194,12 +194,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::deposit_asset", - "did not deposit asset: what: {:?}, who: {:?}, context: {:?}", - what, - who, - context, + ?what, + ?who, + ?context, + "did not deposit asset", ); Err(XcmError::AssetNotFound) } @@ -215,12 +215,12 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::withdraw_asset", - "did not withdraw asset: what: {:?}, who: {:?}, maybe_context: {:?}", - what, - who, - maybe_context, + ?what, + ?who, + ?maybe_context, + "did not withdraw asset", ); Err(XcmError::AssetNotFound) } @@ -237,13 +237,13 @@ impl TransactAsset for Tuple { r => return r, } )* ); - log::trace!( + tracing::trace!( target: "xcm::TransactAsset::internal_transfer_asset", - "did not transfer asset: what: {:?}, from: {:?}, to: {:?}, context: {:?}", - what, - from, - to, - context, + ?what, + ?from, + ?to, + ?context, + "did not transfer asset", ); Err(XcmError::AssetNotFound) } diff --git a/polkadot/xcm/xcm-executor/src/traits/weight.rs b/polkadot/xcm/xcm-executor/src/traits/weight.rs index efb9a2dfb6efd..72de3e0f433b9 100644 --- a/polkadot/xcm/xcm-executor/src/traits/weight.rs +++ b/polkadot/xcm/xcm-executor/src/traits/weight.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use crate::AssetsInHolding; -use sp_std::result::Result; +use core::result::Result; use xcm::latest::{prelude::*, Weight}; /// Determine the weight of an XCM message. @@ -80,18 +80,38 @@ impl WeightTrader for Tuple { let mut too_expensive_error_found = false; let mut last_error = None; for_tuples!( #( + let weight_trader = core::any::type_name::(); + match Tuple.buy_weight(weight, payment.clone(), context) { - Ok(assets) => return Ok(assets), - Err(e) => { - if let XcmError::TooExpensive = e { + Ok(assets) => { + tracing::trace!( + target: "xcm::buy_weight", + %weight_trader, + "Buy weight succeeded", + ); + + return Ok(assets) + }, + Err(error) => { + if let XcmError::TooExpensive = error { too_expensive_error_found = true; } - last_error = Some(e) + last_error = Some(error); + + tracing::trace!( + target: "xcm::buy_weight", + ?error, + %weight_trader, + "Weight trader failed", + ); } } )* ); - log::trace!(target: "xcm::buy_weight", "last_error: {:?}, too_expensive_error_found: {}", last_error, too_expensive_error_found); + tracing::trace!( + target: "xcm::buy_weight", + "Buy weight failed", + ); // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. 
// `AssetNotFound` then it is more accurate to return `TooExpensive` then `AssetNotFound` diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml deleted file mode 100644 index 6fa0236dfb41d..0000000000000 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "xcm-fee-payment-runtime-api" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -repository.workspace = true -description = "XCM fee payment runtime API" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ - "derive", -] } - -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = [ - "derive", - "serde", -] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../substrate/primitives/weights", default-features = false } -xcm = { package = "staging-xcm", path = "../", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -[dev-dependencies] -frame-system = { path = "../../../substrate/frame/system", default-features = false } -pallet-xcm = { path = "../pallet-xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-assets = { path = "../../../substrate/frame/assets", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -log = { workspace = true } -env_logger = "0.9.0" - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-executive/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-assets/std", - "pallet-balances/std", - "pallet-xcm/std", - "scale-info/std", - "sp-api/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "sp-weights/std", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml new file mode 100644 index 0000000000000..748d5af68a1fd --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -0,0 +1,63 @@ +[package] +name = "xcm-runtime-apis" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +repository.workspace = true +description = "XCM runtime APIs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } + +frame-support = { workspace = 
true } +sp-api = { workspace = true } +sp-weights = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } + +[dev-dependencies] +frame-system = { workspace = true } +sp-io = { workspace = true } +xcm-builder = { workspace = true } +hex-literal = { workspace = true } +pallet-xcm = { workspace = true } +pallet-balances = { workspace = true } +pallet-assets = { workspace = true } +xcm-executor = { workspace = true } +frame-executive = { workspace = true } +log = { workspace = true } +env_logger = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-balances/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-io/std", + "sp-weights/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/polkadot/xcm/xcm-runtime-apis/src/conversions.rs b/polkadot/xcm/xcm-runtime-apis/src/conversions.rs new file mode 100644 index 0000000000000..e5eeac013fee6 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/src/conversions.rs @@ -0,0 +1,56 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Contains runtime APIs for useful conversions, such as between XCM `Location` and `AccountId`. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use xcm::VersionedLocation; +use xcm_executor::traits::ConvertLocation; + +sp_api::decl_runtime_apis! { + /// API for useful conversions between XCM `Location` and `AccountId`. + pub trait LocationToAccountApi where AccountId: Decode { + /// Converts `Location` to `AccountId`. + fn convert_location(location: VersionedLocation) -> Result; + } +} + +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Requested `Location` is not supported by the local conversion. + #[codec(index = 0)] + Unsupported, + + /// Converting a versioned data structure from one version to another failed. + #[codec(index = 1)] + VersionedConversionFailed, +} + +/// A helper implementation that can be used for `LocationToAccountApi` implementations. +/// It is useful when you already have a `ConvertLocation` implementation and a default +/// `Ss58Prefix`. 
+pub struct LocationToAccountHelper( + core::marker::PhantomData<(AccountId, Conversion)>, +); +impl> + LocationToAccountHelper +{ + pub fn convert_location(location: VersionedLocation) -> Result { + let location = location.try_into().map_err(|_| Error::VersionedConversionFailed)?; + Conversion::convert_location(&location).ok_or(Error::Unsupported) + } +} diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs similarity index 99% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs rename to polkadot/xcm/xcm-runtime-apis/src/dry_run.rs index 9828acab40230..2a1a0daf0d5d5 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs @@ -18,9 +18,9 @@ //! This API can be used to simulate XCMs and, for example, find the fees //! that need to be paid. +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::pallet_prelude::{DispatchResultWithPostInfo, TypeInfo}; -use sp_std::vec::Vec; use xcm::prelude::*; /// Effects of dry-running an extrinsic. diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/fees.rs b/polkadot/xcm/xcm-runtime-apis/src/fees.rs similarity index 99% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/fees.rs rename to polkadot/xcm/xcm-runtime-apis/src/fees.rs index 572d4edf53386..3445d42ecab3b 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/fees.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/fees.rs @@ -16,9 +16,9 @@ //! Runtime API definition for getting XCM fees. +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::pallet_prelude::TypeInfo; -use sp_std::vec::Vec; use sp_weights::Weight; use xcm::{Version, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-runtime-apis/src/lib.rs similarity index 74% rename from polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs rename to polkadot/xcm/xcm-runtime-apis/src/lib.rs index 616ee4c2eccb0..b106836c1132b 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/lib.rs @@ -14,19 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Runtime APIs for estimating xcm fee payment. -//! This crate offers two APIs, one for estimating fees, -//! which can be used for any type of message, and another one -//! for returning the specific messages used for transfers, a common -//! feature. -//! Users of these APIs should call the transfers API and pass the result to the -//! fees API. +//! Various runtime APIs to support XCM processing and manipulation. #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +/// Exposes runtime APIs for various XCM-related conversions. +pub mod conversions; + /// Dry-run API. /// Given an extrinsic or an XCM program, it returns the outcome of its execution. pub mod dry_run; + /// Fee estimation API. /// Given an XCM program, it will return the fees needed to execute it properly or send it. pub mod fees; diff --git a/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs b/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs new file mode 100644 index 0000000000000..7f0f0923b0921 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/tests/conversions.rs @@ -0,0 +1,83 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +mod mock; + +use frame_support::{ + assert_err, assert_ok, + sp_runtime::{ + testing::H256, + traits::{IdentifyAccount, Verify}, + AccountId32, MultiSignature, + }, +}; +use mock::*; +use sp_api::ProvideRuntimeApi; +use xcm::prelude::*; +use xcm_runtime_apis::conversions::{ + Error as LocationToAccountApiError, LocationToAccountApi, LocationToAccountHelper, +}; + +#[test] +fn convert_location_to_account_works() { + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + // Test unknown conversion for `Here` location + assert_err!( + runtime_api + .convert_location(H256::zero(), VersionedLocation::from(Location::here())) + .unwrap(), + LocationToAccountApiError::Unsupported + ); + + // Test known conversion for sibling parachain location + assert_ok!( + runtime_api + .convert_location(H256::zero(), VersionedLocation::from((Parent, Parachain(1000)))) + .unwrap(), + 1000_u64 + ); + }) +} + +#[test] +fn location_to_account_helper_with_multi_signature_works() { + type Signature = MultiSignature; + type AccountIdForConversions = <::Signer as IdentifyAccount>::AccountId; + // We alias only `Location::parent()` + pub type LocationToAccountIdForConversions = + (xcm_builder::ParentIsPreset,); + + // Test unknown conversion for `Here` location + assert_err!( + LocationToAccountHelper::< + AccountIdForConversions, + LocationToAccountIdForConversions, + >::convert_location(Location::here().into_versioned()), + LocationToAccountApiError::Unsupported + ); + + // Test known conversion for `Parent` location + assert_ok!( + LocationToAccountHelper::< + AccountIdForConversions, + LocationToAccountIdForConversions, + >::convert_location(Location::parent().into_versioned()), + AccountId32::from(hex_literal::hex!("506172656e740000000000000000000000000000000000000000000000000000")) + ); +} diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs similarity index 99% rename from polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs rename to polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 33611c8a471c0..59ee179738056 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -16,11 +16,11 @@ //! Tests for using both the XCM fee payment API and the dry-run API. 
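The `LocationToAccountHelper` added above follows a common pattern: a zero-sized struct whose only job is to carry generic parameters (via `PhantomData`) so callers can plug in any conversion implementation. Below is a self-contained sketch of the same shape, using hypothetical `Convert`/`Helper` names and plain `u64`/`String` in place of `Location` and `AccountId`.

```rust
use core::marker::PhantomData;

/// Stand-in for `ConvertLocation<AccountId>`: turn an input into an account, if supported.
trait Convert<Input, Output> {
    fn convert(input: &Input) -> Option<Output>;
}

/// Stand-in error type mirroring the API's `Unsupported` case.
#[derive(Debug, PartialEq)]
enum Error {
    Unsupported,
}

/// Zero-sized helper; `PhantomData` only ties the generic parameters to the type.
struct Helper<Output, Conversion>(PhantomData<(Output, Conversion)>);

impl<Output, Conversion: Convert<u64, Output>> Helper<Output, Conversion> {
    fn convert(input: u64) -> Result<Output, Error> {
        Conversion::convert(&input).ok_or(Error::Unsupported)
    }
}

/// One concrete conversion: only even inputs are "supported".
struct EvenToString;
impl Convert<u64, String> for EvenToString {
    fn convert(input: &u64) -> Option<String> {
        (input % 2 == 0).then(|| format!("account-{input}"))
    }
}

fn main() {
    assert_eq!(Helper::<String, EvenToString>::convert(2), Ok("account-2".to_string()));
    assert_eq!(Helper::<String, EvenToString>::convert(3), Err(Error::Unsupported));
}
```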
+use frame_support::sp_runtime::testing::H256; use frame_system::RawOrigin; use sp_api::ProvideRuntimeApi; -use sp_runtime::testing::H256; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{dry_run::DryRunApi, fees::XcmPaymentApi}; +use xcm_runtime_apis::{dry_run::DryRunApi, fees::XcmPaymentApi}; mod mock; use mock::{ diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs similarity index 95% rename from polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs rename to polkadot/xcm/xcm-runtime-apis/tests/mock.rs index aa6c1422b608c..c76b26fcd2a33 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -18,8 +18,13 @@ //! Implements both runtime APIs for fee estimation and getting the messages for transfers. use codec::Encode; +use core::{cell::RefCell, marker::PhantomData}; use frame_support::{ - construct_runtime, derive_impl, parameter_types, + construct_runtime, derive_impl, parameter_types, sp_runtime, + sp_runtime::{ + traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, + BuildStorage, SaturatedConversion, + }, traits::{ AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, ContainsPair, Everything, Nothing, OriginTrait, @@ -28,11 +33,6 @@ use frame_support::{ }; use frame_system::{EnsureRoot, RawOrigin as SystemRawOrigin}; use pallet_xcm::TestWeightInfo; -use sp_runtime::{ - traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, - BuildStorage, SaturatedConversion, -}; -use sp_std::{cell::RefCell, marker::PhantomData}; use xcm::{prelude::*, Version as XcmVersion}; use xcm_builder::{ AllowTopLevelPaidExecutionFrom, ConvertedConcreteId, EnsureXcmOrigin, FixedRateOfFungible, @@ -44,7 +44,8 @@ use xcm_executor::{ XcmExecutor, }; -use xcm_fee_payment_runtime_api::{ +use xcm_runtime_apis::{ + conversions::{Error as LocationToAccountApiError, LocationToAccountApi}, dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, }; @@ -352,6 +353,7 @@ impl pallet_xcm::Config for TestRuntime { type WeightInfo = TestWeightInfo; } +#[allow(dead_code)] pub fn new_test_ext_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); @@ -364,6 +366,7 @@ pub fn new_test_ext_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io: ext } +#[allow(dead_code)] pub fn new_test_ext_with_balances_and_assets( balances: Vec<(AccountId, Balance)>, assets: Vec<(AssetIdForAssetsPallet, AccountId, Balance)>, @@ -386,6 +389,7 @@ pub fn new_test_ext_with_balances_and_assets( (1, "Relay Token".into(), "RLY".into(), 12), ], accounts: assets, + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); @@ -410,6 +414,14 @@ impl sp_api::ProvideRuntimeApi for TestClient { } sp_api::mock_impl_runtime_apis! 
{ + impl LocationToAccountApi for RuntimeApi { + fn convert_location(location: VersionedLocation) -> Result { + let location = location.try_into().map_err(|_| LocationToAccountApiError::VersionedConversionFailed)?; + LocationToAccountId::convert_location(&location) + .ok_or(LocationToAccountApiError::Unsupported) + } + } + impl XcmPaymentApi for RuntimeApi { fn query_acceptable_payment_assets(xcm_version: XcmVersion) -> Result, XcmPaymentApiError> { Ok(vec![ diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index fc09b5e31861c..c7caa49393ed5 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -10,20 +10,20 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.6.0", default-features = false } -paste = "1.0.7" +codec = { workspace = true, default-features = true } +scale-info = { workspace = true } +paste = { workspace = true, default-features = true } -frame-support = { path = "../../../substrate/frame/support" } -frame-system = { path = "../../../substrate/frame/system" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-std = { path = "../../../substrate/primitives/std" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } -polkadot-primitives = { path = "../../primitives" } -polkadot-core-primitives = { path = "../../core-primitives" } -polkadot-parachain-primitives = { path = "../../parachain" } -polkadot-runtime-parachains = { path = "../../runtime/parachains" } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index 8b04170e3032f..e0aff9b7782a7 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -10,29 +10,29 @@ version = "7.0.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -frame-system = { path = "../../../../substrate/frame/system" } -frame-support = { path = "../../../../substrate/frame/support" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue" } -pallet-uniques = { path = "../../../../substrate/frame/uniques" } -sp-std = { path = 
"../../../../substrate/primitives/std" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-tracing = { path = "../../../../substrate/primitives/tracing" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = "../.." } -xcm-simulator = { path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm-builder" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../../parachain" } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs index 93c8302757cb0..bfb455aba3f93 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs @@ -31,7 +31,6 @@ use sp_runtime::{ traits::{Get, IdentityLookup}, AccountId32, }; -use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::{EnsureXcmOrigin, SignedToAccountId32}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; @@ -101,7 +100,7 @@ impl EnsureOriginWithArg for ForeignCreators { fn try_origin( o: RuntimeOrigin, a: &Location, - ) -> sp_std::result::Result { + ) -> core::result::Result { let origin_location = pallet_xcm::EnsureXcm::::try_origin(o.clone())?; if !a.starts_with(&origin_location) { return Err(o); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 6b3b4018d9fbb..04f8ba1151734 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,30 +11,30 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -honggfuzz = "0.5.55" -arbitrary = "1.3.2" -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { workspace = true, default-features = true } +honggfuzz = { workspace = true } +arbitrary = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -frame-system = { path = "../../../../substrate/frame/system" } 
-frame-support = { path = "../../../../substrate/frame/support" } -frame-executive = { path = "../../../../substrate/frame/executive" } -frame-try-runtime = { path = "../../../../substrate/frame/try-runtime" } -pallet-balances = { path = "../../../../substrate/frame/balances" } -pallet-message-queue = { path = "../../../../substrate/frame/message-queue" } -sp-std = { path = "../../../../substrate/primitives/std" } -sp-core = { path = "../../../../substrate/primitives/core" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } -sp-io = { path = "../../../../substrate/primitives/io" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-executive = { workspace = true, default-features = true } +frame-try-runtime = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } -xcm = { package = "staging-xcm", path = "../.." } -xcm-simulator = { path = ".." } -xcm-executor = { package = "staging-xcm-executor", path = "../../xcm-executor" } -xcm-builder = { package = "staging-xcm-builder", path = "../../xcm-builder" } -pallet-xcm = { path = "../../pallet-xcm" } -polkadot-core-primitives = { path = "../../../core-primitives" } -polkadot-runtime-parachains = { path = "../../../runtime/parachains" } -polkadot-parachain-primitives = { path = "../../../parachain" } +xcm = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 502bcca2d4427..616329a2f06b4 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -24,13 +24,11 @@ use frame_support::{ }; use frame_system::EnsureRoot; -use sp_core::ConstU32; use sp_runtime::{ generic, traits::{AccountIdLookup, BlakeTwo256, Hash, IdentifyAccount, Verify}, MultiAddress, MultiSignature, }; -use sp_std::prelude::*; use pallet_xcm::XcmPassthrough; use polkadot_core_primitives::BlockNumber as RelayBlockNumber; @@ -73,24 +71,13 @@ impl frame_system::Config for Runtime { parameter_types! 
{ pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } parameter_types! { diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 4740aee83d870..459d2640b6d90 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -72,24 +72,13 @@ impl frame_system::Config for Runtime { parameter_types! { pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl shared::Config for Runtime { diff --git a/polkadot/xcm/xcm-simulator/src/lib.rs b/polkadot/xcm/xcm-simulator/src/lib.rs index a6747a4789edf..59df394406ea0 100644 --- a/polkadot/xcm/xcm-simulator/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/src/lib.rs @@ -20,15 +20,18 @@ /// Used for sending messages. 
pub mod mock_message_queue; +extern crate alloc; + pub use codec::Encode; pub use paste; +pub use alloc::collections::vec_deque::VecDeque; +pub use core::{cell::RefCell, marker::PhantomData}; pub use frame_support::{ traits::{EnqueueMessage, Get, ProcessMessage, ProcessMessageError, ServiceQueues}, weights::{Weight, WeightMeter}, }; pub use sp_io::{hashing::blake2_256, TestExternalities}; -pub use sp_std::{cell::RefCell, collections::vec_deque::VecDeque, marker::PhantomData}; pub use polkadot_core_primitives::BlockNumber as RelayBlockNumber; pub use polkadot_parachain_primitives::primitives::{ diff --git a/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs index 96b47999fe952..bf7b0e15967c0 100644 --- a/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs +++ b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs @@ -24,7 +24,6 @@ use polkadot_parachain_primitives::primitives::{ use polkadot_primitives::BlockNumber as RelayBlockNumber; use sp_runtime::traits::{Get, Hash}; -use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedXcm}; pub use pallet::*; diff --git a/polkadot/zombienet_tests/assign-core.js b/polkadot/zombienet_tests/assign-core.js new file mode 100644 index 0000000000000..5ddb86930f5a0 --- /dev/null +++ b/polkadot/zombienet_tests/assign-core.js @@ -0,0 +1,48 @@ +async function run(nodeName, networkInfo, args) { + const wsUri = networkInfo.nodesByName[nodeName].wsUri; + const api = await zombie.connect(wsUri); + + let core = Number(args[0]); + + let assignments = []; + + for (let i = 1; i < args.length; i += 2) { + let [para, parts] = [args[i], args[i + 1]]; + + console.log(`Assigning para ${para} to core ${core}`); + + assignments.push( + [{ task: para }, parts] + ); + } + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(core, 0, assignments, null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml index 83f5434edddb1..611978a33a5f1 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml @@ -7,11 +7,9 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] max_validators_per_core = 1 - scheduling_lookahead = 2 num_cores = 3 [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] - needed_approvals = 3 max_approval_coalesce_count = 5 [relaychain] @@ -48,4 +46,4 @@ addToGenesis = true [types.Header] number = "u64" parent_hash = "Hash" -post_state = "Hash" \ No newline at end of file +post_state = "Hash" diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl 
b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl index d624cbaf9df6a..d47ef8f415f7a 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl @@ -11,8 +11,8 @@ elastic-validator-4: reports node_roles is 4 # Register 2 extra cores to this some-parachain. -elastic-validator-0: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -elastic-validator-0: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds # Wait for 20 relay chain blocks elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 900a3befbc6fc..7ba896e1c9039 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -11,8 +11,8 @@ validator: reports substrate_block_height{status="finalized"} is at least 10 wit validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. -alice: js-script ./assign-core.js with "2000,0" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "2000,1" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js deleted file mode 100644 index add63b6d30859..0000000000000 --- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js +++ /dev/null @@ -1,39 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const wsUri = networkInfo.nodesByName[nodeName].wsUri; - const api = await zombie.connect(wsUri); - - let para = Number(args[0]); - let core = Number(args[1]); - console.log(`Assigning para ${para} to core ${core}`); - - await zombie.util.cryptoWaitReady(); - - // account to submit tx - const keyring = new zombie.Keyring({ type: "sr25519" }); - const alice = keyring.addFromUri("//Alice"); - - await new Promise(async (resolve, reject) => { - const unsub = await api.tx.sudo - .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, 57600]], null)) - .signAndSend(alice, ({ status, isError }) => { - if (status.isInBlock) { - console.log( - `Transaction included at blockhash ${status.asInBlock}`, - ); - } else if (status.isFinalized) { - console.log( - `Transaction finalized at blockHash ${status.asFinalized}`, - ); - unsub(); - return resolve(); - } else if (isError) { - console.log(`Transaction error`); - reject(`Transaction error`); - } - }); - }); - - return 0; -} - -module.exports = { run }; diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js new file mode 120000 index 0000000000000..eeb6402c06f5e --- /dev/null +++ 
b/polkadot/zombienet_tests/elastic_scaling/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml new file mode 100644 index 0000000000000..fed30e0db0532 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml @@ -0,0 +1,44 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 3 + allowed_ancestry_len = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + lookahead = 2 + num_cores = 4 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=debug,parachain::backing=trace,parachain::collator-protocol=trace,parachain::prospective-parachains=trace,runtime::parachains::scheduler=trace,runtime::inclusion-inherent=trace,runtime::inclusion=trace" ] + count = 4 + +{% for id in range(2000,2004) %} +[[parachains]] +id = {{id}} +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-{{id}}" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl new file mode 100644 index 0000000000000..b8b8887df8578 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl @@ -0,0 +1,16 @@ +Description: CT shared core test +Network: ./0015-coretime-shared-core.toml +Creds: config + +validator: reports node_roles is 4 + +# register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit. +validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds +validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds +# assign core 0 to be shared by all paras. 
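For orientation, the arguments passed to `assign-core.js` in the tests above follow the generalized signature `core, para1, parts1, para2, parts2, ...`, where one core's time is divided into 57600 parts. A plain-Rust sketch of that arithmetic (not the runtime's actual assigner types, just the numbers used by this test):

```rust
/// One relay-chain core is divided into 57_600 parts; sharing it equally between
/// four paras yields 14_400 parts each, matching the
/// "0,2000,14400,2001,14400,2002,14400,2003,14400" arguments used in this test.
const PARTS_PER_CORE: u32 = 57_600;

fn equal_shares(paras: &[u32]) -> Vec<(u32, u32)> {
    let parts = PARTS_PER_CORE / paras.len() as u32;
    paras.iter().map(|&p| (p, parts)).collect()
}

fn main() {
    assert_eq!(
        equal_shares(&[2000, 2001, 2002, 2003]),
        vec![(2000, 14_400), (2001, 14_400), (2002, 14_400), (2003, 14_400)]
    );
}
```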
+validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds + +collator-2000: reports block height is at least 6 within 200 seconds +collator-2001: reports block height is at least 6 within 50 seconds +collator-2002: reports block height is at least 6 within 50 seconds +collator-2003: reports block height is at least 6 within 50 seconds diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/0015-force-register-paras.js new file mode 100644 index 0000000000000..f82163b01105a --- /dev/null +++ b/polkadot/zombienet_tests/functional/0015-force-register-paras.js @@ -0,0 +1,63 @@ +async function run(nodeName, networkInfo, args) { + const init = networkInfo.nodesByName[nodeName]; + let wsUri = init.wsUri; + let userDefinedTypes = init.userDefinedTypes; + const api = await zombie.connect(wsUri, userDefinedTypes); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + let calls = []; + + for (let i = 0; i < args.length; i++) { + let para = args[i]; + const sec = networkInfo.nodesByName["collator-" + para]; + const api_collator = await zombie.connect(sec.wsUri, sec.userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // Get the genesis header and the validation code of the parachain + const genesis_header = await api_collator.rpc.chain.getHeader(); + const validation_code = await api_collator.rpc.state.getStorage("0x3A636F6465"); + + calls.push( + api.tx.paras.addTrustedValidationCode(validation_code.toHex()) + ); + calls.push( + api.tx.registrar.forceRegister( + alice.address, + 0, + Number(para), + genesis_header.toHex(), + validation_code.toHex(), + ) + ); + } + + const sudo_batch = api.tx.sudo.sudo(api.tx.utility.batch(calls)); + + await new Promise(async (resolve, reject) => { + const unsub = await sudo_batch + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/assign-core.js b/polkadot/zombienet_tests/functional/assign-core.js new file mode 120000 index 0000000000000..eeb6402c06f5e --- /dev/null +++ b/polkadot/zombienet_tests/functional/assign-core.js @@ -0,0 +1 @@ +../assign-core.js \ No newline at end of file diff --git a/prdoc/pr_1223.prdoc b/prdoc/1.13.0/pr_1223.prdoc similarity index 100% rename from prdoc/pr_1223.prdoc rename to prdoc/1.13.0/pr_1223.prdoc diff --git a/prdoc/pr_1644.prdoc b/prdoc/1.13.0/pr_1644.prdoc similarity index 100% rename from prdoc/pr_1644.prdoc rename to prdoc/1.13.0/pr_1644.prdoc diff --git a/prdoc/pr_3393.prdoc b/prdoc/1.13.0/pr_3393.prdoc similarity index 100% rename from prdoc/pr_3393.prdoc rename to prdoc/1.13.0/pr_3393.prdoc diff --git a/prdoc/pr_3905.prdoc b/prdoc/1.13.0/pr_3905.prdoc similarity index 100% rename from prdoc/pr_3905.prdoc rename to prdoc/1.13.0/pr_3905.prdoc diff --git a/prdoc/pr_3935.prdoc b/prdoc/1.13.0/pr_3935.prdoc similarity index 100% rename from prdoc/pr_3935.prdoc rename to prdoc/1.13.0/pr_3935.prdoc diff --git a/prdoc/pr_3952.prdoc b/prdoc/1.13.0/pr_3952.prdoc similarity 
index 100% rename from prdoc/pr_3952.prdoc rename to prdoc/1.13.0/pr_3952.prdoc diff --git a/prdoc/pr_4131.prdoc b/prdoc/1.13.0/pr_4131.prdoc similarity index 100% rename from prdoc/pr_4131.prdoc rename to prdoc/1.13.0/pr_4131.prdoc diff --git a/prdoc/pr_4198.prdoc b/prdoc/1.13.0/pr_4198.prdoc similarity index 100% rename from prdoc/pr_4198.prdoc rename to prdoc/1.13.0/pr_4198.prdoc diff --git a/prdoc/pr_4233.prdoc b/prdoc/1.13.0/pr_4233.prdoc similarity index 100% rename from prdoc/pr_4233.prdoc rename to prdoc/1.13.0/pr_4233.prdoc diff --git a/prdoc/pr_4249.prdoc b/prdoc/1.13.0/pr_4249.prdoc similarity index 100% rename from prdoc/pr_4249.prdoc rename to prdoc/1.13.0/pr_4249.prdoc diff --git a/prdoc/pr_4274.prdoc b/prdoc/1.13.0/pr_4274.prdoc similarity index 100% rename from prdoc/pr_4274.prdoc rename to prdoc/1.13.0/pr_4274.prdoc diff --git a/prdoc/pr_4339.prdoc b/prdoc/1.13.0/pr_4339.prdoc similarity index 100% rename from prdoc/pr_4339.prdoc rename to prdoc/1.13.0/pr_4339.prdoc diff --git a/prdoc/pr_4380.prdoc b/prdoc/1.13.0/pr_4380.prdoc similarity index 100% rename from prdoc/pr_4380.prdoc rename to prdoc/1.13.0/pr_4380.prdoc diff --git a/prdoc/pr_4392.prdoc b/prdoc/1.13.0/pr_4392.prdoc similarity index 100% rename from prdoc/pr_4392.prdoc rename to prdoc/1.13.0/pr_4392.prdoc diff --git a/prdoc/pr_4410.prdoc b/prdoc/1.13.0/pr_4410.prdoc similarity index 100% rename from prdoc/pr_4410.prdoc rename to prdoc/1.13.0/pr_4410.prdoc diff --git a/prdoc/pr_4418.prdoc b/prdoc/1.13.0/pr_4418.prdoc similarity index 100% rename from prdoc/pr_4418.prdoc rename to prdoc/1.13.0/pr_4418.prdoc diff --git a/prdoc/pr_4431.prdoc b/prdoc/1.13.0/pr_4431.prdoc similarity index 100% rename from prdoc/pr_4431.prdoc rename to prdoc/1.13.0/pr_4431.prdoc diff --git a/prdoc/pr_4444.prdoc b/prdoc/1.13.0/pr_4444.prdoc similarity index 100% rename from prdoc/pr_4444.prdoc rename to prdoc/1.13.0/pr_4444.prdoc diff --git a/prdoc/pr_4465.prdoc b/prdoc/1.13.0/pr_4465.prdoc similarity index 100% rename from prdoc/pr_4465.prdoc rename to prdoc/1.13.0/pr_4465.prdoc diff --git a/prdoc/pr_4471.prdoc b/prdoc/1.13.0/pr_4471.prdoc similarity index 100% rename from prdoc/pr_4471.prdoc rename to prdoc/1.13.0/pr_4471.prdoc diff --git a/prdoc/pr_4472.prdoc b/prdoc/1.13.0/pr_4472.prdoc similarity index 100% rename from prdoc/pr_4472.prdoc rename to prdoc/1.13.0/pr_4472.prdoc diff --git a/prdoc/pr_4475.prdoc b/prdoc/1.13.0/pr_4475.prdoc similarity index 100% rename from prdoc/pr_4475.prdoc rename to prdoc/1.13.0/pr_4475.prdoc diff --git a/prdoc/pr_4478.prdoc b/prdoc/1.13.0/pr_4478.prdoc similarity index 100% rename from prdoc/pr_4478.prdoc rename to prdoc/1.13.0/pr_4478.prdoc diff --git a/prdoc/pr_4503.prdoc b/prdoc/1.13.0/pr_4503.prdoc similarity index 100% rename from prdoc/pr_4503.prdoc rename to prdoc/1.13.0/pr_4503.prdoc diff --git a/prdoc/pr_4510.prdoc b/prdoc/1.13.0/pr_4510.prdoc similarity index 100% rename from prdoc/pr_4510.prdoc rename to prdoc/1.13.0/pr_4510.prdoc diff --git a/prdoc/pr_4514.prdoc b/prdoc/1.13.0/pr_4514.prdoc similarity index 100% rename from prdoc/pr_4514.prdoc rename to prdoc/1.13.0/pr_4514.prdoc diff --git a/prdoc/pr_4521.prdoc b/prdoc/1.13.0/pr_4521.prdoc similarity index 100% rename from prdoc/pr_4521.prdoc rename to prdoc/1.13.0/pr_4521.prdoc diff --git a/prdoc/pr_4533.prdoc b/prdoc/1.13.0/pr_4533.prdoc similarity index 100% rename from prdoc/pr_4533.prdoc rename to prdoc/1.13.0/pr_4533.prdoc diff --git a/prdoc/pr_4534.prdoc b/prdoc/1.13.0/pr_4534.prdoc similarity index 100% rename from 
prdoc/pr_4534.prdoc rename to prdoc/1.13.0/pr_4534.prdoc diff --git a/prdoc/pr_4537.prdoc b/prdoc/1.13.0/pr_4537.prdoc similarity index 100% rename from prdoc/pr_4537.prdoc rename to prdoc/1.13.0/pr_4537.prdoc diff --git a/prdoc/pr_4541.prdoc b/prdoc/1.13.0/pr_4541.prdoc similarity index 100% rename from prdoc/pr_4541.prdoc rename to prdoc/1.13.0/pr_4541.prdoc diff --git a/prdoc/pr_4542.prdoc b/prdoc/1.13.0/pr_4542.prdoc similarity index 100% rename from prdoc/pr_4542.prdoc rename to prdoc/1.13.0/pr_4542.prdoc diff --git a/prdoc/pr_4555.prdoc b/prdoc/1.13.0/pr_4555.prdoc similarity index 100% rename from prdoc/pr_4555.prdoc rename to prdoc/1.13.0/pr_4555.prdoc diff --git a/prdoc/pr_4571.prdoc b/prdoc/1.13.0/pr_4571.prdoc similarity index 100% rename from prdoc/pr_4571.prdoc rename to prdoc/1.13.0/pr_4571.prdoc diff --git a/prdoc/pr_4595.prdoc b/prdoc/1.13.0/pr_4595.prdoc similarity index 100% rename from prdoc/pr_4595.prdoc rename to prdoc/1.13.0/pr_4595.prdoc diff --git a/prdoc/pr_4621.prdoc b/prdoc/1.13.0/pr_4621.prdoc similarity index 100% rename from prdoc/pr_4621.prdoc rename to prdoc/1.13.0/pr_4621.prdoc diff --git a/prdoc/pr_4633.prdoc b/prdoc/1.13.0/pr_4633.prdoc similarity index 100% rename from prdoc/pr_4633.prdoc rename to prdoc/1.13.0/pr_4633.prdoc diff --git a/prdoc/pr_4634.prdoc b/prdoc/1.13.0/pr_4634.prdoc similarity index 100% rename from prdoc/pr_4634.prdoc rename to prdoc/1.13.0/pr_4634.prdoc diff --git a/prdoc/pr_4645.prdoc b/prdoc/1.13.0/pr_4645.prdoc similarity index 100% rename from prdoc/pr_4645.prdoc rename to prdoc/1.13.0/pr_4645.prdoc diff --git a/prdoc/pr_4646.prdoc b/prdoc/1.13.0/pr_4646.prdoc similarity index 100% rename from prdoc/pr_4646.prdoc rename to prdoc/1.13.0/pr_4646.prdoc diff --git a/prdoc/pr_4721.prdoc b/prdoc/1.13.0/pr_4721.prdoc similarity index 100% rename from prdoc/pr_4721.prdoc rename to prdoc/1.13.0/pr_4721.prdoc diff --git a/prdoc/1.14.0/pr_1631.prdoc b/prdoc/1.14.0/pr_1631.prdoc new file mode 100644 index 0000000000000..f73d00968552a --- /dev/null +++ b/prdoc/1.14.0/pr_1631.prdoc @@ -0,0 +1,39 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Upgrade libp2p to 0.52.4 + +doc: + - audience: [Node Dev, Node Operator] + description: | + Upgrade libp2p from 0.51.4 to 0.52.4 + +crates: + - name: sc-authority-discovery + bump: minor + - name: sc-cli + bump: minor + - name: sc-mixnet + bump: minor + - name: sc-network + bump: minor + - name: sc-network-gossip + bump: minor + - name: sc-network-common + bump: minor + - name: sc-network-light + bump: minor + - name: sc-network-statement + bump: minor + - name: sc-network-sync + bump: minor + - name: sc-network-test + bump: minor + - name: sc-network-transactions + bump: minor + - name: sc-network-types + bump: minor + - name: sc-offchain + bump: major + - name: sc-telemetry + bump: major diff --git a/prdoc/1.14.0/pr_3374.prdoc b/prdoc/1.14.0/pr_3374.prdoc new file mode 100644 index 0000000000000..76744f778db0a --- /dev/null +++ b/prdoc/1.14.0/pr_3374.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: removed `pallet::getter` from `pallet-timestamp` + +doc: + - audience: Runtime Dev + description: | + This PR removes all the `pallet::getter` usages from `pallet-timestamp`, and updates depdendant runtimes accordingly. 
+ The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-timestamp \ No newline at end of file diff --git a/prdoc/pr_3679.prdoc b/prdoc/1.14.0/pr_3679.prdoc similarity index 100% rename from prdoc/pr_3679.prdoc rename to prdoc/1.14.0/pr_3679.prdoc diff --git a/prdoc/1.14.0/pr_3820.prdoc b/prdoc/1.14.0/pr_3820.prdoc new file mode 100644 index 0000000000000..33e8129df92a3 --- /dev/null +++ b/prdoc/1.14.0/pr_3820.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated calls from treasury pallet + +doc: + - audience: Runtime User + description: | + This PR remove deprecated calls, relevant tests from `pallet-treasury`. + - Remove deprecated calls `propose_spend`, `reject_proposal`, `approve_proposal`. + - Replace the code flow of `propose_spend` then `approve_proposal` with `spend_local` + - Remove deprecated calls' related weight functions and test cases. + - Remove deprecated parameter types: ProposalBond, ProposalBondMaximum, ProposalBondMinimum + - Remove pallet treasury's relevant deprecated code in pallet-tips, pallet-bounties and pallet-child-bounties + +crates: + - name: pallet-treasury + bump: major + - name: pallet-tips + bump: patch + - name: pallet-child-bounties + bump: patch + - name: pallet-bounties + bump: patch + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_3828.prdoc b/prdoc/1.14.0/pr_3828.prdoc new file mode 100644 index 0000000000000..426625d5f23ef --- /dev/null +++ b/prdoc/1.14.0/pr_3828.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[FRAME] Remove storage migration type" + +doc: + - audience: Runtime Dev + description: | + Introduce migration type to remove data associated with a specific storage of a pallet. + +crates: + - name: frame-support + bump: minor diff --git a/prdoc/1.14.0/pr_3843.prdoc b/prdoc/1.14.0/pr_3843.prdoc new file mode 100644 index 0000000000000..e01900dcc25b9 --- /dev/null +++ b/prdoc/1.14.0/pr_3843.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce a new dispatchable function `set_partial_params` in `pallet-core-fellowship` + +doc: + - audience: Runtime Dev + description: | + This PR adds a new dispatchable function `set_partial_params` + to update config with multiple arguments without duplicating the + fields that does not need to update. 
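The `set_partial_params` call described above follows a common partial-update pattern: only fields supplied as `Some(..)` overwrite the stored configuration. A self-contained sketch with made-up field names (the real `pallet-core-fellowship` parameter struct and dispatchable differ):

```rust
// Hypothetical field names, purely to illustrate the partial-update idea.
#[derive(Clone, Debug, PartialEq)]
struct Params {
    active_salary: u64,
    passive_salary: u64,
    demotion_period: u32,
}

#[derive(Default)]
struct ParamsUpdate {
    active_salary: Option<u64>,
    passive_salary: Option<u64>,
    demotion_period: Option<u32>,
}

/// Overwrite only the fields that were actually provided.
fn set_partial_params(current: &mut Params, update: ParamsUpdate) {
    if let Some(v) = update.active_salary { current.active_salary = v; }
    if let Some(v) = update.passive_salary { current.passive_salary = v; }
    if let Some(v) = update.demotion_period { current.demotion_period = v; }
}

fn main() {
    let mut p = Params { active_salary: 10, passive_salary: 5, demotion_period: 100 };
    set_partial_params(&mut p, ParamsUpdate { passive_salary: Some(7), ..Default::default() });
    assert_eq!(p, Params { active_salary: 10, passive_salary: 7, demotion_period: 100 });
}
```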
+ +crates: + - name: pallet-core-fellowship + bump: major + - name: collectives-westend-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_3940.prdoc b/prdoc/1.14.0/pr_3940.prdoc new file mode 100644 index 0000000000000..590afa77bb1ed --- /dev/null +++ b/prdoc/1.14.0/pr_3940.prdoc @@ -0,0 +1,31 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "RFC-5: Add request revenue info" + +doc: + - audience: Runtime Dev + description: | + Partially implemented RFC-5 in terms of revenue requests and notifications + - audience: Runtime User + description: | + Instantaneous Coretime sold on the relay chain now generates revenue for its provider. + The revenue may be claimed by its provider on the Coretime chain. + +crates: + - name: polkadot-runtime-parachains + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: pallet-broker + bump: minor + - name: rococo-runtime-constants + bump: minor + - name: westend-runtime-constants + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor diff --git a/prdoc/1.14.0/pr_3951.prdoc b/prdoc/1.14.0/pr_3951.prdoc new file mode 100644 index 0000000000000..3a8096e6f4487 --- /dev/null +++ b/prdoc/1.14.0/pr_3951.prdoc @@ -0,0 +1,30 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Pallet Assets Freezer + +doc: + - audience: Runtime Dev + description: | + This pallet is an extension of `pallet-assets`, supporting + freezes similar to `pallet-balances`. + To use this pallet, set `Freezer` of `pallet-assets` Config to the according instance of + `pallet-assets-freezer`. + - audience: Runtime User + description: | + The storage of this pallet contains a Vecs of account freezes. Applications UIs and Developer + Tools might benefit from observing it. + +crates: + - name: frame-support + bump: minor + - name: pallet-assets-freezer + bump: major + - name: pallet-assets + bump: patch + - name: pallet-balances + bump: patch + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/1.14.0/pr_4513.prdoc b/prdoc/1.14.0/pr_4513.prdoc new file mode 100644 index 0000000000000..e7363d211c170 --- /dev/null +++ b/prdoc/1.14.0/pr_4513.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-elections-phragmen + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-elections-phragmen`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-elections-phragmen + bump: major diff --git a/prdoc/1.14.0/pr_4596.prdoc b/prdoc/1.14.0/pr_4596.prdoc new file mode 100644 index 0000000000000..d47aa3aedfb85 --- /dev/null +++ b/prdoc/1.14.0/pr_4596.prdoc @@ -0,0 +1,18 @@ +title: "Frame: `Consideration` trait generic over `Footprint` and handles zero cost" + +doc: + - audience: Runtime Dev + description: | + `Consideration` trait generic over `Footprint` and can handle zero cost for a give footprint. + + `Consideration` trait is generic over `Footprint` (currently defined over the type with the same name). This makes it possible to setup a custom footprint (e.g. 
current number of proposals in the storage). + + `Consideration::new` and `Consideration::update` return an `Option` instead `Self`, this make it possible to define no cost for a specific footprint (e.g. current number of proposals in the storage < max_proposal_count / 2). + +crates: + - name: frame-support + bump: major + - name: pallet-preimage + bump: major + - name: pallet-balances + bump: patch diff --git a/prdoc/1.14.0/pr_4618.prdoc b/prdoc/1.14.0/pr_4618.prdoc new file mode 100644 index 0000000000000..3dd0fce81eeee --- /dev/null +++ b/prdoc/1.14.0/pr_4618.prdoc @@ -0,0 +1,20 @@ +title: Unify logic for fetching the `:code` of a block + +doc: + - audience: Node Operator + description: | + Fixes an issue on parachains when running with a custom `substitute` of the on chain wasm code + and having replaced the wasm code on the relay chain. The relay chain was rejecting blocks + build this way, because the collator was reporting the actual on chain wasm code hash + to the relay chain. However, the relay chain was expecting the code hash of the wasm code substitute + that was also registered on the relay chain. + - audience: Node Dev + description: | + `Client::code_at` will now use the same `substitute` to determine the code for a given block as it is + done when executing any runtime call. + +crates: + - name: cumulus-client-consensus-aura + bump: minor + - name: sc-service + bump: minor diff --git a/prdoc/pr_4662.prdoc b/prdoc/1.14.0/pr_4662.prdoc similarity index 100% rename from prdoc/pr_4662.prdoc rename to prdoc/1.14.0/pr_4662.prdoc diff --git a/prdoc/1.14.0/pr_4684.prdoc b/prdoc/1.14.0/pr_4684.prdoc new file mode 100644 index 0000000000000..b1c429c578224 --- /dev/null +++ b/prdoc/1.14.0/pr_4684.prdoc @@ -0,0 +1,13 @@ +title: "Refactor of the parachain template" + +doc: + - audience: Runtime Dev + description: | + Introduce the construct runtime V2 to the parachain template runtime. In addition, url links in the parachain pallet + template now direct to the polkadot sdk docs. + +crates: + - name: pallet-parachain-template + bump: none + - name: parachain-template-runtime + bump: none diff --git a/prdoc/1.14.0/pr_4685.prdoc b/prdoc/1.14.0/pr_4685.prdoc new file mode 100644 index 0000000000000..e212919ba2e5b --- /dev/null +++ b/prdoc/1.14.0/pr_4685.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Chain-spec-builder supports `codeSubstitutes`. + +doc: + - audience: Node Operator + description: | + A new subcommand `add-code-substitute` is available for the `chain-spec-builder` binary. It allows users to provide a runtime that should be used from a given + block onwards. The `codeSubstitutes` field in the chain spec is used to force usage of a given runtime at a given block until the next runtime upgrade. It can be + used to progress chains that are stalled due to runtime bugs that prevent block-building. However, parachain usage is only possible in combination with an updated + validation function on the relay chain. 
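As a rough mental model for the `codeSubstitutes` mechanism described above (ignoring the "until the next runtime upgrade" cut-off for brevity), the client picks the substitute registered at the greatest height at or below the block being executed and otherwise falls back to the on-chain code. A minimal sketch, not the actual `sc-service` implementation:

```rust
use std::collections::BTreeMap;

/// Given the `codeSubstitutes` map from the chain spec (block height -> wasm blob),
/// return the code to use for `block`: the most recently registered substitute at or
/// below that height, or the on-chain code if none applies.
fn code_for_block<'a>(
    substitutes: &'a BTreeMap<u64, Vec<u8>>,
    on_chain: &'a [u8],
    block: u64,
) -> &'a [u8] {
    substitutes
        .range(..=block)
        .next_back()
        .map(|(_, code)| code.as_slice())
        .unwrap_or(on_chain)
}

fn main() {
    let mut subs = BTreeMap::new();
    subs.insert(100u64, b"fixed-runtime".to_vec());
    assert_eq!(code_for_block(&subs, b"stalled-runtime", 99), b"stalled-runtime".as_slice());
    assert_eq!(code_for_block(&subs, b"stalled-runtime", 150), b"fixed-runtime".as_slice());
}
```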
+ +crates: + - name: staging-chain-spec-builder + bump: minor diff --git a/prdoc/pr_4691.prdoc b/prdoc/1.14.0/pr_4691.prdoc similarity index 100% rename from prdoc/pr_4691.prdoc rename to prdoc/1.14.0/pr_4691.prdoc diff --git a/prdoc/1.14.0/pr_4710.prdoc b/prdoc/1.14.0/pr_4710.prdoc new file mode 100644 index 0000000000000..d7d31d817208a --- /dev/null +++ b/prdoc/1.14.0/pr_4710.prdoc @@ -0,0 +1,11 @@ +title: "Dont partially modify HRMP pages" + +doc: + - audience: Runtime Dev + description: | + The xcmp-queue pallet now does not partially modify a page anymore when the next message does + not fully fit into it but instead cleanly creates a new one. + +crates: + - name: cumulus-pallet-xcmp-queue + bump: patch diff --git a/prdoc/1.14.0/pr_4724.prdoc b/prdoc/1.14.0/pr_4724.prdoc new file mode 100644 index 0000000000000..3723c2a70246a --- /dev/null +++ b/prdoc/1.14.0/pr_4724.prdoc @@ -0,0 +1,24 @@ +title: Fix core sharing and make use of scheduling_lookahead during backing + +doc: + - audience: Node Dev + description: | + Core sharing (two or more parachains scheduled on the same core with interlaced assignments) was not working correctly. + Adds the neccessary fixes to the backing subsystems. Moreover, adds support for backing collations which are built + and advertised ahead of time (with up to `scheduling_lookahead` relay chain blocks in advance). + +crates: + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-prospective-parachains + bump: patch + - name: polkadot-collator-protocol + bump: patch + - name: polkadot-statement-distribution + bump: patch + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-runtime-parachains + bump: none + - name: polkadot + bump: none diff --git a/prdoc/1.14.0/pr_4728.prdoc b/prdoc/1.14.0/pr_4728.prdoc new file mode 100644 index 0000000000000..1494fbdbb2b9f --- /dev/null +++ b/prdoc/1.14.0/pr_4728.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Glutton - add support for bloating the parachain block length" + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + Introduce a new configuration parameter `block_length` which can be configured via a call to + `set_block_length`. This sets the ration of the block length that is to be filled with trash. + This is implemented by an inherent that takes trash data as a parameter filling the block length. + +crates: + - name: pallet-glutton + bump: major + - name: glutton-westend-runtime + bump: major diff --git a/prdoc/1.14.0/pr_4730.prdoc b/prdoc/1.14.0/pr_4730.prdoc new file mode 100644 index 0000000000000..9af14534bcbd2 --- /dev/null +++ b/prdoc/1.14.0/pr_4730.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: rpc upgrade jsonrpsee to v0.23.1 + +doc: + - audience: Node Dev + description: | + Upgrade the rpc library jsonrpsee to v0.23.1 to utilize: + + - Add Extensions which we now is using to get the connection id (used by the rpc spec v2) + - Update hyper to v1.0, http v1.0, soketto and related crates (hyper::service::make_service_fn is removed) + - The subscription API for the client is modified to know why a subscription was closed. 
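Referring back to the `pallet-glutton` `block_length` change above: the knob is a fill ratio applied to the chain's block-length limit, and the inherent then has to supply roughly that many bytes of trash data. A back-of-the-envelope sketch (per-mille is used here only for illustration; the pallet has its own fixed-point configuration type):

```rust
/// Approximate number of trash bytes the inherent would need to supply to fill
/// `fill_per_mille`/1000 of a block whose length limit is `block_length_limit` bytes.
fn trash_bytes(block_length_limit: u64, fill_per_mille: u64) -> u64 {
    block_length_limit * fill_per_mille / 1_000
}

fn main() {
    // e.g. bloating 90% of a 5 MiB block-length limit:
    assert_eq!(trash_bytes(5 * 1024 * 1024, 900), 4_718_592);
}
```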
+ +crates: + - name: sc-rpc-spec-v2 + bump: patch + - name: sc-rpc + bump: patch + - name: sc-rpc-server + bump: patch + - name: cumulus-relay-chain-rpc-interface + bump: patch + - name: frame-remote-externalities + bump: patch diff --git a/prdoc/pr_4733.prdoc b/prdoc/1.14.0/pr_4733.prdoc similarity index 100% rename from prdoc/pr_4733.prdoc rename to prdoc/1.14.0/pr_4733.prdoc diff --git a/prdoc/1.14.0/pr_4756.prdoc b/prdoc/1.14.0/pr_4756.prdoc new file mode 100644 index 0000000000000..064a79fb06648 --- /dev/null +++ b/prdoc/1.14.0/pr_4756.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Do not make pallet-nfts benchmarks signature-dependent + +doc: + - audience: Runtime Dev + description: | + - Adds extra functionality to pallet-nfts's BenchmarkHelper to provide signers and sign message. + - Abstracts away the explicit link with Sr25519 schema in the benchmarks, allowing parachains with a different one to be able to run them and calculate the weights. + - Adds a default implementation for the empty tuple that leaves the code equivalent. + +crates: + - name: pallet-nfts + bump: minor diff --git a/prdoc/1.14.0/pr_4757.prdoc b/prdoc/1.14.0/pr_4757.prdoc new file mode 100644 index 0000000000000..d94a20d7bb1a6 --- /dev/null +++ b/prdoc/1.14.0/pr_4757.prdoc @@ -0,0 +1,18 @@ +title: "pallet assets: optional auto-increment for the asset ID" + +doc: + - audience: Runtime Dev + description: | + Introduce an optional auto-increment setup for the IDs of new assets. + +crates: + - name: pallet-assets + bump: major + - name: staging-xcm-builder + bump: patch + - name: staging-xcm + bump: patch + - name: pallet-assets-freezer + bump: patch + - name: pallet-contracts + bump: patch diff --git a/prdoc/1.14.0/pr_4765.prdoc b/prdoc/1.14.0/pr_4765.prdoc new file mode 100644 index 0000000000000..f64b2fdc51ab1 --- /dev/null +++ b/prdoc/1.14.0/pr_4765.prdoc @@ -0,0 +1,18 @@ +title: CheckWeight - account for extrinsic len as proof size + +doc: + - audience: Runtime Dev + description: | + This changes how CheckWeight extension works. It will now account for the extrinsic length + as proof size. When `on_idle` is called, the remaining weight parameter reflects this. + +crates: + - name: frame-system + bump: patch + - name: frame-executive + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + + + diff --git a/prdoc/1.14.0/pr_4769.prdoc b/prdoc/1.14.0/pr_4769.prdoc new file mode 100644 index 0000000000000..e9691ba6f8974 --- /dev/null +++ b/prdoc/1.14.0/pr_4769.prdoc @@ -0,0 +1,20 @@ +title: Use real rust type for pallet alias in `runtime` macro + +doc: + - audience: Runtime Dev + description: | + This PR adds the ability to use a real rust type for pallet alias in the new `runtime` macro: + ```rust + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + ``` + + Please note that the current syntax still continues to be supported. 
+ +crates: + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: patch + - name: minimal-template-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_4799.prdoc b/prdoc/1.14.0/pr_4799.prdoc new file mode 100644 index 0000000000000..c4e68e316c22f --- /dev/null +++ b/prdoc/1.14.0/pr_4799.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "network: Upgrade `litep2p` to v0.6.0" + +doc: + - audience: Node Operator + description: | + This PR brings the latest `litep2p` v0.6.0 to polkadot-sdk with stability improvements, + security fixes, and performance optimizations. + + Specifically: + - Incoming DHT records are now validated also with experimental litep2p network backend. + - Performance of TCP & WebSocket connections improved by setting `TCP_NODELAY` flag. + - Stability of secondary connection establishment improved. + - Multiple possible panics in litep2p library eliminated. + +crates: + - name: sc-authority-discovery + bump: patch + - name: sc-network + bump: patch + - name: sc-network-types + bump: patch diff --git a/prdoc/1.14.0/pr_4802.prdoc b/prdoc/1.14.0/pr_4802.prdoc new file mode 100644 index 0000000000000..5757c4cbae184 --- /dev/null +++ b/prdoc/1.14.0/pr_4802.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add `health/readiness endpoint` to the rpc server + +doc: + - audience: Node Operator + description: | + Add `/health/readiness endpoint` to the rpc server which returns HTTP status code 200 if the chain is synced + and can connect to the rest of the network otherwise status code 500 is returned. + The endpoint can be reached by performing a HTTP GET request to the + endpoint such as `$ curl /health/readiness` + +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/1.14.0/pr_4807.prdoc b/prdoc/1.14.0/pr_4807.prdoc new file mode 100644 index 0000000000000..b60bfb524510a --- /dev/null +++ b/prdoc/1.14.0/pr_4807.prdoc @@ -0,0 +1,11 @@ +title: "pallet ranked collective: max member count per rank" + +doc: + - audience: Runtime Dev + description: | + Configuration for the maximum member count per rank, with the option for no limit. + +crates: + - name: pallet-ranked-collective + bump: major + diff --git a/prdoc/1.14.0/pr_4823.prdoc b/prdoc/1.14.0/pr_4823.prdoc new file mode 100644 index 0000000000000..a498b33f7bfa9 --- /dev/null +++ b/prdoc/1.14.0/pr_4823.prdoc @@ -0,0 +1,11 @@ +title: "`pallet-referenda`: Ensure to schedule referendas earliest at the next block" + +doc: + - audience: Runtime User + description: | + Ensure that referendas are scheduled earliest at the next block when they are enacted. + Otherwise the scheduling may fails and thus, the enactment of the referenda. + +crates: + - name: pallet-referenda + bump: patch diff --git a/prdoc/1.14.0/pr_4831.prdoc b/prdoc/1.14.0/pr_4831.prdoc new file mode 100644 index 0000000000000..8629ead6e81d8 --- /dev/null +++ b/prdoc/1.14.0/pr_4831.prdoc @@ -0,0 +1,25 @@ +title: "treasury pallet: - remove unused config parameters" + +doc: + - audience: Runtime Dev + description: | + Remove unused config parameters `ApproveOrigin` and `OnSlash` from the treasury pallet. + Add `OnSlash` config parameter to the bounties and tips pallets. 
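Circling back to the `pallet-referenda` fix above: the requested enactment point is effectively clamped so it is never earlier than the next block, which keeps the scheduler from being asked to dispatch within the current block. A tiny sketch of that rule (plain `u32` block numbers, not the pallet's actual types):

```rust
/// Clamp a desired enactment block so it is never earlier than `now + 1`.
fn enactment_block(now: u32, desired: u32) -> u32 {
    desired.max(now + 1)
}

fn main() {
    assert_eq!(enactment_block(100, 90), 101);  // too early: pushed to the next block
    assert_eq!(enactment_block(100, 150), 150); // already in the future: unchanged
}
```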
+ +crates: + - name: pallet-treasury + bump: major + - name: pallet-bounties + bump: major + - name: pallet-tips + bump: major + - name: collectives-westend-runtime + bump: patch + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: kitchensink-runtime + bump: patch diff --git a/prdoc/1.14.0/pr_4833.prdoc b/prdoc/1.14.0/pr_4833.prdoc new file mode 100644 index 0000000000000..a5cf853696eee --- /dev/null +++ b/prdoc/1.14.0/pr_4833.prdoc @@ -0,0 +1,12 @@ +title: "Reinitialize should allow to override existing config in collationGeneration" + +doc: + - audience: Node Dev + description: | + The Reinitialize collationGeneration subsystem message currently fails if no other config is already set. + As it is difficult to query the collationGeneration subsystem to check when to call Initialize or Reinitialize, this PR + proposes that Reinitialize overrides the configuration regardless if there was one already set. + +crates: + - name: polkadot-node-collation-generation + bump: minor diff --git a/prdoc/1.14.0/pr_4844.prdoc b/prdoc/1.14.0/pr_4844.prdoc new file mode 100644 index 0000000000000..999e63c84ed9a --- /dev/null +++ b/prdoc/1.14.0/pr_4844.prdoc @@ -0,0 +1,34 @@ +title: Make `Verifier::verify` and `BlockImport::check_block` use `&self` instead of `&mut self` + +doc: + - audience: Node Dev + description: | + `Verifier::verify` and `BlockImport::check_block` were refactored to use `&self` instead of `&mut self` + because there is no fundamental requirement for those operations to be exclusive in nature. + +crates: +- name: sc-consensus + bump: major + validate: false +- name: sc-consensus-aura + bump: major +- name: sc-consensus-babe + bump: major +- name: sc-consensus-beefy + bump: major +- name: sc-consensus-grandpa + bump: major +- name: sc-consensus-manual-seal + bump: major +- name: sc-consensus-pow + bump: major +- name: sc-service + bump: major +- name: cumulus-client-consensus-common + bump: major +- name: cumulus-client-consensus-aura + bump: major +- name: cumulus-client-consensus-relay-chain + bump: major +- name: polkadot-parachain-bin + validate: false diff --git a/prdoc/1.14.0/pr_4857.prdoc b/prdoc/1.14.0/pr_4857.prdoc new file mode 100644 index 0000000000000..d515e4257622e --- /dev/null +++ b/prdoc/1.14.0/pr_4857.prdoc @@ -0,0 +1,50 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[xcm] runtime api for LocationToAccount conversions" + +doc: + - audience: Runtime Dev + description: | + Introduces a new runtime API to help with conversions of XCM `Location` to the runtime's `AccountId`, + showing an Ss58 formatted address for easier verification. + + Besides that, the `xcm-fee-payment-runtime-api` module was merged into the new `xcm-runtime-apis`. + If you are using the `xcm-fee-payment-runtime-api` dependency, you just need to change it to `xcm-runtime-apis` + and update the imports from `use xcm_fee_payment_runtime_api::*` to `use xcm_runtime_apis::*`. 
+ +crates: + - name: xcm-runtime-apis + bump: none + - name: polkadot-sdk + bump: patch + - name: pallet-xcm + bump: patch + - name: polkadot-service + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: none diff --git a/prdoc/1.14.0/pr_4865.prdoc b/prdoc/1.14.0/pr_4865.prdoc new file mode 100644 index 0000000000000..48ffd04219cf5 --- /dev/null +++ b/prdoc/1.14.0/pr_4865.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement trait `ContainsLengthBound` for pallet-membership + +doc: + - audience: Runtime Dev + description: | + Implement trait ContainsLengthBound for pallet membership otherwise we can't use it with pallet-tips without wrapper + +crates: + - name: pallet-membership + bump: minor diff --git a/prdoc/1.14.0/pr_4877.prdoc b/prdoc/1.14.0/pr_4877.prdoc new file mode 100644 index 0000000000000..ede536aee4502 --- /dev/null +++ b/prdoc/1.14.0/pr_4877.prdoc @@ -0,0 +1,13 @@ +title: "Core-Fellowship: new promote_fast call" + +doc: + - audience: Runtime User + description: | + Adds the ability to quickly promote someone within a collective by bypassing the promotion + cooldown. This can help in special situations and comes with a new origin: `FastPromoteOrigin`. + +crates: + - name: pallet-core-fellowship + bump: major + - name: collectives-westend-runtime + bump: major diff --git a/prdoc/pr_3286.prdoc b/prdoc/pr_3286.prdoc new file mode 100644 index 0000000000000..6ec3f6552a4a7 --- /dev/null +++ b/prdoc/pr_3286.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Assets: can_decrease/increase for destroying asset is not successful" + +doc: + - audience: Runtime Dev + description: | + Functions `can_decrease` and `can_increase` do not return successful consequence results + for assets undergoing destruction; instead, they return the `UnknownAsset` consequence variant. + This update aligns their behavior with similar functions, such as `reducible_balance`, + `increase_balance`, `decrease_balance`, and `burn`, which return an `AssetNotLive` error + for assets in the process of being destroyed. + +crates: + - name: pallet-assets diff --git a/prdoc/pr_4097.prdoc b/prdoc/pr_4097.prdoc new file mode 100644 index 0000000000000..2804a9571c79e --- /dev/null +++ b/prdoc/pr_4097.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce experimental slot-based collator + +doc: + - audience: Node Operator + description: | + Introduces an experimental collator that is fit fot elastic-scaling. + It can be activated on `test-parachain` and `polkadot-parachain` binaries via + `--experimental-use-slot-based` flag. 
The current implementation is MVP status and purely + for testing. Behaviour can change any time and should not be relied upon in environments with + any stability requirements. + +crates: + - name: cumulus-client-consensus-aura + bump: major + - name: cumulus-client-consensus-common + bump: minor + - name: cumulus-client-pov-recovery + bump: none + validate: false + - name: cumulus-pallet-aura-ext + bump: patch + - name: cumulus-relay-chain-interface + bump: major + validate: false + - name: sc-consensus-slots + bump: minor + - name: sc-basic-authorship + bump: patch + - name: cumulus-client-network + bump: none + validate: false + - name: cumulus-relay-chain-inprocess-interface + bump: minor + - name: sc-consensus-aura + bump: patch + - name: cumulus-relay-chain-rpc-interface + bump: minor + - name: polkadot-parachain-bin + bump: patch + - name: polkadot + bump: none + validate: false diff --git a/prdoc/pr_4522.prdoc b/prdoc/pr_4522.prdoc new file mode 100644 index 0000000000000..c8fdcfa51a419 --- /dev/null +++ b/prdoc/pr_4522.prdoc @@ -0,0 +1,39 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Added runtime support for reporting BEEFY fork voting + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + This PR adds the `report_fork_voting`, `report_future_voting` extrinsics to `pallet-beefy` + and renames the `report_equivocation` extrinsic to `report_double_voting`. + `report_fork_voting` can't be called yet, since it uses `Weight::MAX` weight. We will + add benchmarks for it and set the proper weight in a future PR. + Also a new `AncestryHelper` associated trait was added to `pallet_beefy::Config`. + - audience: Node Dev + description: | + This PR renames the `submit_report_equivocation_unsigned_extrinsic` in `BeefyApi` to + `submit_report_double_voting_unsigned_extrinsic`and bumps the `BeefyApi` version from 3 to 4. + +crates: + - name: pallet-beefy + bump: major + - name: pallet-beefy-mmr + bump: minor + - name: pallet-mmr + bump: major + - name: sc-consensus-beefy + bump: patch + - name: kitchensink-runtime + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: sp-consensus-beefy + bump: major + - name: polkadot-service + bump: patch diff --git a/prdoc/pr_4563.prdoc b/prdoc/pr_4563.prdoc new file mode 100644 index 0000000000000..3780eee5898b5 --- /dev/null +++ b/prdoc/pr_4563.prdoc @@ -0,0 +1,12 @@ +title: Try State Hook for Bounties. + +doc: + - audience: Runtime User + description: | + Invariants for storage items in the bounties pallet. Enforces the following Invariants: + 1.`BountyCount` should be greater or equals to the length of the number of items in `Bounties`. + 2.`BountyCount` should be greater or equals to the length of the number of items in `BountyDescriptions`. + 3. Number of items in `Bounties` should be the same as `BountyDescriptions` length. 
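The three invariants listed above translate almost directly into a try-state check. A plain-Rust sketch over toy maps (the real hook runs over the pallet's storage maps and returns `Result<(), TryRuntimeError>`):

```rust
use std::collections::BTreeMap;

/// Check the bounties invariants described above.
fn check_invariants(
    bounty_count: u32,
    bounties: &BTreeMap<u32, ()>,
    descriptions: &BTreeMap<u32, Vec<u8>>,
) -> Result<(), &'static str> {
    if (bounty_count as usize) < bounties.len() {
        return Err("BountyCount is below the number of Bounties");
    }
    if (bounty_count as usize) < descriptions.len() {
        return Err("BountyCount is below the number of BountyDescriptions");
    }
    if bounties.len() != descriptions.len() {
        return Err("Bounties and BountyDescriptions are out of sync");
    }
    Ok(())
}

fn main() {
    let bounties = BTreeMap::from([(0u32, ()), (1, ())]);
    let descriptions = BTreeMap::from([(0u32, Vec::new()), (1, Vec::new())]);
    assert!(check_invariants(2, &bounties, &descriptions).is_ok());
    assert!(check_invariants(1, &bounties, &descriptions).is_err());
}
```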
+crates: +- name: pallet-bounties + bump: minor diff --git a/prdoc/pr_4566.prdoc b/prdoc/pr_4566.prdoc new file mode 100644 index 0000000000000..ea2979bb363aa --- /dev/null +++ b/prdoc/pr_4566.prdoc @@ -0,0 +1,23 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet_contracts] Add support for transient storage in contracts host functions" + +doc: + - audience: Runtime User + description: | + This PR implements transient storage, which behaves identically to regular storage + but is kept only in memory and discarded after every transaction. + This functionality is similar to the `TSTORE` and `TLOAD` operations used in Ethereum. + The following new host functions have been introduced: `get_transient_storage`, + `set_transient_storage`, `take_transient_storage`, `clear_transient_storage` and + `contains_transient_storage`. + These functions are declared as unstable and thus are not activated. + +crates: + - name: pallet-contracts + bump: major + - name: pallet-contracts-uapi + bump: major + - name: contracts-rococo-runtime + bump: minor diff --git a/prdoc/pr_4663.prdoc b/prdoc/pr_4663.prdoc new file mode 100644 index 0000000000000..74b1274828d5c --- /dev/null +++ b/prdoc/pr_4663.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add elastic scaling MVP guide + +doc: + - audience: Node Operator + description: | + Adds a guide for parachains that want to use the experimental elastic scaling MVP. + Will be viewable at: https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/enable_elastic_scaling_mvp/index.html + +crates: + - name: polkadot-parachain-bin + bump: none diff --git a/prdoc/pr_4738.prdoc b/prdoc/pr_4738.prdoc new file mode 100644 index 0000000000000..751f318e64f34 --- /dev/null +++ b/prdoc/pr_4738.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add CheckMetadata SignedExtension to Rococo and Westend Coretime chains + +doc: + - audience: Runtime User + description: | + This brings support for the new Ledger app and similar hardware wallets to the Coretime + Chain on Rococo and Westend. These hardware wallets will be able to decode the transaction + using the metadata. The runtime will ensure that the metadata used for this decoding process + is correct and that the online wallet did not try to trick you. + +crates: + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major diff --git a/prdoc/pr_4755.prdoc b/prdoc/pr_4755.prdoc new file mode 100644 index 0000000000000..1018446cb67e7 --- /dev/null +++ b/prdoc/pr_4755.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Send PeerViewChange with high priority + +doc: + - audience: Node Dev + description: | + - orchestra updated to 0.4.0, which introduces support for prioritizing system messages. + - PeerViewChange sent with high priority and should be processed first in a queue. + - To count them in tests added tracker to TestSender and TestOverseer. It acts more like a smoke test though. 
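The prioritization described above boils down to an ordering guarantee: anything sent with high priority is drained before the regular queue. A toy sketch of that guarantee (orchestra's actual channel types and APIs differ; the message names are only placeholders):

```rust
use std::collections::VecDeque;

/// Two queues: the high-priority queue is always drained before the normal one.
struct Queues<T> {
    high: VecDeque<T>,
    normal: VecDeque<T>,
}

impl<T> Queues<T> {
    fn new() -> Self {
        Self { high: VecDeque::new(), normal: VecDeque::new() }
    }
    fn send(&mut self, msg: T) {
        self.normal.push_back(msg)
    }
    fn send_with_priority(&mut self, msg: T) {
        self.high.push_back(msg)
    }
    fn next(&mut self) -> Option<T> {
        self.high.pop_front().or_else(|| self.normal.pop_front())
    }
}

fn main() {
    let mut q = Queues::new();
    q.send("ActiveLeavesUpdate");
    q.send_with_priority("PeerViewChange");
    assert_eq!(q.next(), Some("PeerViewChange")); // high priority is processed first
    assert_eq!(q.next(), Some("ActiveLeavesUpdate"));
}
```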
+ + +crates: + - name: polkadot-overseer + bump: minor + - name: polkadot-network-bridge + bump: patch + - name: polkadot-availability-distribution + bump: patch + - name: polkadot-test-malus + bump: patch + - name: polkadot-node-subsystem-test-helpers + bump: patch diff --git a/prdoc/pr_4777.prdoc b/prdoc/pr_4777.prdoc new file mode 100644 index 0000000000000..07fa8decebe08 --- /dev/null +++ b/prdoc/pr_4777.prdoc @@ -0,0 +1,27 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCM builder pattern allows clear_origin before buy_execution. + +doc: + - audience: Runtime Dev + description: | + Added clear_origin as an allowed command after commands that load the holdings register, in the safe xcm builder. + Previously, although it's logically allowed, an XCM could not be built like this: + ```rust + let xcm = Xcm::builder() + .withdraw_asset((Parent, 100u128)) + .clear_origin() + .buy_execution((Parent, 1u128)) + .deposit_asset(All, [0u8; 32]) + .build(); + ``` + You had to use the unsafe_builder. + Now, it's allowed using the default builder. + +crates: +- name: "xcm-procedural" + bump: minor +- name: "staging-xcm" + bump: minor + diff --git a/prdoc/pr_4839.prdoc b/prdoc/pr_4839.prdoc new file mode 100644 index 0000000000000..84bb393d4c454 --- /dev/null +++ b/prdoc/pr_4839.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-insecure-randomness-collective-flip + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-insecure-randomness-collective-flip`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-insecure-randomness-collective-flip + bump: patch diff --git a/prdoc/pr_4840.prdoc b/prdoc/pr_4840.prdoc new file mode 100644 index 0000000000000..265e1f41c3f38 --- /dev/null +++ b/prdoc/pr_4840.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-membership + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-membership`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-membership + bump: minor \ No newline at end of file diff --git a/prdoc/pr_4848.prdoc b/prdoc/pr_4848.prdoc new file mode 100644 index 0000000000000..cbc0c8322d772 --- /dev/null +++ b/prdoc/pr_4848.prdoc @@ -0,0 +1,14 @@ +title: Optimize logic for gossiping assignments + +doc: + - audience: Node Dev + description: | + Optimize the logic for gossiping assignments by obtaining the list of peer ids + from the topology instead of iterating through all connected validators, this + gives us a 15% to 20% reduction in cpu usage. 
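The optimization above replaces a "scan every connected validator and filter" loop with asking the grid topology directly for the target peer set. A conceptual sketch with toy types (the real code works over the `polkadot-node-network-protocol` topology structures):

```rust
use std::collections::HashSet;

/// Toy grid topology: the peers an assignment must be routed to are the union of
/// the sender's row and column neighbours, independent of how many peers happen
/// to be connected overall.
struct GridTopology {
    row_neighbors: HashSet<u32>,
    column_neighbors: HashSet<u32>,
}

impl GridTopology {
    fn peers_to_route_to(&self) -> HashSet<u32> {
        self.row_neighbors.union(&self.column_neighbors).copied().collect()
    }
}

fn main() {
    let topology = GridTopology {
        row_neighbors: HashSet::from([1, 2, 3]),
        column_neighbors: HashSet::from([3, 4]),
    };
    // Only the 4 distinct grid neighbours are considered, not every connected peer.
    assert_eq!(topology.peers_to_route_to(), HashSet::from([1, 2, 3, 4]));
}
```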
+ +crates: +- name: polkadot-approval-distribution + bump: minor +- name: polkadot-node-network-protocol + bump: minor \ No newline at end of file diff --git a/prdoc/pr_4863.prdoc b/prdoc/pr_4863.prdoc new file mode 100644 index 0000000000000..eb43b67a45c5c --- /dev/null +++ b/prdoc/pr_4863.prdoc @@ -0,0 +1,10 @@ +title: "Make `tracing::log` work in the runtime" + +doc: + - audience: Runtime Dev + description: | + Make `tracing::log` work in the runtime as `log` works in the runtime. + +crates: + - name: sp-runtime + bump: patch diff --git a/prdoc/pr_4871.prdoc b/prdoc/pr_4871.prdoc new file mode 100644 index 0000000000000..6ff36f59d7008 --- /dev/null +++ b/prdoc/pr_4871.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-tips + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-tips`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + +crates: + - name: pallet-tips + bump: minor diff --git a/prdoc/pr_4885.prdoc b/prdoc/pr_4885.prdoc new file mode 100644 index 0000000000000..50dc31bc1b8fa --- /dev/null +++ b/prdoc/pr_4885.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-transaction-storage + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-transaction-storage`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + +crates: + - name: pallet-transaction-storage + bump: minor diff --git a/prdoc/pr_4888.prdoc b/prdoc/pr_4888.prdoc new file mode 100644 index 0000000000000..e8cfb25d924d2 --- /dev/null +++ b/prdoc/pr_4888.prdoc @@ -0,0 +1,35 @@ +title: "Allow any asset over the bridge lane between the two Asset Hubs" + +doc: + - audience: Runtime User + description: | + Allow all Rococo-native, Westend-native and Ethereum-native assets to flow over + the bridge between the Rococo and Westend AssetHubs. + + On Rococo Asset Hub, we allow Westend Asset Hub to act as reserve for any asset + native to the Westend ecosystem. + We also allow Ethereum contracts to act as reserves for the foreign assets + identified by the same respective contracts locations (on the other side of Snowbridge). + + On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset + native to the Rococo or Ethereum ecosystems (practically providing Westend access + to Ethereum assets through double bridging: Ethereum <> Rococo <> Westend). 
+ +crates: + - name: assets-common + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: asset-hub-rococo-emulated-chain + bump: minor + - name: asset-hub-rococo-integration-tests + bump: minor + - name: bridge-hub-rococo-integration-tests + bump: minor + - name: bridge-hub-westend-integration-tests + bump: minor + - name: emulated-integration-tests-common + bump: minor + diff --git a/prdoc/pr_4902.prdoc b/prdoc/pr_4902.prdoc new file mode 100644 index 0000000000000..19fe168a74abe --- /dev/null +++ b/prdoc/pr_4902.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-vesting + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-vesting`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + +crates: + - name: pallet-vesting + bump: minor diff --git a/prdoc/pr_4912.prdoc b/prdoc/pr_4912.prdoc new file mode 100644 index 0000000000000..dd96054b81fa3 --- /dev/null +++ b/prdoc/pr_4912.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the pallet-babe + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-babe`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + When accessed outside the pallet, use the public functions of storage. + +crates: + - name: pallet-babe + bump: minor diff --git a/prdoc/pr_4922.prdoc b/prdoc/pr_4922.prdoc new file mode 100644 index 0000000000000..2e2dd26947c0d --- /dev/null +++ b/prdoc/pr_4922.prdoc @@ -0,0 +1,15 @@ +title: Optimize finalization performance + +doc: + - audience: Node Dev + description: | + Finalization algorithm was replaced with a more efficient version, data structures refactored to be faster and do + fewer memory allocations. As the result some APIs have changed in a minor, but incompatible way. + +crates: +- name: sc-client-api + bump: major +- name: sc-client-db + bump: major +- name: sp-blockchain + bump: major diff --git a/prdoc/pr_4932.prdoc b/prdoc/pr_4932.prdoc new file mode 100644 index 0000000000000..94af00d9249eb --- /dev/null +++ b/prdoc/pr_4932.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove relay-chain consensus authoring support for asset-hub chains from polkadot-parachain. + +doc: + - audience: Node Operator + description: | + The polkadot-parachain node had special handling for asset-hub parachains. They started out + using relay-chain consensus and later migrated to Aura as soon as it became available. The codepath for authoring + with relay chain consensus has been removed, since all asset hub chains have long migrated. 
+ +crates: + - name: polkadot-parachain-bin + bump: major diff --git a/prdoc/pr_4935.prdoc b/prdoc/pr_4935.prdoc new file mode 100644 index 0000000000000..2b06899b63398 --- /dev/null +++ b/prdoc/pr_4935.prdoc @@ -0,0 +1,75 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bridges V2 refactoring backport and `pallet_bridge_messages` simplifications" + +doc: + - audience: Runtime Dev + description: | + This introduces several simplifications to the pallet_bridge_messages::Config configuration. + Types like `BridgedChainId`, `MaxUnrewardedRelayerEntriesAtInboundLane`, `MaxUnconfirmedMessagesAtInboundLane`, `MaximalOutboundPayloadSize`, + `InboundRelayer`, `TargetHeaderChain`, and `SourceHeaderChain` were removed. + Now, you only need to provide specific bridging chain configurations for `ThisChain`, `BridgedChain`, and `BridgedHeaderChain`. + + If you previously specified implementations for the bp_runtime::Chain* traits, those will fit here exactly, for example: + ``` + type ThisChain = bp_bridge_hub_rococo::BridgeHubRococo; + type BridgedChain = bp_bridge_hub_westend::BridgeHubWestend; + type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< + Runtime, + BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >; + ``` + +crates: + - name: pallet-bridge-messages + bump: major + - name: bridge-runtime-common + bump: major + - name: bp-header-chain + bump: major + - name: bp-runtime + bump: major + - name: bp-messages + bump: major + - name: bp-polkadot-core + bump: patch + - name: bp-bridge-hub-kusama + bump: minor + - name: bp-bridge-hub-polkadot + bump: minor + - name: bp-bridge-hub-rococo + bump: minor + - name: bp-bridge-hub-westend + bump: minor + - name: bp-kusama + bump: minor + - name: bp-polkadot + bump: minor + - name: bp-polkadot-bulletin + bump: minor + - name: bp-rococo + bump: minor + - name: bp-test-utils + bump: patch + - name: bp-westend + bump: minor + - name: bridge-hub-test-utils + bump: major + - name: pallet-bridge-grandpa + bump: patch + - name: pallet-bridge-parachains + bump: patch + - name: pallet-bridge-relayers + bump: patch + - name: pallet-xcm-bridge-hub + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major diff --git a/prdoc/pr_4943.prdoc b/prdoc/pr_4943.prdoc new file mode 100644 index 0000000000000..705325126060b --- /dev/null +++ b/prdoc/pr_4943.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Update definition of frozen balance (docs PR) + +doc: + - audience: Runtime Dev + description: | + This PR fixes a bug in the docs located in the definition of frozen balances. In addition, it extends that definition for completeness. 
+ +crates: +- name: frame-support + bump: patch \ No newline at end of file diff --git a/prdoc/pr_4972.prdoc b/prdoc/pr_4972.prdoc new file mode 100644 index 0000000000000..dd9f1b531aad1 --- /dev/null +++ b/prdoc/pr_4972.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove `pallet::getter` usage from pallet-session" + +doc: + - audience: Runtime Dev + description: | + This PR removes the `pallet::getter`s from `pallet-session`. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-session + bump: minor diff --git a/prdoc/pr_4978.prdoc b/prdoc/pr_4978.prdoc new file mode 100644 index 0000000000000..1f86d512f2c78 --- /dev/null +++ b/prdoc/pr_4978.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add MAX_INSTRUCTIONS_TO_DECODE to XCMv2 + +doc: + - audience: Runtime User + description: | + Added a max number of instructions to XCMv2. If using XCMv2, you'll have to take this limit into account. + It was set to 100. + - audience: Runtime Dev + description: | + Added a max number of instructions to XCMv2. If using XCMv2, you'll have to take this limit into account. + It was set to 100. + +crates: + - name: staging-xcm + bump: minor diff --git a/prdoc/pr_5011.prdoc b/prdoc/pr_5011.prdoc new file mode 100644 index 0000000000000..cb827bae6c591 --- /dev/null +++ b/prdoc/pr_5011.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Use `BadOrigin` from `sp_runtime`" + +doc: + - audience: Runtime Dev + description: | + This PR refactor usages of deprecated `frame_support::error::BadOrigin` to `sp_runtime::traits::BadOrigin` + +crates: +- name: pallet-collective-content + bump: patch +- name: polkadot-runtime-common + bump: patch +- name: polkadot-runtime-parachains + bump: patch +- name: pallet-alliance + bump: patch +- name: pallet-contracts + bump: patch +- name: pallet-democracy + bump: patch +- name: pallet-nomination-pools + bump: patch +- name: pallet-ranked-collective + bump: patch +- name: pallet-utility + bump: patch diff --git a/scripts/bench-all.sh b/scripts/bench-all.sh new file mode 100755 index 0000000000000..e5512e26bbad7 --- /dev/null +++ b/scripts/bench-all.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +get_arg optional --pallet "$@" +PALLET="${out:-""}" + +if [[ ! -z "$PALLET" ]]; then + . "$(dirname "${BASH_SOURCE[0]}")/lib/bench-all-pallet.sh" "$@" +else + . 
"$(dirname "${BASH_SOURCE[0]}")/bench.sh" --subcommand=all "$@" +fi diff --git a/scripts/bench.sh b/scripts/bench.sh new file mode 100755 index 0000000000000..2f4ef7ec6a141 --- /dev/null +++ b/scripts/bench.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Initially based on https://github.com/paritytech/bench-bot/blob/cd3b2943d911ae29e41fe6204788ef99c19412c3/bench.js + +# Most external variables used in this script, such as $GH_CONTRIBUTOR, are +# related to https://github.com/paritytech/try-runtime-bot + +# This script relies on $GITHUB_TOKEN which is probably a protected GitLab CI +# variable; if this assumption holds true, it is implied that this script should +# be ran only on protected pipelines + +set -eu -o pipefail +shopt -s inherit_errexit + +# realpath allows to reuse the current +BENCH_ROOT_DIR=$(realpath "$(dirname "${BASH_SOURCE[0]}")") + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + +repository_name="$(basename "$PWD")" + +get_arg optional --target_dir "$@" +target_dir="${out:-""}" + +get_arg optional --noexit "$@" +noexit="${out:-""}" + +output_path="." + +profile="production" + +if [[ "$repository_name" == "polkadot-sdk" ]]; then + output_path="./$target_dir" +fi + +cargo_run_benchmarks="cargo run --quiet --profile=${profile}" + +echo "Repository: $repository_name" +echo "Target Dir: $target_dir" +echo "Output Path: $output_path" + +cargo_run() { + echo "Running $cargo_run_benchmarks" "${args[@]}" + + # if not patched with PATCH_something=123 then use --locked + if [[ -z "${BENCH_PATCHED:-}" ]]; then + cargo_run_benchmarks+=" --locked" + fi + + $cargo_run_benchmarks "${args[@]}" +} + + +main() { + + # Remove the "github" remote since the same repository might be reused by a + # GitLab runner, therefore the remote might already exist from a previous run + # in case it was not cleaned up properly for some reason + &>/dev/null git remote remove github || : + + tmp_dirs=() + cleanup() { + exit_code=$? + # Clean up the "github" remote at the end since it contains the + # $GITHUB_TOKEN secret, which is only available for protected pipelines on + # GitLab + &>/dev/null git remote remove github || : + rm -rf "${tmp_dirs[@]}" + echo "Done, exit: $exit_code" + exit $exit_code + } + + # avoid exit if --noexit is passed + if [ -z "$noexit" ]; then + trap cleanup EXIT + fi + + # set -x + + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + case "$subcommand" in + runtime|pallet|xcm) + echo 'Running bench_pallet' + . "$BENCH_ROOT_DIR/lib/bench-pallet.sh" "$@" + ;; + overhead) + echo 'Running bench_overhead' + . "$BENCH_ROOT_DIR/lib/bench-overhead.sh" "$@" + ;; + all) + echo "Running all-$target_dir" + . "$BENCH_ROOT_DIR/lib/bench-all-${target_dir}.sh" "$@" + ;; + *) + die "Invalid subcommand $subcommand to process_args" + ;; + esac + + # set +x + + # in case we used diener to patch some dependency during benchmark execution, + # revert the patches so that they're not included in the diff + git checkout --quiet HEAD Cargo.toml + + # Save the generated weights to GitLab artifacts in case commit+push fails + echo "Showing weights diff for command" + git diff -P | tee -a "${ARTIFACTS_DIR}/weights.patch" + echo "Wrote weights patch to \"${ARTIFACTS_DIR}/weights.patch\"" + + + # instead of using `cargo run --locked`, we allow the Cargo files to be updated + # but avoid committing them. 
It is so `cmd_runner_apply_patches` can work + git restore --staged Cargo.* +} + +main "$@" diff --git a/scripts/command-utils.sh b/scripts/command-utils.sh new file mode 100644 index 0000000000000..252e4c86480e6 --- /dev/null +++ b/scripts/command-utils.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +if [ "${LOADED_UTILS_SH:-}" ]; then + return +else + export LOADED_UTILS_SH=true +fi + +export ARTIFACTS_DIR="$PWD/.git/.artifacts" + +die() { + if [ "${1:-}" ]; then + >&2 echo "$1" + fi + exit 1 +} + +get_arg() { + local arg_type="$1" + shift + + local is_required + case "$arg_type" in + required|required-many) + is_required=true + ;; + optional|optional-many) ;; + *) + die "Invalid is_required argument \"$2\" in get_arg" + ;; + esac + + local has_many_values + if [ "${arg_type: -6}" == "-many" ]; then + has_many_values=true + fi + + local option_arg="$1" + shift + + local args=("$@") + + unset out + out=() + + local get_next_arg + for arg in "${args[@]}"; do + if [ "${get_next_arg:-}" ]; then + out+=("$arg") + unset get_next_arg + if [ ! "${has_many_values:-}" ]; then + break + fi + # --foo=bar (get the value after '=') + elif [ "${arg:0:$(( ${#option_arg} + 1 ))}" == "$option_arg=" ]; then + out+=("${arg:$(( ${#option_arg} + 1 ))}") + if [ ! "${has_many_values:-}" ]; then + break + fi + # --foo bar (get the next argument) + elif [ "$arg" == "$option_arg" ]; then + get_next_arg=true + fi + done + + # arg list ended with --something but no argument was provided next + if [ "${get_next_arg:-}" ]; then + die "Expected argument after \"${args[-1]}"\" + fi + + if [ "${out[0]:-}" ]; then + if [ ! "${has_many_values:-}" ]; then + out="${out[0]}" + fi + elif [ "${is_required:-}" ]; then + die "Argument $option_arg is required, but was not found" + else + unset out + fi +} diff --git a/scripts/lib/bench-all-cumulus.sh b/scripts/lib/bench-all-cumulus.sh new file mode 100755 index 0000000000000..f4c2a35c6b6b7 --- /dev/null +++ b/scripts/lib/bench-all-cumulus.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# originally moved from https://github.com/paritytech/cumulus/blob/445f9277ab55b4d930ced4fbbb38d27c617c6658/scripts/benchmarks-ci.sh + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +POLKADOT_PARACHAIN="./target/$profile/polkadot-parachain" + +run_cumulus_bench() { + local artifactsDir="$ARTIFACTS_DIR" + local category=$1 + local runtimeName=$2 + local paraId=${3:-} + + local benchmarkOutput="$output_path/parachains/runtimes/$category/$runtimeName/src/weights" + local benchmarkRuntimeChain + if [[ ! -z "$paraId" ]]; then + benchmarkRuntimeChain="${runtimeName}-dev-$paraId" + else + benchmarkRuntimeChain="$runtimeName-dev" + fi + + local benchmarkMetadataOutputDir="$artifactsDir/$runtimeName" + mkdir -p "$benchmarkMetadataOutputDir" + + # Load all pallet names in an array. + echo "[+] Listing pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain ..." 
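+  # Extract the pallet names for this chain: skip the header row of the
+  # `benchmark pallet --list` output, keep only the first (pallet) column,
+  # then sort and de-duplicate so each pallet is benchmarked once.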
+ local pallets=($( + $POLKADOT_PARACHAIN benchmark pallet --list --chain="${benchmarkRuntimeChain}" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq + )) + + if [ ${#pallets[@]} -ne 0 ]; then + echo "[+] Benchmarking ${#pallets[@]} pallets for runtime $runtimeName for chain: $benchmarkRuntimeChain, pallets:" + for pallet in "${pallets[@]}"; do + echo " [+] $pallet" + done + else + echo "$runtimeName pallet list not found in benchmarks-ci.sh" + exit 1 + fi + + for pallet in "${pallets[@]}"; do + # (by default) do not choose output_file, like `pallet_assets.rs` because it does not work for multiple instances + # `benchmark pallet` command will decide the output_file name if there are multiple instances + local output_file="" + local extra_args="" + # a little hack for pallet_xcm_benchmarks - we want to force custom implementation for XcmWeightInfo + if [[ "$pallet" == "pallet_xcm_benchmarks::generic" ]] || [[ "$pallet" == "pallet_xcm_benchmarks::fungible" ]]; then + output_file="xcm/${pallet//::/_}.rs" + extra_args="--template=$output_path/templates/xcm-bench-template.hbs" + fi + $POLKADOT_PARACHAIN benchmark pallet \ + $extra_args \ + --chain="${benchmarkRuntimeChain}" \ + --wasm-execution=compiled \ + --pallet="$pallet" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic='*' \ + --steps=50 \ + --repeat=20 \ + --json \ + --header="$output_path/file_header.txt" \ + --output="${benchmarkOutput}/${output_file}" >> "$benchmarkMetadataOutputDir/${pallet//::/_}_benchmark.json" + done +} + + +echo "[+] Compiling benchmarks..." +cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot-parachain-bin + +# Run benchmarks for all pallets of a given runtime if runtime argument provided +get_arg optional --runtime "$@" +runtime="${out:-""}" + +if [[ $runtime ]]; then + paraId="" + case "$runtime" in + asset-*) + category="assets" + ;; + collectives-*) + category="collectives" + ;; + coretime-*) + category="coretime" + ;; + bridge-*) + category="bridge-hubs" + ;; + contracts-*) + category="contracts" + ;; + people-*) + category="people" + ;; + glutton-*) + category="glutton" + paraId="1300" + ;; + *) + echo "Unknown runtime: $runtime" + exit 1 + ;; + esac + + run_cumulus_bench $category $runtime $paraId + +else # run all + # Assets + run_cumulus_bench assets asset-hub-rococo + run_cumulus_bench assets asset-hub-westend + + # Collectives + run_cumulus_bench collectives collectives-westend + + # Coretime + run_cumulus_bench coretime coretime-rococo + run_cumulus_bench coretime coretime-westend + + # People + run_cumulus_bench people people-rococo + run_cumulus_bench people people-westend + + # Bridge Hubs + run_cumulus_bench bridge-hubs bridge-hub-rococo + run_cumulus_bench bridge-hubs bridge-hub-westend + + # Glutton + run_cumulus_bench glutton glutton-westend 1300 +fi diff --git a/scripts/lib/bench-all-pallet.sh b/scripts/lib/bench-all-pallet.sh new file mode 100644 index 0000000000000..e6908045ddbd7 --- /dev/null +++ b/scripts/lib/bench-all-pallet.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -eu -o pipefail +shopt -s inherit_errexit +shopt -s globstar + +. "$(dirname "${BASH_SOURCE[0]}")/../command-utils.sh" + +get_arg required --pallet "$@" +PALLET="${out:-""}" + +REPO_NAME="$(basename "$PWD")" +BASE_COMMAND="$(dirname "${BASH_SOURCE[0]}")/../../bench/bench.sh --noexit=true --subcommand=pallet" + +WEIGHT_FILE_PATHS=( $(find . 
-type f -name "${PALLET}.rs" -path "**/weights/*" | sed 's|^\./||g') ) + +# convert pallet_ranked_collective to ranked-collective +CLEAN_PALLET=$(echo $PALLET | sed 's/pallet_//g' | sed 's/_/-/g') + +# add substrate pallet weights to a list +SUBSTRATE_PALLET_PATH=$(ls substrate/frame/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${SUBSTRATE_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$SUBSTRATE_PALLET_PATH") +fi + +# add trappist pallet weights to a list +TRAPPIST_PALLET_PATH=$(ls pallet/$CLEAN_PALLET/src/weights.rs || :) +if [ ! -z "${TRAPPIST_PALLET_PATH}" ]; then + WEIGHT_FILE_PATHS+=("$TRAPPIST_PALLET_PATH") +fi + +COMMANDS=() + +if [ "${#WEIGHT_FILE_PATHS[@]}" -eq 0 ]; then + echo "No weights files found for pallet: $PALLET" + exit 1 +else + echo "Found weights files for pallet: $PALLET" +fi + +for f in ${WEIGHT_FILE_PATHS[@]}; do + echo "- $f" + # f examples: + # cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + # polkadot/runtime/rococo/src/weights/pallet_balances.rs + # runtime/trappist/src/weights/pallet_assets.rs + TARGET_DIR=$(echo $f | cut -d'/' -f 1) + + if [ "$REPO_NAME" == "polkadot-sdk" ]; then + case $TARGET_DIR in + cumulus) + TYPE=$(echo $f | cut -d'/' -f 2) + # Example: cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_balances.rs + if [ "$TYPE" == "parachains" ]; then + RUNTIME=$(echo $f | cut -d'/' -f 5) + RUNTIME_DIR=$(echo $f | cut -d'/' -f 4) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --runtime_dir=$RUNTIME_DIR --target_dir=$TARGET_DIR --pallet=$PALLET") + fi + ;; + polkadot) + # Example: polkadot/runtime/rococo/src/weights/pallet_balances.rs + RUNTIME=$(echo $f | cut -d'/' -f 3) + COMMANDS+=("$BASE_COMMAND --runtime=$RUNTIME --target_dir=$TARGET_DIR --pallet=$PALLET") + ;; + substrate) + # Example: substrate/frame/contracts/src/weights.rs + COMMANDS+=("$BASE_COMMAND --target_dir=$TARGET_DIR --runtime=dev --pallet=$PALLET") + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + ;; + esac + fi + + if [ "$REPO_NAME" == "trappist" ]; then + case $TARGET_DIR in + runtime) + TYPE=$(echo $f | cut -d'/' -f 2) + if [ "$TYPE" == "trappist" || "$TYPE" == "stout" ]; then + # Example: runtime/trappist/src/weights/pallet_assets.rs + COMMANDS+=("$BASE_COMMAND --target_dir=trappist --runtime=$TYPE --pallet=$PALLET") + fi + ;; + *) + echo "Unknown dir: $TARGET_DIR" + exit 1 + ;; + esac + fi +done + +for cmd in "${COMMANDS[@]}"; do + echo "Running command: $cmd" + . $cmd +done diff --git a/scripts/lib/bench-all-polkadot.sh b/scripts/lib/bench-all-polkadot.sh new file mode 100644 index 0000000000000..ac52e00140e38 --- /dev/null +++ b/scripts/lib/bench-all-polkadot.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +# Runs all benchmarks for all pallets, for a given runtime, provided by $1 +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/polkadot/pull/6508/files +# original source: https://github.com/paritytech/polkadot/blob/b9842c4b52f6791fef6c11ecd020b22fe614f041/scripts/run_all_benches.sh + +get_arg required --runtime "$@" +runtime="${out:-""}" + +# default RUST_LOG is error, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling benchmarks..." +cargo build --profile $profile --locked --features=runtime-benchmarks -p polkadot + +POLKADOT_BIN="./target/$profile/polkadot" + +# Update the block and extrinsic overhead weights. +echo "[+] Benchmarking block and extrinsic overheads..." 
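+# `benchmark overhead` measures the base block execution and per-extrinsic weights
+# (10 warm-up rounds, 100 measured repeats) and writes the generated constants to
+# the runtime's `constants/src/weights/` directory given via `--weight-path`.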
+OUTPUT=$( + $POLKADOT_BIN benchmark overhead \ + --chain="${runtime}-dev" \ + --wasm-execution=compiled \ + --weight-path="$output_path/runtime/${runtime}/constants/src/weights/" \ + --warmup=10 \ + --repeat=100 \ + --header="$output_path/file_header.txt" +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + + +# Load all pallet names in an array. +PALLETS=($( + $POLKADOT_BIN benchmark pallet --list --chain="${runtime}-dev" |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +echo "[+] Benchmarking ${#PALLETS[@]} pallets for runtime $runtime" + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" +# Delete the error file before each run. +rm -f $ERR_FILE + +# Benchmark each pallet. +for PALLET in "${PALLETS[@]}"; do + echo "[+] Benchmarking $PALLET for $runtime"; + + output_file="" + if [[ $PALLET == *"::"* ]]; then + # translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + output_file="${PALLET//::/_}.rs" + fi + + OUTPUT=$( + $POLKADOT_BIN benchmark pallet \ + --chain="${runtime}-dev" \ + --steps=50 \ + --repeat=20 \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --pallet="$PALLET" \ + --extrinsic="*" \ + --execution=wasm \ + --wasm-execution=compiled \ + --header="$output_path/file_header.txt" \ + --output="$output_path/runtime/${runtime}/src/weights/${output_file}" 2>&1 + ) + if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi +done + +# Check if the error file exists. +if [ -f "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-all-substrate.sh b/scripts/lib/bench-all-substrate.sh new file mode 100644 index 0000000000000..eeb18cdd8bbb3 --- /dev/null +++ b/scripts/lib/bench-all-substrate.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# This file is part of Substrate. +# Copyright (C) 2022 Parity Technologies (UK) Ltd. +# SPDX-License-Identifier: Apache-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script has three parts which all use the Substrate runtime: +# - Pallet benchmarking to update the pallet weights +# - Overhead benchmarking for the Extrinsic and Block weights +# - Machine benchmarking +# +# Should be run on a reference machine to gain accurate benchmarks +# current reference machine: https://github.com/paritytech/substrate/pull/5848 + +# Original source: https://github.com/paritytech/substrate/blob/ff9921a260a67e3a71f25c8b402cd5c7da787a96/scripts/run_all_benchmarks.sh +# Fail if any sub-command in a pipe fails, not just the last one. +set -o pipefail +# Fail on undeclared variables. +set -u +# Fail if any sub-command fails. +set -e +# Fail on traps. +# set -E + +# default RUST_LOG is warn, but could be overridden +export RUST_LOG="${RUST_LOG:-error}" + +echo "[+] Compiling Substrate benchmarks..." 
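+# Build the Substrate node with `runtime-benchmarks` enabled so the `benchmark`
+# subcommands (and the pallets' benchmarking code) are available; `--locked`
+# keeps Cargo.lock unchanged on the reference machine.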
+cargo build --profile=$profile --locked --features=runtime-benchmarks -p staging-node-cli + +# The executable to use. +SUBSTRATE="./target/$profile/substrate-node" + +# Manually exclude some pallets. +EXCLUDED_PALLETS=( + # Helper pallets + "pallet_election_provider_support_benchmarking" + # Pallets without automatic benchmarking + "pallet_babe" + "pallet_grandpa" + "pallet_mmr" + "pallet_offences" + # Only used for testing, does not need real weights. + "frame_benchmarking_pallet_pov" + "pallet_example_tasks" + "pallet_example_basic" + "pallet_example_split" + "pallet_example_kitchensink" + "pallet_example_mbm" + "tasks_example" +) + +# Load all pallet names in an array. +ALL_PALLETS=($( + $SUBSTRATE benchmark pallet --list --chain=dev |\ + tail -n+2 |\ + cut -d',' -f1 |\ + sort |\ + uniq +)) + +# Define the error file. +ERR_FILE="${ARTIFACTS_DIR}/benchmarking_errors.txt" + +# Delete the error file before each run. +rm -f "$ERR_FILE" + +mkdir -p "$(dirname "$ERR_FILE")" + +# Update the block and extrinsic overhead weights. +echo "[+] Benchmarking block and extrinsic overheads..." +OUTPUT=$( + $SUBSTRATE benchmark overhead \ + --chain=dev \ + --wasm-execution=compiled \ + --weight-path="$output_path/frame/support/src/weights/" \ + --header="$output_path/HEADER-APACHE2" \ + --warmup=10 \ + --repeat=100 2>&1 +) +if [ $? -ne 0 ]; then + echo "$OUTPUT" >> "$ERR_FILE" + echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..." +fi + +echo "[+] Benchmarking ${#ALL_PALLETS[@]} Substrate pallets and excluding ${#EXCLUDED_PALLETS[@]}." + +echo "[+] Excluded pallets ${EXCLUDED_PALLETS[@]}" +echo "[+] ------ " +echo "[+] Whole list pallets ${ALL_PALLETS[@]}" + +# Benchmark each pallet. +for PALLET in "${ALL_PALLETS[@]}"; do + FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; + WEIGHT_FILE="$output_path/frame/${FOLDER}/src/weights.rs" + + # Skip the pallet if it is in the excluded list. + + if [[ " ${EXCLUDED_PALLETS[@]} " =~ " ${PALLET} " ]]; then + echo "[+] Skipping $PALLET as it is in the excluded list." + continue + fi + + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; + + set +e # Disable exit on error for the benchmarking of the pallets + OUTPUT=$( + $SUBSTRATE benchmark pallet \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet="$PALLET" \ + --no-storage-info \ + --no-median-slopes \ + --no-min-squares \ + --extrinsic="*" \ + --wasm-execution=compiled \ + --heap-pages=4096 \ + --output="$WEIGHT_FILE" \ + --header="$output_path/HEADER-APACHE2" \ + --template="$output_path/.maintain/frame-weight-template.hbs" 2>&1 + ) + if [ $? -ne 0 ]; then + echo -e "$PALLET: $OUTPUT\n" >> "$ERR_FILE" + echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..." + fi + set -e # Re-enable exit on error +done + + +# Check if the error file exists. +if [ -s "$ERR_FILE" ]; then + echo "[-] Some benchmarks failed. See: $ERR_FILE" + exit 1 +else + echo "[+] All benchmarks passed." +fi diff --git a/scripts/lib/bench-overhead.sh b/scripts/lib/bench-overhead.sh new file mode 100644 index 0000000000000..c4cca8b4c128c --- /dev/null +++ b/scripts/lib/bench-overhead.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. 
"$THIS_DIR/../command-utils.sh" + +bench_overhead_common_args=( + -- + benchmark + overhead + --wasm-execution=compiled + --warmup=10 + --repeat=100 +) +bench_overhead() { + local args + case "$target_dir" in + substrate) + args=( + --bin=substrate + "${bench_overhead_common_args[@]}" + --header="$output_path/HEADER-APACHE2" + --weight-path="$output_path/frame/support/src/weights" + --chain="dev" + ) + ;; + polkadot) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + --bin=polkadot + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/runtime/$runtime/constants/src/weights" + --chain="$runtime-dev" + ) + ;; + cumulus) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + -p=polkadot-parachain-bin + "${bench_overhead_common_args[@]}" + --header="$output_path/file_header.txt" + --weight-path="$output_path/parachains/runtimes/assets/$runtime/src/weights" + --chain="$runtime" + ) + ;; + trappist) + get_arg required --runtime "$@" + local runtime="${out:-""}" + args=( + "${bench_overhead_common_args[@]}" + --header="$output_path/templates/file_header.txt" + --weight-path="$output_path/runtime/$runtime/src/weights" + --chain="$runtime-dev" + ) + ;; + *) + die "Target Dir \"$target_dir\" is not supported in bench_overhead" + ;; + esac + + cargo_run "${args[@]}" +} + +bench_overhead "$@" diff --git a/scripts/lib/bench-pallet.sh b/scripts/lib/bench-pallet.sh new file mode 100644 index 0000000000000..15eac31e3a45c --- /dev/null +++ b/scripts/lib/bench-pallet.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +THIS_DIR=$(dirname "${BASH_SOURCE[0]}") +. "$THIS_DIR/../command-utils.sh" + +bench_pallet_common_args=( + -- + benchmark + pallet + --steps=50 + --repeat=20 + --extrinsic="*" + --wasm-execution=compiled + --heap-pages=4096 + --json-file="${ARTIFACTS_DIR}/bench.json" +) +bench_pallet() { + get_arg required --subcommand "$@" + local subcommand="${out:-""}" + + get_arg required --runtime "$@" + local runtime="${out:-""}" + + get_arg required --pallet "$@" + local pallet="${out:-""}" + + local args + case "$target_dir" in + substrate) + args=( + --features=runtime-benchmarks + --manifest-path="$output_path/bin/node/cli/Cargo.toml" + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="$runtime" + ) + + case "$subcommand" in + pallet) + # Translates e.g. "pallet_foo::bar" to "pallet_foo_bar" + local output_dir="${pallet//::/_}" + + # Substrate benchmarks are output to the "frame" directory but they aren't + # named exactly after the $pallet argument. 
For example: + # - When $pallet == pallet_balances, the output folder is frame/balances + # - When $pallet == frame_benchmarking, the output folder is frame/benchmarking + # The common pattern we infer from those examples is that we should remove + # the prefix + if [[ "$output_dir" =~ ^[A-Za-z]*[^A-Za-z](.*)$ ]]; then + output_dir="${BASH_REMATCH[1]}" + fi + + # We also need to translate '_' to '-' due to the folders' naming + # conventions + output_dir="${output_dir//_/-}" + + args+=( + --header="$output_path/HEADER-APACHE2" + --output="$output_path/frame/$output_dir/src/weights.rs" + --template="$output_path/.maintain/frame-weight-template.hbs" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + polkadot) + # For backward compatibility: replace "-dev" with "" + runtime=${runtime/-dev/} + + local weights_dir="$output_path/runtime/${runtime}/src/weights" + + args=( + --bin=polkadot + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + ) + + case "$subcommand" in + pallet) + args+=( + --header="$output_path/file_header.txt" + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --header="$output_path/file_header.txt" + --template="$output_path/xcm/pallet-xcm-benchmarks/template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + cumulus) + get_arg required --runtime_dir "$@" + local runtime_dir="${out:-""}" + local chain="$runtime" + + # to support specifying parachain id from runtime name (e.g. ["glutton-westend", "glutton-westend-dev-1300"]) + # If runtime ends with "-dev" or "-dev-\d+", leave as it is, otherwise concat "-dev" at the end of $chain + if [[ ! 
"$runtime" =~ -dev(-[0-9]+)?$ ]]; then + chain="${runtime}-dev" + fi + + # replace "-dev" or "-dev-\d+" with "" for runtime + runtime=$(echo "$runtime" | sed 's/-dev.*//g') + + args=( + -p=polkadot-parachain-bin + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${chain}" + --header="$output_path/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/" + ) + ;; + xcm) + mkdir -p "$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm" + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="$output_path/parachains/runtimes/$runtime_dir/$runtime/src/weights/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + trappist) + local weights_dir="$output_path/runtime/$runtime/src/weights" + + args=( + --features=runtime-benchmarks + "${bench_pallet_common_args[@]}" + --pallet="$pallet" + --chain="${runtime}-dev" + --header="$output_path/templates/file_header.txt" + ) + + case "$subcommand" in + pallet) + args+=( + --output="${weights_dir}/" + ) + ;; + xcm) + args+=( + --template="$output_path/templates/xcm-bench-template.hbs" + --output="${weights_dir}/xcm/" + ) + ;; + *) + die "Subcommand $subcommand is not supported for $target_dir in bench_pallet" + ;; + esac + ;; + *) + die "Repository $target_dir is not supported in bench_pallet" + ;; + esac + + cargo_run "${args[@]}" +} + +bench_pallet "$@" diff --git a/scripts/sync.sh b/scripts/sync.sh new file mode 100755 index 0000000000000..b5d8a52199371 --- /dev/null +++ b/scripts/sync.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +. "$(realpath "$(dirname "${BASH_SOURCE[0]}")/command-utils.sh")" + + +# Function to check syncing status +check_syncing() { + # Send the system_health request and parse the isSyncing field + RESPONSE=$(curl -sSX POST http://127.0.0.1:9944 \ + --header 'Content-Type: application/json' \ + --data-raw '{"jsonrpc": "2.0", "method": "system_health", "params": [], "id": "1"}') + + # Check for errors in the curl command + if [ $? -ne 0 ]; then + echo "Error: Unable to send request to Polkadot node" + fi + + IS_SYNCING=$(echo $RESPONSE | jq -r '.result.isSyncing') + + # Check for errors in the jq command or missing field in the response + if [ $? -ne 0 ] || [ "$IS_SYNCING" == "null" ]; then + echo "Error: Unable to parse sync status from response" + fi + + # Return the isSyncing value + echo $IS_SYNCING +} + +main() { + get_arg required --chain "$@" + local chain="${out:-""}" + + get_arg required --type "$@" + local type="${out:-""}" + + export RUST_LOG="${RUST_LOG:-remote-ext=debug,runtime=trace}" + + cargo build --release + + cp "./target/release/polkadot" ./polkadot-bin + + # Start sync. + # "&" runs the process in the background + # "> /dev/tty" redirects the output of the process to the terminal + ./polkadot-bin --sync="$type" --chain="$chain" > "$ARTIFACTS_DIR/sync.log" 2>&1 & + + # Get the PID of process + POLKADOT_SYNC_PID=$! + + sleep 10 + + # Poll the node every 100 seconds until syncing is complete + while :; do + SYNC_STATUS="$(check_syncing)" + if [ "$SYNC_STATUS" == "true" ]; then + echo "Node is still syncing..." + sleep 100 + elif [ "$SYNC_STATUS" == "false" ]; then + echo "Node sync is complete!" 
+ kill "$POLKADOT_SYNC_PID" # Stop the Polkadot node process once syncing is complete + exit 0 # Success + elif [[ "$SYNC_STATUS" = Error:* ]]; then + echo "$SYNC_STATUS" + exit 1 # Error + else + echo "Unknown error: $SYNC_STATUS" + exit 1 # Unknown error + fi + done +} + +main "$@" diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index b756f3504655b..6b061955184ea 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -15,33 +15,33 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -array-bytes = "6.2.2" -clap = { version = "4.5.3", features = ["derive"] } +array-bytes = { workspace = true, default-features = true } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -node-primitives = { path = "../primitives" } -node-testing = { path = "../testing" } -kitchensink-runtime = { path = "../runtime" } -sc-client-api = { path = "../../../client/api" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +node-primitives = { workspace = true, default-features = true } +node-testing = { workspace = true } +kitchensink-runtime = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -derive_more = { version = "0.99.17", default-features = false, features = ["display"] } -kvdb = "0.13.0" -kvdb-rocksdb = "0.19.0" -sp-trie = { path = "../../../primitives/trie" } -sp-core = { path = "../../../primitives/core" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } -hash-db = "0.16.0" -tempfile = "3.1.0" -fs_extra = "1" -rand = { version = "0.8.5", features = ["small_rng"] } -lazy_static = "1.4.0" -parity-db = "0.4.12" -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -futures = { version = "0.3.30", features = ["thread-pool"] } +derive_more = { features = ["display"], workspace = true } +kvdb = { workspace = true } +kvdb-rocksdb = { workspace = true } +sp-trie = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-timestamp = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +hash-db = { workspace = true, default-features = true } +tempfile = { workspace = true } +fs_extra = { workspace = true } +rand = { features = ["small_rng"], workspace = true, default-features = true } +lazy_static = { workspace = true } +parity-db = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +futures = { features = ["thread-pool"], 
workspace = true } diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs index 78b280076e0bd..e340869dea028 100644 --- a/substrate/bin/node/bench/src/import.rs +++ b/substrate/bin/node/bench/src/import.rs @@ -122,7 +122,8 @@ impl core::Benchmark for ImportBenchmark { match self.block_type { BlockType::RandomTransfersKeepAlive => { // should be 8 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned (timestamp and glutton bloat) while the rest are + // signed in the block. // those 8 events per signed are: // - transaction paid for the transaction payment // - withdraw (Balances::Withdraw) for charging the transaction fee @@ -135,18 +136,18 @@ impl core::Benchmark for ImportBenchmark { // - extrinsic success assert_eq!( kitchensink_runtime::System::events().len(), - (self.block.extrinsics.len() - 1) * 8 + 1, + (self.block.extrinsics.len() - 2) * 8 + 2, ); }, BlockType::Noop => { assert_eq!( kitchensink_runtime::System::events().len(), // should be 2 per signed extrinsic + 1 per unsigned - // we have 1 unsigned and the rest are signed in the block + // we have 2 unsigned and the rest are signed in the block // those 2 events per signed are: // - deposit event for charging transaction fee // - extrinsic success - (self.block.extrinsics.len() - 1) * 2 + 1, + (self.block.extrinsics.len() - 2) * 2 + 2, ); }, _ => {}, diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 929cd6a29e388..ab665f0792a46 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -37,53 +37,53 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -array-bytes = "6.1" -clap = { version = "4.5.3", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +array-bytes = { workspace = true, default-features = true } +clap = { features = ["derive"], optional = true, workspace = true } +codec = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.30" +jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -rand = "0.8" +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } # The Polkadot-SDK: -polkadot-sdk = { path = "../../../../umbrella", features = ["node"] } +polkadot-sdk = { features = ["node"], workspace = true, default-features = true } # Shared code between the staging node and kitchensink runtime: -kitchensink-runtime = { path = "../runtime" } -node-rpc = { path = "../rpc" } -node-primitives = { path = "../primitives" } -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +kitchensink-runtime = { workspace = true } +node-rpc = { workspace = true } +node-primitives = { workspace = true, default-features = true } +node-inspect = { optional = true, workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.30" -tempfile = "3.1.0" -assert_cmd = "2.0.2" -nix = { version = "0.28.0", features = ["signal"] } -regex = "1.6.0" -platforms = "3.0" -soketto = "0.7.1" -criterion = { version = "0.5.1", features = ["async_tokio"] } -tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } -tokio-util = { version = "0.7.4", features = 
["compat"] } -wait-timeout = "0.2" -wat = "1.0" +futures = { workspace = true } +tempfile = { workspace = true } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +regex = { workspace = true } +platforms = { workspace = true } +soketto = { workspace = true } +criterion = { features = ["async_tokio"], workspace = true, default-features = true } +tokio = { features = ["macros", "parking_lot", "time"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } +wait-timeout = { workspace = true } +wat = { workspace = true } serde_json = { workspace = true, default-features = true } -scale-info = { version = "2.11.1", features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } # These testing-only dependencies are not exported by the Polkadot-SDK crate: -node-testing = { path = "../testing" } -substrate-cli-test-utils = { path = "../../../test-utils/cli" } -sc-service-test = { path = "../../../client/service/test" } +node-testing = { workspace = true } +substrate-cli-test-utils = { workspace = true } +sc-service-test = { workspace = true } [build-dependencies] -clap = { version = "4.5.3", optional = true } -clap_complete = { version = "4.0.2", optional = true } +clap = { optional = true, workspace = true } +clap_complete = { optional = true, workspace = true } -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +node-inspect = { optional = true, workspace = true, default-features = true } -polkadot-sdk = { path = "../../../../umbrella", features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true } +polkadot-sdk = { features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true, workspace = true, default-features = true } [features] default = ["cli"] diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index e21fbb47da8c4..b63e5ff549ef9 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -16,6 +16,7 @@ "balances": { "balances": [] }, + "broker": {}, "transactionPayment": { "multiplier": "1000000000000000000" }, @@ -74,17 +75,20 @@ "glutton": { "compute": "0", "storage": "0", + "blockLength": "0", "trashDataCount": 0 }, "assets": { "assets": [], "metadata": [], - "accounts": [] + "accounts": [], + "nextAssetId": null }, "poolAssets": { "assets": [], "metadata": [], - "accounts": [] + "accounts": [], + "nextAssetId": null }, "transactionStorage": { "byteFee": 10, diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 5e4488903bf45..68769ffb4fa44 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-cli = { path = "../../../client/cli" } -sc-client-api = { path = "../../../client/api" } -sc-service = { path = "../../../client/service", default-features = false } -sp-blockchain = { path = 
"../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } +sc-cli = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-service = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/substrate/bin/node/primitives/Cargo.toml b/substrate/bin/node/primitives/Cargo.toml index 24279ad09c3d9..de295fd59d45a 100644 --- a/substrate/bin/node/primitives/Cargo.toml +++ b/substrate/bin/node/primitives/Cargo.toml @@ -16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 6ae80eb578596..fa1e96e67e982 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -16,33 +16,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["server"] } -node-primitives = { path = "../primitives" } -pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc" } -mmr-rpc = { path = "../../../client/merkle-mountain-range/rpc" } -sc-chain-spec = { path = "../../../client/chain-spec" } -sc-client-api = { path = "../../../client/api" } -sc-consensus-babe = { path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { path = "../../../client/consensus/babe/rpc" } -sc-consensus-beefy = { path = "../../../client/consensus/beefy" } -sc-consensus-beefy-rpc = { path = "../../../client/consensus/beefy/rpc" } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } -sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } -sc-consensus-grandpa-rpc = { path = "../../../client/consensus/grandpa/rpc" } -sc-mixnet = { path = "../../../client/mixnet" } -sc-rpc = { path = "../../../client/rpc" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sc-rpc-spec-v2 = { path = "../../../client/rpc-spec-v2" } -sc-sync-state-rpc = { path = "../../../client/sync-state-rpc" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-statement-store = { path = "../../../primitives/statement-store" } -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -substrate-state-trie-migration-rpc = { path = "../../../utils/frame/rpc/state-trie-migration-rpc" } +jsonrpsee 
= { features = ["server"], workspace = true } +node-primitives = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +mmr-rpc = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-babe-rpc = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-beefy-rpc = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index e8cc7b3482b66..c1c470f1dcd6d 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -19,29 +19,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -static_assertions = "1.1.0" +], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +static_assertions = { workspace = true, default-features = true } log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } # pallet-asset-conversion: turn on "num-traits" feature -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } -polkadot-sdk = { path = "../../../../umbrella", features = ["runtime", "tuples-96"], default-features = false } +polkadot-sdk = { features = ["runtime", "tuples-96"], workspace = true } # shared code between runtime and node -node-primitives = { path = "../primitives", default-features = false } +node-primitives = { workspace = true } # Example pallets that are not published: -pallet-example-mbm = { path = 
"../../../frame/examples/multi-block-migrations", default-features = false } -pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } +pallet-example-mbm = { workspace = true } +pallet-example-tasks = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/bin/node/runtime/src/assets_api.rs b/substrate/bin/node/runtime/src/assets_api.rs index 38ec56507113f..98187e7391f3e 100644 --- a/substrate/bin/node/runtime/src/assets_api.rs +++ b/substrate/bin/node/runtime/src/assets_api.rs @@ -20,8 +20,8 @@ use polkadot_sdk::*; +use alloc::vec::Vec; use codec::Codec; -use sp_std::vec::Vec; sp_api::decl_runtime_apis! { pub trait AssetsApi diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index dbe562857c99f..6c121fad624b2 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -17,8 +17,7 @@ //! Some configurable implementations as associated type for the substrate runtime. -use polkadot_sdk::*; - +use alloc::boxed::Box; use frame_support::{ pallet_prelude::*, traits::{ @@ -29,7 +28,7 @@ use frame_support::{ use pallet_alliance::{IdentityVerifier, ProposalIndex, ProposalProvider}; use pallet_asset_tx_payment::HandleCredit; use pallet_identity::legacy::IdentityField; -use sp_std::prelude::*; +use polkadot_sdk::*; use crate::{ AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Hash, diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 8fb59a9d8474c..a1896325ee936 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -22,8 +22,11 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limits. #![recursion_limit = "1024"] +extern crate alloc; + use polkadot_sdk::*; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, @@ -49,7 +52,7 @@ use frame_support::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing, - OnUnbalanced, WithdrawReasons, + OnUnbalanced, VariantCountOf, WithdrawReasons, }, weights::{ constants::{ @@ -100,7 +103,6 @@ use sp_runtime::{ ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, RuntimeDebug, }; -use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -542,7 +544,7 @@ impl pallet_balances::Config for Runtime { type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; } parameter_types! 
{ @@ -1038,6 +1040,7 @@ impl pallet_ranked_collective::Config for Runtime { type MinRankOfClass = traits::Identity; type VoteWeight = pallet_ranked_collective::Geometric; type MemberSwappedHandler = (CoreFellowship, Salary); + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (CoreFellowship, Salary); } @@ -1212,8 +1215,6 @@ impl pallet_membership::Config for Runtime { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 1 * DOLLARS; pub const SpendPeriod: BlockNumber = 1 * DAYS; pub const Burn: Permill = Permill::from_percent(50); pub const TipCountdown: BlockNumber = 1 * DAYS; @@ -1230,19 +1231,11 @@ parameter_types! { impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionAtLeast, - >; type RejectOrigin = EitherOfDiverse< EnsureRoot, pallet_collective::EnsureProportionMoreThan, >; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type ProposalBondMaximum = (); type SpendPeriod = SpendPeriod; type Burn = Burn; type BurnDestination = (); @@ -1296,6 +1289,7 @@ impl pallet_bounties::Config for Runtime { type MaximumReasonLength = MaximumReasonLength; type WeightInfo = pallet_bounties::weights::SubstrateWeight; type ChildBountyManager = ChildBounties; + type OnSlash = Treasury; } parameter_types! { @@ -1340,6 +1334,7 @@ impl pallet_tips::Config for Runtime { type TipReportDepositBase = TipReportDepositBase; type MaxTipAmount = ConstU128<{ 500 * DOLLARS }>; type WeightInfo = pallet_tips::weights::SubstrateWeight; + type OnSlash = Treasury; } parameter_types! { @@ -1378,6 +1373,7 @@ impl pallet_contracts::Config for Runtime { type UploadOrigin = EnsureSigned; type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxTransientStorageSize = ConstU32<{ 1 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(not(feature = "runtime-benchmarks"))] type Migrations = (); @@ -1874,6 +1870,7 @@ impl pallet_core_fellowship::Config for Runtime { type InductOrigin = pallet_core_fellowship::EnsureInducted; type ApproveOrigin = EnsureRootWithSuccess>; type PromoteOrigin = EnsureRootWithSuccess>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<16_384>; type MaxRank = ConstU32<9>; } @@ -2103,10 +2100,6 @@ impl OnUnbalanced> for IntoAuthor { } } -parameter_types! 
{ - pub storage CoretimeRevenue: Option<(BlockNumber, Balance)> = None; -} - pub struct CoretimeProvider; impl CoretimeInterface for CoretimeProvider { type AccountId = AccountId; @@ -2122,15 +2115,6 @@ impl CoretimeInterface for CoretimeProvider { _end_hint: Option, ) { } - fn check_notify_revenue_info() -> Option<(u32, Self::Balance)> { - let revenue = CoretimeRevenue::get(); - CoretimeRevenue::set(&None); - revenue - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: u32, revenue: Self::Balance) { - CoretimeRevenue::set(&Some((when, revenue))); - } } impl pallet_broker::Config for Runtime { @@ -2248,248 +2232,248 @@ mod runtime { pub struct Runtime; #[runtime::pallet_index(0)] - pub type System = frame_system; + pub type System = frame_system::Pallet; #[runtime::pallet_index(1)] - pub type Utility = pallet_utility; + pub type Utility = pallet_utility::Pallet; #[runtime::pallet_index(2)] - pub type Babe = pallet_babe; + pub type Babe = pallet_babe::Pallet; #[runtime::pallet_index(3)] - pub type Timestamp = pallet_timestamp; + pub type Timestamp = pallet_timestamp::Pallet; // Authorship must be before session in order to note author in the correct session and era // for im-online and staking. #[runtime::pallet_index(4)] - pub type Authorship = pallet_authorship; + pub type Authorship = pallet_authorship::Pallet; #[runtime::pallet_index(5)] - pub type Indices = pallet_indices; + pub type Indices = pallet_indices::Pallet; #[runtime::pallet_index(6)] - pub type Balances = pallet_balances; + pub type Balances = pallet_balances::Pallet; #[runtime::pallet_index(7)] - pub type TransactionPayment = pallet_transaction_payment; + pub type TransactionPayment = pallet_transaction_payment::Pallet; #[runtime::pallet_index(8)] - pub type AssetTxPayment = pallet_asset_tx_payment; + pub type AssetTxPayment = pallet_asset_tx_payment::Pallet; #[runtime::pallet_index(9)] - pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment; + pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment::Pallet; #[runtime::pallet_index(10)] - pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase::Pallet; #[runtime::pallet_index(11)] - pub type Staking = pallet_staking; + pub type Staking = pallet_staking::Pallet; #[runtime::pallet_index(12)] - pub type Session = pallet_session; + pub type Session = pallet_session::Pallet; #[runtime::pallet_index(13)] - pub type Democracy = pallet_democracy; + pub type Democracy = pallet_democracy::Pallet; #[runtime::pallet_index(14)] - pub type Council = pallet_collective; + pub type Council = pallet_collective::Pallet; #[runtime::pallet_index(15)] - pub type TechnicalCommittee = pallet_collective; + pub type TechnicalCommittee = pallet_collective::Pallet; #[runtime::pallet_index(16)] - pub type Elections = pallet_elections_phragmen; + pub type Elections = pallet_elections_phragmen::Pallet; #[runtime::pallet_index(17)] - pub type TechnicalMembership = pallet_membership; + pub type TechnicalMembership = pallet_membership::Pallet; #[runtime::pallet_index(18)] - pub type Grandpa = pallet_grandpa; + pub type Grandpa = pallet_grandpa::Pallet; #[runtime::pallet_index(19)] - pub type Treasury = pallet_treasury; + pub type Treasury = pallet_treasury::Pallet; #[runtime::pallet_index(20)] - pub type AssetRate = pallet_asset_rate; + pub type AssetRate = pallet_asset_rate::Pallet; #[runtime::pallet_index(21)] - pub type Contracts = 
pallet_contracts; + pub type Contracts = pallet_contracts::Pallet; #[runtime::pallet_index(22)] - pub type Sudo = pallet_sudo; + pub type Sudo = pallet_sudo::Pallet; #[runtime::pallet_index(23)] - pub type ImOnline = pallet_im_online; + pub type ImOnline = pallet_im_online::Pallet; #[runtime::pallet_index(24)] - pub type AuthorityDiscovery = pallet_authority_discovery; + pub type AuthorityDiscovery = pallet_authority_discovery::Pallet; #[runtime::pallet_index(25)] - pub type Offences = pallet_offences; + pub type Offences = pallet_offences::Pallet; #[runtime::pallet_index(26)] - pub type Historical = pallet_session_historical; + pub type Historical = pallet_session_historical::Pallet; #[runtime::pallet_index(27)] - pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip; + pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip::Pallet; #[runtime::pallet_index(28)] - pub type Identity = pallet_identity; + pub type Identity = pallet_identity::Pallet; #[runtime::pallet_index(29)] - pub type Society = pallet_society; + pub type Society = pallet_society::Pallet; #[runtime::pallet_index(30)] - pub type Recovery = pallet_recovery; + pub type Recovery = pallet_recovery::Pallet; #[runtime::pallet_index(31)] - pub type Vesting = pallet_vesting; + pub type Vesting = pallet_vesting::Pallet; #[runtime::pallet_index(32)] - pub type Scheduler = pallet_scheduler; + pub type Scheduler = pallet_scheduler::Pallet; #[runtime::pallet_index(33)] - pub type Glutton = pallet_glutton; + pub type Glutton = pallet_glutton::Pallet; #[runtime::pallet_index(34)] - pub type Preimage = pallet_preimage; + pub type Preimage = pallet_preimage::Pallet; #[runtime::pallet_index(35)] - pub type Proxy = pallet_proxy; + pub type Proxy = pallet_proxy::Pallet; #[runtime::pallet_index(36)] - pub type Multisig = pallet_multisig; + pub type Multisig = pallet_multisig::Pallet; #[runtime::pallet_index(37)] - pub type Bounties = pallet_bounties; + pub type Bounties = pallet_bounties::Pallet; #[runtime::pallet_index(38)] - pub type Tips = pallet_tips; + pub type Tips = pallet_tips::Pallet; #[runtime::pallet_index(39)] - pub type Assets = pallet_assets; + pub type Assets = pallet_assets::Pallet; #[runtime::pallet_index(40)] - pub type PoolAssets = pallet_assets; + pub type PoolAssets = pallet_assets::Pallet; #[runtime::pallet_index(41)] - pub type Beefy = pallet_beefy; + pub type Beefy = pallet_beefy::Pallet; // MMR leaf construction must be after session in order to have a leaf's next_auth_set // refer to block. See issue polkadot-fellows/runtimes#160 for details. 
#[runtime::pallet_index(42)] - pub type Mmr = pallet_mmr; + pub type Mmr = pallet_mmr::Pallet; #[runtime::pallet_index(43)] - pub type MmrLeaf = pallet_beefy_mmr; + pub type MmrLeaf = pallet_beefy_mmr::Pallet; #[runtime::pallet_index(44)] - pub type Lottery = pallet_lottery; + pub type Lottery = pallet_lottery::Pallet; #[runtime::pallet_index(45)] - pub type Nis = pallet_nis; + pub type Nis = pallet_nis::Pallet; #[runtime::pallet_index(46)] - pub type Uniques = pallet_uniques; + pub type Uniques = pallet_uniques::Pallet; #[runtime::pallet_index(47)] - pub type Nfts = pallet_nfts; + pub type Nfts = pallet_nfts::Pallet; #[runtime::pallet_index(48)] - pub type NftFractionalization = pallet_nft_fractionalization; + pub type NftFractionalization = pallet_nft_fractionalization::Pallet; #[runtime::pallet_index(49)] - pub type Salary = pallet_salary; + pub type Salary = pallet_salary::Pallet; #[runtime::pallet_index(50)] - pub type CoreFellowship = pallet_core_fellowship; + pub type CoreFellowship = pallet_core_fellowship::Pallet; #[runtime::pallet_index(51)] - pub type TransactionStorage = pallet_transaction_storage; + pub type TransactionStorage = pallet_transaction_storage::Pallet; #[runtime::pallet_index(52)] - pub type VoterList = pallet_bags_list; + pub type VoterList = pallet_bags_list::Pallet; #[runtime::pallet_index(53)] - pub type StateTrieMigration = pallet_state_trie_migration; + pub type StateTrieMigration = pallet_state_trie_migration::Pallet; #[runtime::pallet_index(54)] - pub type ChildBounties = pallet_child_bounties; + pub type ChildBounties = pallet_child_bounties::Pallet; #[runtime::pallet_index(55)] - pub type Referenda = pallet_referenda; + pub type Referenda = pallet_referenda::Pallet; #[runtime::pallet_index(56)] - pub type Remark = pallet_remark; + pub type Remark = pallet_remark::Pallet; #[runtime::pallet_index(57)] - pub type RootTesting = pallet_root_testing; + pub type RootTesting = pallet_root_testing::Pallet; #[runtime::pallet_index(58)] - pub type ConvictionVoting = pallet_conviction_voting; + pub type ConvictionVoting = pallet_conviction_voting::Pallet; #[runtime::pallet_index(59)] - pub type Whitelist = pallet_whitelist; + pub type Whitelist = pallet_whitelist::Pallet; #[runtime::pallet_index(60)] - pub type AllianceMotion = pallet_collective; + pub type AllianceMotion = pallet_collective::Pallet; #[runtime::pallet_index(61)] - pub type Alliance = pallet_alliance; + pub type Alliance = pallet_alliance::Pallet; #[runtime::pallet_index(62)] - pub type NominationPools = pallet_nomination_pools; + pub type NominationPools = pallet_nomination_pools::Pallet; #[runtime::pallet_index(63)] - pub type RankedPolls = pallet_referenda; + pub type RankedPolls = pallet_referenda::Pallet; #[runtime::pallet_index(64)] - pub type RankedCollective = pallet_ranked_collective; + pub type RankedCollective = pallet_ranked_collective::Pallet; #[runtime::pallet_index(65)] - pub type AssetConversion = pallet_asset_conversion; + pub type AssetConversion = pallet_asset_conversion::Pallet; #[runtime::pallet_index(66)] - pub type FastUnstake = pallet_fast_unstake; + pub type FastUnstake = pallet_fast_unstake::Pallet; #[runtime::pallet_index(67)] - pub type MessageQueue = pallet_message_queue; + pub type MessageQueue = pallet_message_queue::Pallet; #[runtime::pallet_index(68)] - pub type Pov = frame_benchmarking_pallet_pov; + pub type Pov = frame_benchmarking_pallet_pov::Pallet; #[runtime::pallet_index(69)] - pub type TxPause = pallet_tx_pause; + pub type TxPause = pallet_tx_pause::Pallet; 
#[runtime::pallet_index(70)] - pub type SafeMode = pallet_safe_mode; + pub type SafeMode = pallet_safe_mode::Pallet; #[runtime::pallet_index(71)] - pub type Statement = pallet_statement; + pub type Statement = pallet_statement::Pallet; #[runtime::pallet_index(72)] - pub type MultiBlockMigrations = pallet_migrations; + pub type MultiBlockMigrations = pallet_migrations::Pallet; #[runtime::pallet_index(73)] - pub type Broker = pallet_broker; + pub type Broker = pallet_broker::Pallet; #[runtime::pallet_index(74)] - pub type TasksExample = pallet_example_tasks; + pub type TasksExample = pallet_example_tasks::Pallet; #[runtime::pallet_index(75)] - pub type Mixnet = pallet_mixnet; + pub type Mixnet = pallet_mixnet::Pallet; #[runtime::pallet_index(76)] - pub type Parameters = pallet_parameters; + pub type Parameters = pallet_parameters::Pallet; #[runtime::pallet_index(77)] - pub type SkipFeelessPayment = pallet_skip_feeless_payment; + pub type SkipFeelessPayment = pallet_skip_feeless_payment::Pallet; #[runtime::pallet_index(78)] - pub type PalletExampleMbms = pallet_example_mbm; + pub type PalletExampleMbms = pallet_example_mbm::Pallet; #[runtime::pallet_index(79)] - pub type AssetConversionMigration = pallet_asset_conversion_ops; + pub type AssetConversionMigration = pallet_asset_conversion_ops::Pallet; } /// The address format for describing accounts. @@ -2567,6 +2551,7 @@ impl pallet_beefy::Config for Runtime { type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = BeefySetIdSessionEntries; type OnNewValidatorSet = MmrLeaf; + type AncestryHelper = MmrLeaf; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -2678,7 +2663,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> alloc::vec::Vec { Runtime::metadata_versions() } } @@ -3051,7 +3036,7 @@ impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(4)] impl sp_consensus_beefy::BeefyApi for Runtime { fn beefy_genesis() -> Option { pallet_beefy::GenesisBlock::::get() @@ -3061,7 +3046,7 @@ impl_runtime_apis! { Beefy::validator_set() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, @@ -3071,7 +3056,7 @@ impl_runtime_apis! 
{ ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Beefy::submit_unsigned_equivocation_report( + Beefy::submit_unsigned_double_voting_report( equivocation_proof, key_owner_proof, ) diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 3ba3f07510e00..90c9ee0555cf4 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -16,36 +16,36 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -fs_extra = "1" -futures = "0.3.30" +codec = { workspace = true, default-features = true } +fs_extra = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -tempfile = "3.1.0" -frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } -frame-system = { path = "../../../frame/system" } -node-cli = { package = "staging-node-cli", path = "../cli" } -node-primitives = { path = "../primitives" } -kitchensink-runtime = { path = "../runtime" } -pallet-asset-conversion = { path = "../../../frame/asset-conversion" } -pallet-assets = { path = "../../../frame/assets" } -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } -sc-block-builder = { path = "../../../client/block-builder" } -sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", features = ["rocksdb"] } -sc-consensus = { path = "../../../client/consensus/common" } -sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", features = ["rocksdb", "test-helpers"] } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-io = { path = "../../../primitives/io" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } -substrate-test-client = { path = "../../../test-utils/client" } +tempfile = { workspace = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +node-cli = { workspace = true } +node-primitives = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } +pallet-asset-conversion = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } +sc-consensus 
= { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true } +substrate-test-client = { workspace = true } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 88585649acfe0..083f2191f3c5a 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -24,8 +24,8 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -sc-chain-spec = { path = "../../../client/chain-spec", features = ["clap"] } +sc-chain-spec = { features = ["clap"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 18da3c30691bd..39fa054b4806d 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -17,16 +17,19 @@ // along with this program. If not, see . use chain_spec_builder::{ - generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd, ConvertToRawCmd, - DisplayPresetCmd, ListPresetsCmd, UpdateCodeCmd, VerifyCmd, + generate_chain_spec_for_runtime, AddCodeSubstituteCmd, ChainSpecBuilder, ChainSpecBuilderCmd, + ConvertToRawCmd, DisplayPresetCmd, ListPresetsCmd, UpdateCodeCmd, VerifyCmd, }; use clap::Parser; use sc_chain_spec::{ - update_code_in_json_chain_spec, GenericChainSpec, GenesisConfigBuilderRuntimeCaller, + set_code_substitute_in_json_chain_spec, update_code_in_json_chain_spec, GenericChainSpec, + GenesisConfigBuilderRuntimeCaller, }; use staging_chain_spec_builder as chain_spec_builder; use std::fs; +type ChainSpec = GenericChainSpec<(), ()>; + //avoid error message escaping fn main() { match inner_main() { @@ -50,7 +53,7 @@ fn inner_main() -> Result<(), String> { ref input_chain_spec, ref runtime_wasm_path, }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let mut chain_spec_json = serde_json::from_str::(&chain_spec.as_json(false)?) 
@@ -65,8 +68,29 @@ fn inner_main() -> Result<(), String> { .map_err(|e| format!("to pretty failed: {e}"))?; fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, + ChainSpecBuilderCmd::AddCodeSubstitute(AddCodeSubstituteCmd { + ref input_chain_spec, + ref runtime_wasm_path, + block_height, + }) => { + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; + + let mut chain_spec_json = + serde_json::from_str::(&chain_spec.as_json(false)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; + + set_code_substitute_in_json_chain_spec( + &mut chain_spec_json, + &fs::read(runtime_wasm_path.as_path()) + .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], + block_height, + ); + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let chain_spec_json = serde_json::from_str::(&chain_spec.as_json(true)?) @@ -77,7 +101,7 @@ fn inner_main() -> Result<(), String> { fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => { - let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let chain_spec = ChainSpec::from_json_file(input_chain_spec.clone())?; let _ = serde_json::from_str::(&chain_spec.as_json(true)?) .map_err(|e| format!("Conversion to json failed: {e}"))?; }, diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 0f7c003fc8c2d..6c679f109a002 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -125,7 +125,7 @@ use serde_json::Value; /// A utility to easily create a chain spec definition. #[derive(Debug, Parser)] -#[command(rename_all = "kebab-case")] +#[command(rename_all = "kebab-case", version, about)] pub struct ChainSpecBuilder { #[command(subcommand)] pub command: ChainSpecBuilderCmd, @@ -143,6 +143,7 @@ pub enum ChainSpecBuilderCmd { ConvertToRaw(ConvertToRawCmd), ListPresets(ListPresetsCmd), DisplayPreset(DisplayPresetCmd), + AddCodeSubstitute(AddCodeSubstituteCmd), } /// Create a new chain spec by interacting with the provided runtime wasm blob. @@ -222,6 +223,25 @@ pub struct UpdateCodeCmd { pub runtime_wasm_path: PathBuf, } +/// Add a code substitute in the chain spec. +/// +/// The `codeSubstitute` object of the chain spec will be updated with the block height as key and +/// runtime code as value. This operation supports both plain and raw formats. The `codeSubstitute` +/// field instructs the node to use the provided runtime code at the given block height. This is +/// useful when the chain can not progress on its own due to a bug that prevents block-building. +/// +/// Note: For parachains, the validation function on the relaychain needs to be adjusted too, +/// otherwise blocks built using the substituted parachain runtime will be rejected. +#[derive(Parser, Debug, Clone)] +pub struct AddCodeSubstituteCmd { + /// Chain spec to be updated. + pub input_chain_spec: PathBuf, + /// New runtime wasm blob that should replace the existing code. 
+ pub runtime_wasm_path: PathBuf, + /// The block height at which the code should be substituted. + pub block_height: u64, +} + /// Converts the given chain spec into the raw format. #[derive(Parser, Debug, Clone)] pub struct ConvertToRawCmd { diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml index 8dc4bf254b2d4..5aa013097c150 100644 --- a/substrate/bin/utils/subkey/Cargo.toml +++ b/substrate/bin/utils/subkey/Cargo.toml @@ -20,5 +20,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -sc-cli = { path = "../../../client/cli" } +clap = { features = ["derive"], workspace = true } +sc-cli = { workspace = true, default-features = true } diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index 2c268b548ea9c..5a3b05aa8a98d 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -19,5 +19,5 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-core = { path = "../../primitives/core" } -sp-wasm-interface = { path = "../../primitives/wasm-interface" } +sp-core = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 147ea2bfbf5df..a64ee3ab4ce19 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -17,30 +17,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -fnv = "1.0.6" -futures = "0.3.30" +], workspace = true } +fnv = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-executor = { path = "../executor" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core", default-features = false } -sp-database = { path = "../../primitives/database" } -sp-externalities = { path = "../../primitives/externalities" } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-statement-store = { path = "../../primitives/statement-store" } -sp-storage = { path = "../../primitives/storage" } -sp-trie = { path = "../../primitives/trie" } +parking_lot = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-database = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-runtime = 
{ workspace = true } +sp-state-machine = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] thiserror = { workspace = true } -sp-test-primitives = { path = "../../primitives/test-primitives" } -substrate-test-runtime = { path = "../../test-utils/runtime" } +sp-test-primitives = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index 31b100433c708..0b2a349524018 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -217,7 +217,8 @@ pub trait BlockImportOperation { where I: IntoIterator, Option>)>; - /// Mark a block as finalized. + /// Mark a block as finalized, if multiple blocks are finalized in the same operation then they + /// must be marked in ascending order. fn mark_finalized( &mut self, hash: Block::Hash, diff --git a/substrate/client/api/src/leaves.rs b/substrate/client/api/src/leaves.rs index e129de8bf3fad..70efe8b19c627 100644 --- a/substrate/client/api/src/leaves.rs +++ b/substrate/client/api/src/leaves.rs @@ -45,33 +45,20 @@ pub struct RemoveOutcome { } /// Removed leaves after a finalization action. -pub struct FinalizationOutcome { - removed: BTreeMap, Vec>, +pub struct FinalizationOutcome +where + I: Iterator, +{ + removed: I, } -impl FinalizationOutcome { - /// Merge with another. This should only be used for displaced items that - /// are produced within one transaction of each other. - pub fn merge(&mut self, mut other: Self) { - // this will ignore keys that are in duplicate, however - // if these are actually produced correctly via the leaf-set within - // one transaction, then there will be no overlap in the keys. - self.removed.append(&mut other.removed); - } - - /// Iterate over all displaced leaves. - pub fn leaves(&self) -> impl Iterator { - self.removed.values().flatten() - } - +impl FinalizationOutcome +where + I: Iterator, +{ /// Constructor - pub fn new(new_displaced: impl Iterator) -> Self { - let mut removed = BTreeMap::, Vec>::new(); - for (hash, number) in new_displaced { - removed.entry(Reverse(number)).or_default().push(hash); - } - - FinalizationOutcome { removed } + pub fn new(new_displaced: I) -> Self { + FinalizationOutcome { removed: new_displaced } } } @@ -86,7 +73,7 @@ pub struct LeafSet { impl LeafSet where H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, + N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode, { /// Construct a new, blank leaf set. 
pub fn new() -> Self { @@ -117,13 +104,13 @@ where let number = Reverse(number); let removed = if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); + let parent_number = Reverse(number.0 - N::one()); self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash) } else { None }; - self.insert_leaf(number.clone(), hash.clone()); + self.insert_leaf(number, hash.clone()); ImportOutcome { inserted: LeafSetItem { hash, number }, removed } } @@ -150,7 +137,7 @@ where let inserted = parent_hash.and_then(|parent_hash| { if number.0 != N::zero() { - let parent_number = Reverse(number.0.clone() - N::one()); + let parent_number = Reverse(number.0 - N::one()); self.insert_leaf(parent_number, parent_hash.clone()); Some(parent_hash) } else { @@ -162,11 +149,12 @@ where } /// Remove all leaves displaced by the last block finalization. - pub fn remove_displaced_leaves(&mut self, displaced_leaves: &FinalizationOutcome) { - for (number, hashes) in &displaced_leaves.removed { - for hash in hashes.iter() { - self.remove_leaf(number, hash); - } + pub fn remove_displaced_leaves(&mut self, displaced_leaves: FinalizationOutcome) + where + I: Iterator, + { + for (number, hash) in displaced_leaves.removed { + self.remove_leaf(&Reverse(number), &hash); } } @@ -186,13 +174,13 @@ where let items = self .storage .iter() - .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) + .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), *number))) .collect::>(); - for (hash, number) in &items { + for (hash, number) in items { if number.0 > best_number { assert!( - self.remove_leaf(number, hash), + self.remove_leaf(&number, &hash), "item comes from an iterator over storage; qed", ); } @@ -207,7 +195,7 @@ where // we need to make sure that the best block exists in the leaf set as // this is an invariant of regular block import. if !leaves_contains_best { - self.insert_leaf(best_number.clone(), best_hash.clone()); + self.insert_leaf(best_number, best_hash.clone()); } } @@ -229,7 +217,7 @@ where column: u32, prefix: &[u8], ) { - let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); + let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0, h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); } @@ -274,7 +262,7 @@ where /// Returns the highest leaf and all hashes associated to it. pub fn highest_leaf(&self) -> Option<(N, &[H])> { - self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) + self.storage.iter().next().map(|(k, v)| (k.0, &v[..])) } } @@ -286,13 +274,13 @@ pub struct Undo<'a, H: 'a, N: 'a> { impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, + N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode, { /// Undo an imported block by providing the import operation outcome. /// No additional operations should be performed between import and undo. pub fn undo_import(&mut self, outcome: ImportOutcome) { if let Some(removed_hash) = outcome.removed { - let removed_number = Reverse(outcome.inserted.number.0.clone() - N::one()); + let removed_number = Reverse(outcome.inserted.number.0 - N::one()); self.inner.insert_leaf(removed_number, removed_hash); } self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash); @@ -302,7 +290,7 @@ where /// No additional operations should be performed between remove and undo. 
pub fn undo_remove(&mut self, outcome: RemoveOutcome) { if let Some(inserted_hash) = outcome.inserted { - let inserted_number = Reverse(outcome.removed.number.0.clone() - N::one()); + let inserted_number = Reverse(outcome.removed.number.0 - N::one()); self.inner.remove_leaf(&inserted_number, &inserted_hash); } self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash); @@ -310,8 +298,13 @@ where /// Undo a finalization operation by providing the displaced leaves. /// No additional operations should be performed between finalization and undo. - pub fn undo_finalization(&mut self, mut outcome: FinalizationOutcome) { - self.inner.storage.append(&mut outcome.removed); + pub fn undo_finalization(&mut self, outcome: FinalizationOutcome) + where + I: Iterator, + { + for (number, hash) in outcome.removed { + self.inner.storage.entry(Reverse(number)).or_default().push(hash); + } } } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 435ca88a80079..309c9c542a0b1 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -17,38 +17,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -futures = "0.3.30" -futures-timer = "3.0.1" -ip_network = "0.4.1" -libp2p = { version = "0.51.4", features = ["ed25519", "kad"] } -multihash = { version = "0.17.0", default-features = false, features = ["sha2", "std"] } -linked_hash_set = "0.1.4" +codec = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +ip_network = { workspace = true } +libp2p = { features = ["ed25519", "kad"], workspace = true } +multihash = { workspace = true } +linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } -prost = "0.12.4" -rand = "0.8.5" +prost = { workspace = true } +rand = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-types = { path = "../network/types" } -sp-api = { path = "../../primitives/api" } -sp-authority-discovery = { path = "../../primitives/authority-discovery" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-runtime = { path = "../../primitives/runtime" } -async-trait = "0.1.79" -multihash-codetable = { version = "0.1.1", features = [ - "digest", - "serde", - "sha2", -] } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +async-trait = { workspace = true } [dev-dependencies] -quickcheck = { version = "1.0.3", default-features = 
false } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +quickcheck = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/authority-discovery/src/service.rs b/substrate/client/authority-discovery/src/service.rs index 60c7a2b990378..852d3ab80c9b8 100644 --- a/substrate/client/authority-discovery/src/service.rs +++ b/substrate/client/authority-discovery/src/service.rs @@ -55,7 +55,7 @@ impl Service { /// [`crate::Worker`] failed. /// /// Note: [`Multiaddr`]s returned always include a [`PeerId`] via a - /// [`libp2p::core::multiaddr::Protocol::P2p`] component. Equality of + /// [`sc_network_types::multiaddr::Protocol::P2p`] component. Equality of /// [`PeerId`]s across [`Multiaddr`]s returned by a single call is not /// enforced today, given that there are still authorities out there /// publishing the addresses of their sentry nodes on the DHT. In the future diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index f20cf6aa21212..1f1cce160786c 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -45,10 +45,7 @@ use sc_network::{ event::DhtEvent, multiaddr, KademliaKey, Multiaddr, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, }; -use sc_network_types::{ - multihash::{Code, Multihash}, - PeerId, -}; +use sc_network_types::{multihash::Code, PeerId}; use schema::PeerSignature; use sp_api::{ApiError, ProvideRuntimeApi}; use sp_authority_discovery::{ @@ -247,14 +244,14 @@ where }; let public_addresses = { - let local_peer_id: Multihash = network.local_peer_id().into(); + let local_peer_id = network.local_peer_id(); config .public_addresses .into_iter() .map(|mut address| { if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { - if peer_id != local_peer_id { + if peer_id != *local_peer_id.as_ref() { error!( target: LOG_TARGET, "Discarding invalid local peer ID in public address {address}.", @@ -397,14 +394,13 @@ where debug!( target: LOG_TARGET, - "Authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", + "Publishing authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", ); // The address must include the local peer id. - let local_peer_id: Multihash = local_peer_id.into(); addresses .into_iter() - .map(move |a| a.with(multiaddr::Protocol::P2p(local_peer_id))) + .map(move |a| a.with(multiaddr::Protocol::P2p(*local_peer_id.as_ref()))) } /// Publish own public addresses. 
diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs index 77cdfbd4f1502..13bb990bf8b99 100644 --- a/substrate/client/authority-discovery/src/worker/addr_cache.rs +++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs @@ -177,7 +177,7 @@ mod tests { use super::*; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; - use sc_network_types::multihash::Multihash; + use sc_network_types::multihash::{Code, Multihash}; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; @@ -198,10 +198,9 @@ mod tests { impl Arbitrary for TestMultiaddr { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); - let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), - ) - .unwrap(); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &seed).unwrap()) + .unwrap(); let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" .parse::() .unwrap() @@ -217,10 +216,9 @@ mod tests { impl Arbitrary for TestMultiaddrsSamePeerCombo { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); - let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), - ) - .unwrap(); + let peer_id = + PeerId::from_multihash(Multihash::wrap(Code::Sha2_256.into(), &seed).unwrap()) + .unwrap(); let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" .parse::() .unwrap() diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index b75cb463b1a87..e3ae80e14f6ff 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-block-builder = { path = "../block-builder" } -sc-proposer-metrics = { path = "../proposer-metrics" } -sc-telemetry = { path = "../telemetry" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-inherents = { path = "../../primitives/inherents" } -sp-runtime = { path = "../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-proposer-metrics = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] 
-parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-transaction-pool = { path = "../transaction-pool" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 1519c76c42c0e..74805488792ad 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -205,7 +205,11 @@ where ) -> Proposer { let parent_hash = parent_header.hash(); - info!("๐Ÿ™Œ Starting consensus session on top of parent {:?}", parent_hash); + info!( + "๐Ÿ™Œ Starting consensus session on top of parent {:?} (#{})", + parent_hash, + parent_header.number() + ); let proposer = Proposer::<_, _, _, PR> { spawn_handle: self.spawn_handle.clone(), diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 62efe977e989c..47e3fc39c2899 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -] } -sp-api = { path = "../../primitives/api" } -sp-block-builder = { path = "../../primitives/block-builder" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-trie = { path = "../../primitives/trie" } -sp-inherents = { path = "../../primitives/inherents" } -sp-runtime = { path = "../../primitives/runtime" } +], workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sp-state-machine = { path = "../../primitives/state-machine" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +sp-state-machine = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 5b411b642a0e3..b3cd4bd57db86 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,31 +16,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -memmap2 = "0.9.3" +clap = { features = ["derive"], optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +memmap2 = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-client-api = { path = "../api" } -sc-chain-spec-derive = { path = "derive" } 
-sc-executor = { path = "../executor" } -sp-io = { default-features = false, path = "../../primitives/io" } -sc-network = { path = "../network" } -sc-telemetry = { path = "../telemetry" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-genesis-builder = { path = "../../primitives/genesis-builder" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } +sc-client-api = { workspace = true, default-features = true } +sc-chain-spec-derive = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sp-io = { workspace = true } +sc-network = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } log = { workspace = true } -sp-tracing = { path = "../../primitives/tracing" } -array-bytes = "6.2.2" -docify = "0.2.8" +sp-tracing = { workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } +docify = { workspace = true } [dev-dependencies] -substrate-test-runtime = { path = "../../test-utils/runtime" } -sp-keyring = { path = "../../primitives/keyring" } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } -sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } -regex = "1.6.0" +substrate-test-runtime = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +regex = { workspace = true } diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index 521eee578ecae..4ab8c849cc7fe 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 883cd19adfd1c..5f90f549e0226 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -766,6 +766,16 @@ pub fn update_code_in_json_chain_spec(chain_spec: &mut json::Value, code: &[u8]) } } +/// This function sets a codeSubstitute in the chain spec. 
+pub fn set_code_substitute_in_json_chain_spec( + chain_spec: &mut json::Value, + code: &[u8], + block_height: u64, +) { + let substitutes = json::json!({"codeSubstitutes":{ &block_height.to_string(): sp_core::bytes::to_hex(code, false) }}); + crate::json_patch::merge(chain_spec, substitutes); +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index b59ad68610ece..c43f9e89b8a99 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -333,8 +333,8 @@ pub mod json_patch; pub use self::{ chain_spec::{ - update_code_in_json_chain_spec, ChainSpec as GenericChainSpec, ChainSpecBuilder, - NoExtension, + set_code_substitute_in_json_chain_spec, update_code_in_json_chain_spec, + ChainSpec as GenericChainSpec, ChainSpecBuilder, NoExtension, }, extension::{get_extension, get_extension_mut, Extension, Fork, Forks, GetExtension, Group}, genesis_block::{ diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 169ed72c96e48..1e4017c23af23 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -chrono = "0.4.31" -clap = { version = "4.5.3", features = ["derive", "string", "wrap_help"] } -fdlimit = "0.3.0" -futures = "0.3.30" -itertools = "0.11" -libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } +array-bytes = { workspace = true, default-features = true } +chrono = { workspace = true } +clap = { features = ["derive", "string", "wrap_help"], workspace = true } +fdlimit = { workspace = true } +futures = { workspace = true } +itertools = { workspace = true } +libp2p-identity = { features = ["ed25519", "peerid"], workspace = true } log = { workspace = true, default-features = true } -names = { version = "0.14.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12" } -rand = "0.8.5" -regex = "1.6.0" -rpassword = "7.0.0" +names = { workspace = true } +codec = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +regex = { workspace = true } +rpassword = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] } -tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] } -sc-client-api = { path = "../api" } -sc-client-db = { path = "../db", default-features = false } -sc-keystore = { path = "../keystore" } -sc-mixnet = { path = "../mixnet" } -sc-network = { path = "../network" } -sc-service = { path = "../service", default-features = false } -sc-telemetry = { path = "../telemetry" } -sc-tracing = { path = "../tracing" } -sc-utils = { path = "../utils" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keyring = { path = "../../primitives/keyring" } -sp-keystore = { path = "../../primitives/keystore" } -sp-panic-handler = { path = "../../primitives/panic-handler" } -sp-runtime = { path = "../../primitives/runtime" } -sp-version = { path = "../../primitives/version" } +tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, 
default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-panic-handler = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" -futures-timer = "3.0.1" -sp-tracing = { path = "../../primitives/tracing" } +tempfile = { workspace = true } +futures-timer = { workspace = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["rocksdb"] diff --git a/substrate/client/cli/src/lib.rs b/substrate/client/cli/src/lib.rs index 104e8ec8b798e..1bb9fec0e2769 100644 --- a/substrate/client/cli/src/lib.rs +++ b/substrate/client/cli/src/lib.rs @@ -58,11 +58,11 @@ pub trait SubstrateCli: Sized { /// Implementation version. /// - /// By default this will look like this: + /// By default, it will look like this: /// /// `2.0.0-b950f731c` /// - /// Where the hash is the short commit hash of the commit of in the Git repository. + /// Where the hash is the short hash of the commit in the Git repository. fn impl_version() -> String; /// Executable file name. @@ -199,17 +199,8 @@ pub trait SubstrateCli: Sized { fn create_runner, DVC: DefaultConfigurationValues>( &self, command: &T, - ) -> error::Result> { - let tokio_runtime = build_runtime()?; - - // `capture` needs to be called in a tokio context. - // Also capture them as early as possible. - let signals = tokio_runtime.block_on(async { Signals::capture() })?; - - let config = command.create_configuration(self, tokio_runtime.handle().clone())?; - - command.init(&Self::support_url(), &Self::impl_version(), |_, _| {}, &config)?; - Runner::new(config, tokio_runtime, signals) + ) -> Result> { + self.create_runner_with_logger_hook(command, |_, _| {}) } /// Create a runner for the command provided in argument. The `logger_hook` can be used to setup @@ -231,11 +222,15 @@ pub trait SubstrateCli: Sized { /// } /// } /// ``` - fn create_runner_with_logger_hook( + fn create_runner_with_logger_hook< + T: CliConfiguration, + DVC: DefaultConfigurationValues, + F, + >( &self, command: &T, logger_hook: F, - ) -> error::Result> + ) -> Result> where F: FnOnce(&mut LoggerBuilder, &Configuration), { diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 25b17b5328981..6b7b0e7ffa997 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -29,11 +29,18 @@ pub struct PruningParams { /// should be pruned (ie, removed) from the database. /// This setting can only be set on the first creation of the database. Every subsequent run /// will load the pruning mode from the database and will error if the stored mode doesn't - /// match this CLI value. 
It is fine to drop this CLI flag for subsequent runs. + /// match this CLI value. It is fine to drop this CLI flag for subsequent runs. The only + /// exception is that `NUMBER` can change between subsequent runs (increasing it will not + /// lead to restoring pruned state). + /// /// Possible values: - /// - archive: Keep the state of all blocks. - /// - 'archive-canonical' Keep only the state of finalized blocks. - /// - number Keep the state of the last number of finalized blocks. + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. + /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. + /// /// [default: 256] #[arg(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, @@ -42,11 +49,14 @@ pub struct PruningParams { /// /// This mode specifies when the block's body (including justifications) /// should be pruned (ie, removed) from the database. + /// /// Possible values: - /// - 'archive' Keep all blocks. - /// - 'archive-canonical' Keep only finalized blocks. - /// - number - /// Keep the last `number` of finalized blocks. + /// + /// - archive: Keep the data of all blocks. + /// + /// - archive-canonical: Keep only the data of finalized blocks. + /// + /// - NUMBER: Keep the data of the last NUMBER of finalized blocks. #[arg( alias = "keep-blocks", long, @@ -117,3 +127,39 @@ impl Into for DatabasePruningMode { } } } + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + #[derive(Parser)] + struct Cli { + #[clap(flatten)] + pruning: PruningParams, + } + + #[test] + fn pruning_params_parse_works() { + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=1000", "--blocks-pruning=1000"]); + + assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::Custom(1000)))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Custom(1000))); + + let Cli { pruning } = + Cli::parse_from(["", "--state-pruning=archive", "--blocks-pruning=archive"]); + + assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::Archive))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Archive)); + + let Cli { pruning } = Cli::parse_from([ + "", + "--state-pruning=archive-canonical", + "--blocks-pruning=archive-canonical", + ]); + + assert!(matches!(dbg!(pruning.state_pruning), Some(DatabasePruningMode::ArchiveCanonical))); + assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::ArchiveCanonical)); + } +} diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index d1460c45356d7..3a3d7ae18d711 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -16,37 +16,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-slots = { path = "../slots" } -sc-telemetry = { path = "../../telemetry" } -sp-api = { path = "../../../primitives/api" } 
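A few hunks above, `create_runner` in `sc-cli` now simply delegates to `create_runner_with_logger_hook` with an empty hook. A sketch of how a downstream CLI might use that hook, assuming a hypothetical `runner_without_colors` helper and the existing `LoggerBuilder::with_colors` setter:

```rust
use sc_cli::{CliConfiguration, Result, Runner, SubstrateCli};

// Sketch only: any `SubstrateCli` implementation can route runner construction
// through `create_runner_with_logger_hook` to tweak logging before it is initialized.
fn runner_without_colors<C, T>(cli: &C, cmd: &T) -> Result<Runner<C>>
where
    C: SubstrateCli,
    T: CliConfiguration,
{
    cli.create_runner_with_logger_hook(cmd, |logger, _config| {
        // Illustrative: disable ANSI colors; the hook runs before logger init,
        // so it can also react to the parsed `Configuration`.
        logger.with_colors(false);
    })
}
```

The hook receives both the logger builder and the parsed configuration, which is why the delegation above loses nothing compared to the old hand-rolled `create_runner` body.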
-sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -tempfile = "3.1.0" -sc-keystore = { path = "../../keystore" } -sc-network = { path = "../../network" } -sc-network-test = { path = "../../network/test" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = { version = "1.22.0" } +parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/aura/src/import_queue.rs b/substrate/client/consensus/aura/src/import_queue.rs index a8777ef8788cc..79f4faa5ebf97 100644 --- a/substrate/client/consensus/aura/src/import_queue.rs +++ b/substrate/client/consensus/aura/src/import_queue.rs @@ -174,7 +174,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { // Skip checks that include execution, if being told so or when importing only state. 
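The aura import-queue hunk above is one instance of a repo-wide change: `Verifier::verify` now takes `&self`, and the trait additionally requires `Sync` (see the `sc-consensus` hunks further down). A sketch of what a downstream verifier can look like after this change, with a hypothetical `CountingVerifier` whose mutable bookkeeping moves behind interior mutability:

```rust
use parking_lot::Mutex;
use sc_consensus::{import_queue::Verifier, BlockImportParams};
use sp_runtime::traits::Block as BlockT;

// Hypothetical example type, not part of this PR.
struct CountingVerifier {
    // Mutable state now needs interior mutability, since `verify` only gets `&self`
    // and the trait also requires `Sync`.
    verified: Mutex<u64>,
}

#[async_trait::async_trait]
impl<B: BlockT> Verifier<B> for CountingVerifier {
    async fn verify(
        &self,
        block: BlockImportParams<B>,
    ) -> Result<BlockImportParams<B>, String> {
        *self.verified.lock() += 1;
        // No real verification here; a real verifier would check seals, digests, etc.
        Ok(block)
    }
}
```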
diff --git a/substrate/client/consensus/aura/src/standalone.rs b/substrate/client/consensus/aura/src/standalone.rs index 0f9b8668d4478..c1536d9ef73f3 100644 --- a/substrate/client/consensus/aura/src/standalone.rs +++ b/substrate/client/consensus/aura/src/standalone.rs @@ -24,7 +24,7 @@ use log::trace; use codec::Codec; -use sc_client_api::{backend::AuxStore, UsageProvider}; +use sc_client_api::UsageProvider; use sp_api::{Core, ProvideRuntimeApi}; use sp_application_crypto::{AppCrypto, AppPublic}; use sp_blockchain::Result as CResult; @@ -48,7 +48,7 @@ pub fn slot_duration(client: &C) -> CResult where A: Codec, B: BlockT, - C: AuxStore + ProvideRuntimeApi + UsageProvider, + C: ProvideRuntimeApi + UsageProvider, C::Api: AuraApi, { slot_duration_at(client, client.usage_info().chain.best_hash) @@ -59,7 +59,7 @@ pub fn slot_duration_at(client: &C, block_hash: B::Hash) -> CResult, + C: ProvideRuntimeApi, C::Api: AuraApi, { client.runtime_api().slot_duration(block_hash).map_err(|err| err.into()) diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index c51082a018b5c..bba60bc45ea50 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -17,41 +17,41 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -num-bigint = "0.4.3" -num-rational = "0.4.1" -num-traits = "0.2.17" -parking_lot = "0.12.1" +num-bigint = { workspace = true } +num-rational = { workspace = true } +num-traits = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-epochs = { path = "../epochs" } -sc-consensus-slots = { path = "../slots" } -sc-telemetry = { path = "../../telemetry" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-slots = { workspace = true, default-features = 
true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-block-builder = { path = "../../block-builder" } -sp-keyring = { path = "../../../primitives/keyring" } -sc-network-test = { path = "../../network/test" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = "1.37" +sc-block-builder = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-timestamp = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 4c755df541d70..1ef049c3dbcc4 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -futures = "0.3.30" +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +futures = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-consensus-babe = { path = ".." 
} -sc-consensus-epochs = { path = "../../epochs" } -sc-rpc-api = { path = "../../../rpc-api" } -sp-api = { path = "../../../../primitives/api" } -sp-application-crypto = { path = "../../../../primitives/application-crypto" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-consensus = { path = "../../../../primitives/consensus/common" } -sp-consensus-babe = { path = "../../../../primitives/consensus/babe" } -sp-core = { path = "../../../../primitives/core" } -sp-keystore = { path = "../../../../primitives/keystore" } -sp-runtime = { path = "../../../../primitives/runtime" } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -tokio = "1.37" -sc-consensus = { path = "../../common" } -sc-keystore = { path = "../../../keystore" } -sc-transaction-pool-api = { path = "../../../transaction-pool/api" } -sp-keyring = { path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 0c85de2400403..0c1eb88758644 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1128,7 +1128,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { trace!( @@ -1681,7 +1681,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await.map_err(Into::into) diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 716067ae40006..6f805188b9a42 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -143,11 +143,11 @@ thread_local! { pub struct PanickingBlockImport(B); #[async_trait::async_trait] -impl> BlockImport for PanickingBlockImport +impl BlockImport for PanickingBlockImport where - B: Send, + BI: BlockImport + Send + Sync, { - type Error = B::Error; + type Error = BI::Error; async fn import_block( &mut self, @@ -157,7 +157,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { Ok(self.0.check_block(block).await.expect("checking block failed")) @@ -198,7 +198,7 @@ impl Verifier for TestVerifier { /// new set of validators to import. 
If not, err with an Error-Message /// presented to the User in the logs. async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { // apply post-sealing mutations (i.e. stripping seal, if desired). diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index f5528ec5931db..b2031e0d1e077 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -12,46 +12,46 @@ homepage = "https://substrate.io" workspace = true [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fnv = "1.0.6" -futures = "0.3.30" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -wasm-timer = "0.2.5" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-network = { path = "../../network" } -sc-network-gossip = { path = "../../network-gossip" } -sc-network-sync = { path = "../../network/sync" } -sc-network-types = { path = "../../network/types" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -tokio = "1.37" +wasm-timer = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-gossip = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } [dev-dependencies] serde = { workspace = true, 
default-features = true } -tempfile = "3.1.0" -sc-block-builder = { path = "../../block-builder" } -sc-network-test = { path = "../../network/test" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +tempfile = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] # This feature adds BLS crypto primitives. It should not be used in production since diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 84f90622b5c14..7869f5a336b11 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -12,22 +12,22 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-consensus-beefy = { path = ".." 
} -sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } -sc-rpc = { path = "../../../rpc" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } -sp-application-crypto = { path = "../../../../primitives/application-crypto" } +sc-consensus-beefy = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } -tokio = { version = "1.22.0", features = ["macros"] } +sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs index 073fee0bdbdbe..faa4d34eff5ac 100644 --- a/substrate/client/consensus/beefy/src/fisherman.rs +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -23,7 +23,7 @@ use sp_api::ProvideRuntimeApi; use sp_application_crypto::RuntimeAppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_beefy::{ - check_equivocation_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof, + check_double_voting_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId, }; use sp_runtime::{ @@ -132,7 +132,7 @@ where (active_rounds.validators(), active_rounds.validator_set_id()); let offender_id = proof.offender_id(); - if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { + if !check_double_voting_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "๐Ÿฅฉ Skipping report for bad equivocation {:?}", proof); return Ok(()); } @@ -155,7 +155,7 @@ where for ProvedValidator { key_owner_proof, .. } in key_owner_proofs { self.runtime .runtime_api() - .submit_report_equivocation_unsigned_extrinsic( + .submit_report_double_voting_unsigned_extrinsic( best_block_hash, proof.clone(), key_owner_proof, diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index c01fb3db4845e..8480268529338 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -192,7 +192,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 681e11a0c5310..d8f5b39dbbaaa 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -314,7 +314,7 @@ sp_api::mock_impl_runtime_apis! 
{ self.inner.validator_set.clone() } - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( proof: DoubleVotingProof, AuthorityId, Signature>, _dummy: OpaqueKeyOwnershipProof, ) -> Option<()> { diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 3ce4da7ecd56a..4a9f7a2d0e3b0 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -1039,7 +1039,7 @@ pub(crate) mod tests { ecdsa_crypto, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, - test_utils::{generate_equivocation_proof, Keyring}, + test_utils::{generate_double_voting_proof, Keyring}, ConsensusLog, Payload, SignedCommitment, }; use sp_runtime::traits::{Header as HeaderT, One}; @@ -1586,7 +1586,7 @@ pub(crate) mod tests { let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); // generate an equivocation proof, with Bob as perpetrator - let good_proof = generate_equivocation_proof( + let good_proof = generate_double_voting_proof( (block_num, payload1.clone(), set_id, &Keyring::Bob), (block_num, payload2.clone(), set_id, &Keyring::Bob), ); @@ -1618,7 +1618,7 @@ pub(crate) mod tests { assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); // now let's try reporting a self-equivocation - let self_proof = generate_equivocation_proof( + let self_proof = generate_double_voting_proof( (block_num, payload1.clone(), set_id, &Keyring::Alice), (block_num, payload2.clone(), set_id, &Keyring::Alice), ); diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 6d642ec78fefa..a6f59e600f269 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -16,24 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } -futures-timer = "3.0.1" +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -parking_lot = "0.12.1" +mockall = { workspace = true } +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-network-types = { path = "../../network/types" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, 
default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -sp-test-primitives = { path = "../../../primitives/test-primitives" } +sp-test-primitives = { workspace = true } diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index d91851aea62cf..c5adbb5a5fca0 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -307,10 +307,7 @@ pub trait BlockImport { type Error: std::error::Error + Send + 'static; /// Check block preconditions. - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result; + async fn check_block(&self, block: BlockCheckParams) -> Result; /// Import a block. async fn import_block( @@ -324,10 +321,7 @@ impl BlockImport for crate::import_queue::BoxBlockImport { type Error = sp_consensus::error::Error; /// Check block preconditions. - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { (**self).check_block(block).await } @@ -348,10 +342,7 @@ where { type Error = E; - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { (&**self).check_block(block).await } diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 371465536c35a..35fc8ad4a402e 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -28,6 +28,10 @@ //! queues to be instantiated simply. use log::{debug, trace}; +use std::{ + fmt, + time::{Duration, Instant}, +}; use sp_consensus::{error::Error as ConsensusError, BlockOrigin}; use sp_runtime::{ @@ -93,18 +97,18 @@ pub struct IncomingBlock { /// Verify a justification of a block #[async_trait::async_trait] -pub trait Verifier: Send { +pub trait Verifier: Send + Sync { /// Verify the given block data and return the `BlockImportParams` to /// continue the block import process. - async fn verify(&mut self, block: BlockImportParams) - -> Result, String>; + async fn verify(&self, block: BlockImportParams) -> Result, String>; } /// Blocks import queue API. /// /// The `import_*` methods can be called in order to send elements for the import queue to verify. pub trait ImportQueueService: Send { - /// Import bunch of blocks. + /// Import bunch of blocks, every next block must be an ancestor of the previous block in the + /// list. fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); /// Import block justifications. @@ -165,16 +169,16 @@ pub trait Link: Send { /// Block import successful result. #[derive(Debug, PartialEq)] -pub enum BlockImportStatus { +pub enum BlockImportStatus { /// Imported known block. - ImportedKnown(N, Option), + ImportedKnown(BlockNumber, Option), /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), + ImportedUnknown(BlockNumber, ImportedAux, Option), } -impl BlockImportStatus { +impl BlockImportStatus { /// Returns the imported block number. 
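The `BlockImport` trait change in this hunk (`check_block` taking `&self`) ripples through every wrapper import in the tree, as the GRANDPA, BEEFY and BABE-test hunks show. A sketch of a pass-through wrapper under the new signature, using a hypothetical `LoggingBlockImport` type:

```rust
use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult};
use sp_runtime::traits::Block as BlockT;

// Hypothetical wrapper: pre-condition checks no longer need exclusive access,
// only `import_block` still takes `&mut self`.
struct LoggingBlockImport<BI>(BI);

#[async_trait::async_trait]
impl<B, BI> BlockImport<B> for LoggingBlockImport<BI>
where
    B: BlockT,
    BI: BlockImport<B> + Send + Sync,
{
    type Error = BI::Error;

    async fn check_block(
        &self,
        block: BlockCheckParams<B>,
    ) -> Result<ImportResult, Self::Error> {
        log::debug!("checking block {:?}", block.hash);
        self.0.check_block(block).await
    }

    async fn import_block(
        &mut self,
        block: BlockImportParams<B>,
    ) -> Result<ImportResult, Self::Error> {
        self.0.import_block(block).await
    }
}
```

Note the `Sync` bound on the inner import: with a shared receiver, `check_block` can now be called concurrently through shared references, which is what the relaxed signature is buying.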
- pub fn number(&self) -> &N { + pub fn number(&self) -> &BlockNumber { match self { BlockImportStatus::ImportedKnown(n, _) | BlockImportStatus::ImportedUnknown(n, _, _) => n, @@ -223,44 +227,30 @@ pub async fn import_single_block>( block: IncomingBlock, verifier: &mut V, ) -> BlockImportResult { - import_single_block_metered(import_handle, block_origin, block, verifier, None).await + match verify_single_block_metered(import_handle, block_origin, block, verifier, None).await? { + SingleBlockVerificationOutcome::Imported(import_status) => Ok(import_status), + SingleBlockVerificationOutcome::Verified(import_parameters) => + import_single_block_metered(import_handle, import_parameters, None).await, + } } -/// Single block import function with metering. -pub(crate) async fn import_single_block_metered>( - import_handle: &mut impl BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, - metrics: Option, -) -> BlockImportResult { - let peer = block.origin; - - let (header, justifications) = match (block.header, block.justifications) { - (Some(header), justifications) => (header, justifications), - (None, _) => { - if let Some(ref peer) = peer { - debug!(target: LOG_TARGET, "Header {} was not provided by {} ", block.hash, peer); - } else { - debug!(target: LOG_TARGET, "Header {} was not provided ", block.hash); - } - return Err(BlockImportError::IncompleteHeader(peer)) - }, - }; - - trace!(target: LOG_TARGET, "Header {} has {:?} logs", block.hash, header.digest().logs().len()); - - let number = *header.number(); - let hash = block.hash; - let parent_hash = *header.parent_hash(); - - let import_handler = |import| match import { +fn import_handler( + number: NumberFor, + hash: Block::Hash, + parent_hash: Block::Hash, + block_origin: Option, + import: Result, +) -> Result>, BlockImportError> +where + Block: BlockT, +{ + match import { Ok(ImportResult::AlreadyInChain) => { trace!(target: LOG_TARGET, "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportStatus::ImportedKnown(number, peer)) + Ok(BlockImportStatus::ImportedKnown(number, block_origin)) }, Ok(ImportResult::Imported(aux)) => - Ok(BlockImportStatus::ImportedUnknown(number, aux, peer)), + Ok(BlockImportStatus::ImportedUnknown(number, aux, block_origin)), Ok(ImportResult::MissingState) => { debug!( target: LOG_TARGET, @@ -277,15 +267,60 @@ pub(crate) async fn import_single_block_metered>( }, Ok(ImportResult::KnownBad) => { debug!(target: LOG_TARGET, "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer)) + Err(BlockImportError::BadBlock(block_origin)) }, Err(e) => { debug!(target: LOG_TARGET, "Error importing block {}: {:?}: {}", number, hash, e); Err(BlockImportError::Other(e)) }, + } +} + +pub(crate) enum SingleBlockVerificationOutcome { + /// Block is already imported. + Imported(BlockImportStatus>), + /// Block is verified, but needs to be imported. + Verified(SingleBlockImportParameters), +} + +pub(crate) struct SingleBlockImportParameters { + import_block: BlockImportParams, + hash: Block::Hash, + block_origin: Option, + verification_time: Duration, +} + +/// Single block import function with metering. 
+pub(crate) async fn verify_single_block_metered>( + import_handle: &impl BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, + metrics: Option<&Metrics>, +) -> Result, BlockImportError> { + let peer = block.origin; + let justifications = block.justifications; + + let Some(header) = block.header else { + if let Some(ref peer) = peer { + debug!(target: LOG_TARGET, "Header {} was not provided by {peer} ", block.hash); + } else { + debug!(target: LOG_TARGET, "Header {} was not provided ", block.hash); + } + return Err(BlockImportError::IncompleteHeader(peer)) }; - match import_handler( + trace!(target: LOG_TARGET, "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + + let number = *header.number(); + let hash = block.hash; + let parent_hash = *header.parent_hash(); + + match import_handler::( + number, + hash, + parent_hash, + peer, import_handle .check_block(BlockCheckParams { hash, @@ -298,10 +333,13 @@ pub(crate) async fn import_single_block_metered>( .await, )? { BlockImportStatus::ImportedUnknown { .. } => (), - r => return Ok(r), // Any other successful result means that the block is already imported. + r => { + // Any other successful result means that the block is already imported. + return Ok(SingleBlockVerificationOutcome::Imported(r)) + }, } - let started = std::time::Instant::now(); + let started = Instant::now(); let mut import_block = BlockImportParams::new(block_origin, header); import_block.body = block.body; @@ -332,19 +370,42 @@ pub(crate) async fn import_single_block_metered>( } else { trace!(target: LOG_TARGET, "Verifying {}({}) failed: {}", number, hash, msg); } - if let Some(metrics) = metrics.as_ref() { + if let Some(metrics) = metrics { metrics.report_verification(false, started.elapsed()); } BlockImportError::VerificationFailed(peer, msg) })?; - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(true, started.elapsed()); + let verification_time = started.elapsed(); + if let Some(metrics) = metrics { + metrics.report_verification(true, verification_time); } + Ok(SingleBlockVerificationOutcome::Verified(SingleBlockImportParameters { + import_block, + hash, + block_origin: peer, + verification_time, + })) +} + +pub(crate) async fn import_single_block_metered( + import_handle: &mut impl BlockImport, + import_parameters: SingleBlockImportParameters, + metrics: Option<&Metrics>, +) -> BlockImportResult { + let started = Instant::now(); + + let SingleBlockImportParameters { import_block, hash, block_origin, verification_time } = + import_parameters; + + let number = *import_block.header.number(); + let parent_hash = *import_block.header.parent_hash(); + let imported = import_handle.import_block(import_block).await; - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification_and_import(started.elapsed()); + if let Some(metrics) = metrics { + metrics.report_verification_and_import(started.elapsed() + verification_time); } - import_handler(imported) + + import_handler::(number, hash, parent_hash, block_origin, imported) } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index f4f618d1b3182..05f2b25279614 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -19,7 +19,6 @@ use futures::{ prelude::*, task::{Context, Poll}, }; -use futures_timer::Delay; use log::{debug, trace}; use 
prometheus_endpoint::Registry; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -28,14 +27,14 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, }; -use std::{pin::Pin, time::Duration}; +use std::pin::Pin; use crate::{ import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, - import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, - BoxJustificationImport, ImportQueue, ImportQueueService, IncomingBlock, Link, - RuntimeOrigin, Verifier, LOG_TARGET, + import_single_block_metered, verify_single_block_metered, BlockImportError, + BlockImportStatus, BoxBlockImport, BoxJustificationImport, ImportQueue, ImportQueueService, + IncomingBlock, Link, RuntimeOrigin, SingleBlockVerificationOutcome, Verifier, LOG_TARGET, }, metrics::Metrics, }; @@ -61,13 +60,16 @@ impl BasicQueue { /// Instantiate a new basic queue, with given verifier. /// /// This creates a background task, and calls `on_start` on the justification importer. - pub fn new>( + pub fn new( verifier: V, block_import: BoxBlockImport, justification_import: Option>, spawner: &impl sp_core::traits::SpawnEssentialNamed, prometheus_registry: Option<&Registry>, - ) -> Self { + ) -> Self + where + V: Verifier + 'static, + { let (result_sender, result_port) = buffered_link::buffered_link(100_000); let metrics = prometheus_registry.and_then(|r| { @@ -224,7 +226,6 @@ async fn block_import_process( mut result_sender: BufferedLinkSender, mut block_import_receiver: TracingUnboundedReceiver>, metrics: Option, - delay_between_blocks: Duration, ) { loop { let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await @@ -239,15 +240,9 @@ async fn block_import_process( }, }; - let res = import_many_blocks( - &mut block_import, - origin, - blocks, - &mut verifier, - delay_between_blocks, - metrics.clone(), - ) - .await; + let res = + import_many_blocks(&mut block_import, origin, blocks, &mut verifier, metrics.clone()) + .await; result_sender.blocks_processed(res.imported, res.block_count, res.results); } @@ -260,7 +255,7 @@ struct BlockImportWorker { } impl BlockImportWorker { - fn new>( + fn new( result_sender: BufferedLinkSender, verifier: V, block_import: BoxBlockImport, @@ -270,19 +265,20 @@ impl BlockImportWorker { impl Future + Send, TracingUnboundedSender>, TracingUnboundedSender>, - ) { + ) + where + V: Verifier + 'static, + { use worker_messages::*; let (justification_sender, mut justification_port) = tracing_unbounded("mpsc_import_queue_worker_justification", 100_000); - let (block_import_sender, block_import_port) = + let (block_import_sender, block_import_receiver) = tracing_unbounded("mpsc_import_queue_worker_blocks", 100_000); let mut worker = BlockImportWorker { result_sender, justification_import, metrics }; - let delay_between_blocks = Duration::default(); - let future = async move { // Let's initialize `justification_import` if let Some(justification_import) = worker.justification_import.as_mut() { @@ -295,9 +291,8 @@ impl BlockImportWorker { block_import, verifier, worker.result_sender.clone(), - block_import_port, + block_import_receiver, worker.metrics.clone(), - delay_between_blocks, ); futures::pin_mut!(block_import_process); @@ -394,7 +389,6 @@ async fn import_many_blocks>( blocks_origin: BlockOrigin, blocks: Vec>, verifier: &mut V, - delay_between_blocks: Duration, metrics: Option, ) -> ImportManyBlocksResult { let count = blocks.len(); @@ -431,15 
+425,22 @@ async fn import_many_blocks>( let import_result = if has_error { Err(BlockImportError::Cancelled) } else { - // The actual import. - import_single_block_metered( + let verification_fut = verify_single_block_metered( import_handle, blocks_origin, block, verifier, - metrics.clone(), - ) - .await + metrics.as_ref(), + ); + match verification_fut.await { + Ok(SingleBlockVerificationOutcome::Imported(import_status)) => Ok(import_status), + Ok(SingleBlockVerificationOutcome::Verified(import_parameters)) => { + // The actual import. + import_single_block_metered(import_handle, import_parameters, metrics.as_ref()) + .await + }, + Err(e) => Err(e), + } }; if let Some(metrics) = metrics.as_ref() { @@ -460,11 +461,7 @@ async fn import_many_blocks>( results.push((import_result, block_hash)); - if delay_between_blocks != Duration::default() && !has_error { - Delay::new(delay_between_blocks).await; - } else { - Yield::new().await - } + Yield::new().await } } @@ -510,7 +507,7 @@ mod tests { #[async_trait::async_trait] impl Verifier for () { async fn verify( - &mut self, + &self, block: BlockImportParams, ) -> Result, String> { Ok(BlockImportParams::new(block.origin, block.header)) @@ -522,7 +519,7 @@ mod tests { type Error = sp_consensus::Error; async fn check_block( - &mut self, + &self, _block: BlockCheckParams, ) -> Result { Ok(ImportResult::imported(false)) diff --git a/substrate/client/consensus/epochs/Cargo.toml b/substrate/client/consensus/epochs/Cargo.toml index e409e171e477c..127cc9ebec207 100644 --- a/substrate/client/consensus/epochs/Cargo.toml +++ b/substrate/client/consensus/epochs/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fork-tree = { path = "../../../utils/fork-tree" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-runtime = { path = "../../../primitives/runtime" } +codec = { features = ["derive"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index b03a263ae0a37..e49c7c9f0d7a9 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -17,51 +17,51 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.8.2" -array-bytes = "6.2.2" -async-trait = "0.1.79" -dyn-clone = "1.0" -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.30" -futures-timer = "3.0.1" +ahash = { workspace = true } +array-bytes = { workspace = true, default-features = true } +async-trait = { workspace = true } +dyn-clone = { workspace = true } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -parking_lot = "0.12.1" -rand = "0.8.5" +codec = { features = ["derive"], workspace = true, default-features 
= true } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-block-builder = { path = "../../block-builder" } -sc-chain-spec = { path = "../../chain-spec" } -sc-client-api = { path = "../../api" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sc-consensus = { path = "../common" } -sc-network = { path = "../../network" } -sc-network-gossip = { path = "../../network-gossip" } -sc-network-common = { path = "../../network/common" } -sc-network-sync = { path = "../../network/sync" } -sc-network-types = { path = "../../network/types" } -sc-telemetry = { path = "../../telemetry" } -sc-utils = { path = "../../utils" } -sp-api = { path = "../../../primitives/api" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-gossip = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1.3.0" -finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } +assert_matches = { workspace = true } +finality-grandpa = { features = ["derive-codec", "test-helpers"], workspace = true, default-features = true } serde = { workspace = true, default-features = true } -tokio = "1.37" -sc-network = { path = "../../network" } -sc-network-test = { path = "../../network/test" } 
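Further up, `BasicQueue::new` moved its verifier bound into a where-clause (`V: Verifier<B> + 'static`) and dropped the artificial delay between imported blocks. Wiring a custom verifier into the queue is unchanged in spirit; a hedged sketch with a hypothetical `build_import_queue` helper:

```rust
use sc_consensus::import_queue::{BasicQueue, BoxBlockImport, Verifier};
use sp_core::traits::SpawnEssentialNamed;
use sp_runtime::traits::Block as BlockT;

// Sketch only: construct a `BasicQueue` from a custom verifier under the updated
// signature. No justification import and no Prometheus registry are used here.
fn build_import_queue<B, V>(
    verifier: V,
    block_import: BoxBlockImport<B>,
    spawner: &impl SpawnEssentialNamed,
) -> BasicQueue<B>
where
    B: BlockT,
    V: Verifier<B> + 'static,
{
    BasicQueue::new(verifier, block_import, None, spawner, None)
}
```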
-sp-keyring = { path = "../../../primitives/keyring" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-test = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index a9437a9be0754..0215fe2e3e642 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -13,25 +13,25 @@ homepage = "https://substrate.io" workspace = true [dependencies] -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.30" -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sc-client-api = { path = "../../../api" } -sc-consensus-grandpa = { path = ".." } -sc-rpc = { path = "../../../rpc" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-block-builder = { path = "../../../block-builder" } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } -sp-core = { path = "../../../../primitives/core" } -sp-consensus-grandpa = { path = "../../../../primitives/consensus/grandpa" } -sp-keyring = { path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } -tokio = { version = "1.22.0", features = ["macros"] } +sc-block-builder = { workspace = true, default-features = true } +sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index b594c0f678cea..8b7b02f180ecd 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -518,7 +518,7 @@ where Client: ClientForGrandpa, Client::Api: GrandpaApi, for<'a> &'a 
Client: BlockImport, - SC: Send, + SC: Send + Sync, { type Error = ConsensusError; @@ -697,7 +697,7 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 33f5bf1f8c150..3d74eda8fa01f 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,37 +16,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -assert_matches = "1.3.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +assert_matches = { workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-consensus-aura = { path = "../aura" } -sc-consensus-babe = { path = "../babe" } -sc-consensus-epochs = { path = "../epochs" } -sc-transaction-pool = { path = "../../transaction-pool" } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } 
+sp-timestamp = { workspace = true, default-features = true } [dev-dependencies] -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } -sc-basic-authorship = { path = "../../basic-authorship" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index bc56ce0227142..a68e46f0134d6 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -96,7 +96,7 @@ where C: HeaderBackend + HeaderMetadata, { async fn verify( - &mut self, + &self, mut import_params: BlockImportParams, ) -> Result, String> { import_params.finalized = false; diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 8fc7e7ecab2f4..39f8f8609d8d7 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -65,7 +65,7 @@ struct ManualSealVerifier; #[async_trait::async_trait] impl Verifier for ManualSealVerifier { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { block.finalized = false; diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index 51a2be1b6cf5d..f2a071ec25c2c 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.1" +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sp-api = { path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-pow = { path = "../../../primitives/consensus/pow" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-runtime = { path = "../../../primitives/runtime" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true 
} +sp-consensus = { workspace = true, default-features = true } +sp-consensus-pow = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index ee5c1dfc6f11a..50e9533abb36a 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -312,10 +312,7 @@ where { type Error = ConsensusError; - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { + async fn check_block(&self, block: BlockCheckParams) -> Result { self.inner.check_block(block).await.map_err(Into::into) } @@ -442,7 +439,7 @@ where Algorithm::Difficulty: 'static + Send, { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { let hash = block.header.hash(); diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 8e88ee68d7d73..2b795b13f8e33 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -17,22 +17,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.1" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../common" } -sc-telemetry = { path = "../../telemetry" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-slots = { path = "../../../primitives/consensus/slots" } -sp-core = { path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index d9d7920053125..7cdf90877dffa 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -29,8 +29,8 @@ mod aux_schema; mod slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -pub use slots::SlotInfo; use slots::Slots; +pub 
use slots::{time_until_next_slot, SlotInfo}; use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index b10c42d50f0bc..c8372701ac329 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -16,38 +16,38 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -] } -hash-db = "0.16.0" -kvdb = "0.13.0" -kvdb-memorydb = "0.13.0" -kvdb-rocksdb = { version = "0.19.0", optional = true } -linked-hash-map = "0.5.4" +], workspace = true, default-features = true } +hash-db = { workspace = true, default-features = true } +kvdb = { workspace = true } +kvdb-memorydb = { workspace = true } +kvdb-rocksdb = { optional = true, workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -parity-db = "0.4.12" -parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-state-db = { path = "../state-db" } -schnellru = "0.2.1" -sp-arithmetic = { path = "../../primitives/arithmetic" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-database = { path = "../../primitives/database" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-trie = { path = "../../primitives/trie" } +parity-db = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-state-db = { workspace = true, default-features = true } +schnellru = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] -criterion = "0.5.1" -kvdb-rocksdb = "0.19.0" -rand = "0.8.5" -tempfile = "3.1.0" -quickcheck = { version = "1.0.3", default-features = false } -kitchensink-runtime = { path = "../../bin/node/runtime" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -array-bytes = "6.2.2" +criterion = { workspace = true, default-features = true } +kvdb-rocksdb = { workspace = true } +rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +quickcheck = { workspace = true } +kitchensink-runtime = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +array-bytes = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 8d8b7a2aff88f..e95cd9e4ad5fd 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1357,6 +1357,8 @@ impl Backend { Ok(()) } + /// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls + /// for performance reasons. 
fn finalize_block_with_transaction( &self, transaction: &mut Transaction, @@ -1365,6 +1367,7 @@ impl Backend { last_finalized: Option, justification: Option, current_transaction_justifications: &mut HashMap, + remove_displaced: bool, ) -> ClientResult> { // TODO: ensure best chain contains this block. let number = *header.number(); @@ -1377,6 +1380,7 @@ impl Backend { hash, with_state, current_transaction_justifications, + remove_displaced, )?; if let Some(justification) = justification { @@ -1454,7 +1458,8 @@ impl Backend { let mut current_transaction_justifications: HashMap = HashMap::new(); - for (block_hash, justification) in operation.finalized_blocks { + let mut finalized_blocks = operation.finalized_blocks.into_iter().peekable(); + while let Some((block_hash, justification)) = finalized_blocks.next() { let block_header = self.blockchain.expect_header(block_hash)?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, @@ -1463,6 +1468,7 @@ impl Backend { Some(last_finalized_hash), justification, &mut current_transaction_justifications, + finalized_blocks.peek().is_none(), )?); last_finalized_hash = block_hash; last_finalized_num = *block_header.number(); @@ -1642,6 +1648,7 @@ impl Backend { hash, operation.commit_state, &mut current_transaction_justifications, + true, )?; } else { // canonicalize blocks which are old enough, regardless of finality. @@ -1766,9 +1773,10 @@ impl Backend { Ok(()) } - // write stuff to a transaction after a new block is finalized. - // this canonicalizes finalized blocks. Fails if called with a block which - // was not a child of the last finalized block. + // Write stuff to a transaction after a new block is finalized. This canonicalizes finalized + // blocks. Fails if called with a block which was not a child of the last finalized block. + /// `remove_displaced` can be set to `false` if this is not the last of many subsequent calls + /// for performance reasons. 
fn note_finalized( &self, transaction: &mut Transaction, @@ -1776,6 +1784,7 @@ impl Backend { f_hash: Block::Hash, with_state: bool, current_transaction_justifications: &mut HashMap, + remove_displaced: bool, ) -> ClientResult<()> { let f_num = *f_header.number(); @@ -1800,13 +1809,19 @@ impl Backend { apply_state_commit(transaction, commit); } - let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; - let finalization_outcome = - FinalizationOutcome::new(new_displaced.displaced_leaves.clone().into_iter()); + if remove_displaced { + let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; - self.blockchain.leaves.write().remove_displaced_leaves(&finalization_outcome); + self.blockchain.leaves.write().remove_displaced_leaves(FinalizationOutcome::new( + new_displaced.displaced_leaves.iter().copied(), + )); - self.prune_blocks(transaction, f_num, &new_displaced, current_transaction_justifications)?; + if !matches!(self.blocks_pruning, BlocksPruning::KeepAll) { + self.prune_displaced_branches(transaction, &new_displaced)?; + } + } + + self.prune_blocks(transaction, f_num, current_transaction_justifications)?; Ok(()) } @@ -1815,39 +1830,29 @@ impl Backend { &self, transaction: &mut Transaction, finalized_number: NumberFor, - displaced: &DisplacedLeavesAfterFinalization, current_transaction_justifications: &mut HashMap, ) -> ClientResult<()> { - match self.blocks_pruning { - BlocksPruning::KeepAll => {}, - BlocksPruning::Some(blocks_pruning) => { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized_number >= keep.into() { - let number = finalized_number.saturating_sub(keep.into()); - - // Before we prune a block, check if it is pinned - if let Some(hash) = self.blockchain.hash(number)? { - self.blockchain.insert_persisted_body_if_pinned(hash)?; - - // If the block was finalized in this transaction, it will not be in the db - // yet. - if let Some(justification) = - current_transaction_justifications.remove(&hash) - { - self.blockchain.insert_justifications_if_pinned(hash, justification); - } else { - self.blockchain.insert_persisted_justifications_if_pinned(hash)?; - } - }; + if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { + // Always keep the last finalized block + let keep = std::cmp::max(blocks_pruning, 1); + if finalized_number >= keep.into() { + let number = finalized_number.saturating_sub(keep.into()); + + // Before we prune a block, check if it is pinned + if let Some(hash) = self.blockchain.hash(number)? { + self.blockchain.insert_persisted_body_if_pinned(hash)?; + + // If the block was finalized in this transaction, it will not be in the db + // yet. 
+ if let Some(justification) = current_transaction_justifications.remove(&hash) { + self.blockchain.insert_justifications_if_pinned(hash, justification); + } else { + self.blockchain.insert_persisted_justifications_if_pinned(hash)?; + } + }; - self.prune_block(transaction, BlockId::::number(number))?; - } - self.prune_displaced_branches(transaction, displaced)?; - }, - BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, displaced)?; - }, + self.prune_block(transaction, BlockId::::number(number))?; + } } Ok(()) } @@ -1858,11 +1863,9 @@ impl Backend { displaced: &DisplacedLeavesAfterFinalization, ) -> ClientResult<()> { // Discard all blocks from displaced branches - for (_, tree_route) in displaced.tree_routes.iter() { - for r in tree_route.retracted() { - self.blockchain.insert_persisted_body_if_pinned(r.hash)?; - self.prune_block(transaction, BlockId::::hash(r.hash))?; - } + for &hash in displaced.displaced_blocks.iter() { + self.blockchain.insert_persisted_body_if_pinned(hash)?; + self.prune_block(transaction, BlockId::::hash(hash))?; } Ok(()) } @@ -2110,6 +2113,7 @@ impl sc_client_api::backend::Backend for Backend { None, justification, &mut current_transaction_justifications, + true, )?; self.storage.db.commit(transaction)?; @@ -2547,7 +2551,7 @@ pub(crate) mod tests { backend::{Backend as BTrait, BlockImportOperation as Op}, blockchain::Backend as BLBTrait, }; - use sp_blockchain::{lowest_common_ancestor, lowest_common_ancestor_multiblock, tree_route}; + use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, Header}, @@ -3109,121 +3113,118 @@ pub(crate) mod tests { } #[test] - fn lowest_common_ancestors_multiblock_works() { + fn displaced_leaves_after_finalizing_works() { let backend = Backend::::new_test(1000, 100); let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let genesis_number = 0; + let genesis_hash = + insert_header(&backend, genesis_number, Default::default(), None, Default::default()); // fork from genesis: 3 prong. // block 0 -> a1 -> a2 -> a3 - // | - // -> b1 -> b2 -> c1 -> c2 - // | - // -> d1 -> d2 - let a1 = insert_header(&backend, 1, block0, None, Default::default()); - let a2 = insert_header(&backend, 2, a1, None, Default::default()); - let a3 = insert_header(&backend, 3, a2, None, Default::default()); - - // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, None, Default::default()); - - // fork from b2. - let c1 = insert_header(&backend, 3, b2, None, H256::from([2; 32])); - let c2 = insert_header(&backend, 4, c1, None, Default::default()); - - // fork from b1. 
- let d1 = insert_header(&backend, 2, b1, None, H256::from([3; 32])); - let d2 = insert_header(&backend, 3, d1, None, Default::default()); - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, b2]).unwrap().unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a1, a3]).unwrap().unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, a1]).unwrap().unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a3]).unwrap().unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } + // \ + // -> b1 -> b2 -> c1 -> c2 + // \ + // -> d1 -> d2 + let a1_number = 1; + let a1_hash = insert_header(&backend, a1_number, genesis_hash, None, Default::default()); + let a2_number = 2; + let a2_hash = insert_header(&backend, a2_number, a1_hash, None, Default::default()); + let a3_number = 3; + let a3_hash = insert_header(&backend, a3_number, a2_hash, None, Default::default()); { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a1]).unwrap().unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); + let displaced = blockchain + .displaced_leaves_after_finalizing(genesis_hash, genesis_number) + .unwrap(); + assert_eq!(displaced.displaced_leaves, vec![]); + assert_eq!(displaced.displaced_blocks, vec![]); } - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a2, a2]).unwrap().unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); + let displaced_a1 = + blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, vec![]); + assert_eq!(displaced_a1.displaced_blocks, vec![]); + + let displaced_a2 = + blockchain.displaced_leaves_after_finalizing(a2_hash, a3_number).unwrap(); + assert_eq!(displaced_a2.displaced_leaves, vec![]); + assert_eq!(displaced_a2.displaced_blocks, vec![]); + + let displaced_a3 = + blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + assert_eq!(displaced_a3.displaced_leaves, vec![]); + assert_eq!(displaced_a3.displaced_blocks, vec![]); } - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a3, d2, c2]) - .unwrap() - .unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } + // fork from genesis: 2 prong. + let b1_number = 1; + let b1_hash = insert_header(&backend, b1_number, genesis_hash, None, H256::from([1; 32])); + let b2_number = 2; + let b2_hash = insert_header(&backend, b2_number, b1_hash, None, Default::default()); - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![c2, d2, b2]) - .unwrap() - .unwrap(); + // fork from b2. + let c1_number = 3; + let c1_hash = insert_header(&backend, c1_number, b2_hash, None, H256::from([2; 32])); + let c2_number = 4; + let c2_hash = insert_header(&backend, c2_number, c1_hash, None, Default::default()); - assert_eq!(lca.hash, b1); - assert_eq!(lca.number, 1); - } + // fork from b1. 
+ let d1_number = 2; + let d1_hash = insert_header(&backend, d1_number, b1_hash, None, H256::from([3; 32])); + let d2_number = 3; + let d2_hash = insert_header(&backend, d2_number, d1_hash, None, Default::default()); { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a1, a2, a3]) - .unwrap() - .unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); + let displaced_a1 = + blockchain.displaced_leaves_after_finalizing(a1_hash, a1_number).unwrap(); + assert_eq!( + displaced_a1.displaced_leaves, + vec![(c2_number, c2_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![b1_hash, b2_hash, c1_hash, c2_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced_a1.displaced_blocks, displaced_blocks); + + let displaced_a2 = + blockchain.displaced_leaves_after_finalizing(a2_hash, a2_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, displaced_a2.displaced_leaves); + assert_eq!(displaced_a1.displaced_blocks, displaced_a2.displaced_blocks); + + let displaced_a3 = + blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap(); + assert_eq!(displaced_a1.displaced_leaves, displaced_a3.displaced_leaves); + assert_eq!(displaced_a1.displaced_blocks, displaced_a3.displaced_blocks); } - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![b1, b2, d1]) - .unwrap() - .unwrap(); - - assert_eq!(lca.hash, b1); - assert_eq!(lca.number, 1); + let displaced = + blockchain.displaced_leaves_after_finalizing(b1_hash, b1_number).unwrap(); + assert_eq!(displaced.displaced_leaves, vec![(a3_number, a3_hash)]); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); } - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![]); - - assert_eq!(true, matches!(lca, Ok(None))); + let displaced = + blockchain.displaced_leaves_after_finalizing(b2_hash, b2_number).unwrap(); + assert_eq!( + displaced.displaced_leaves, + vec![(a3_number, a3_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); } - { - let lca = lowest_common_ancestor_multiblock(blockchain, vec![a1]).unwrap().unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); + let displaced = + blockchain.displaced_leaves_after_finalizing(c2_hash, c2_number).unwrap(); + assert_eq!( + displaced.displaced_leaves, + vec![(a3_number, a3_hash), (d2_number, d2_hash)] + ); + let mut displaced_blocks = vec![a1_hash, a2_hash, a3_hash, d1_hash, d2_hash]; + displaced_blocks.sort(); + assert_eq!(displaced.displaced_blocks, displaced_blocks); } } diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 1f54b82030ff2..c10c60822ff8d 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -17,43 +17,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.12.1" -schnellru = "0.2.1" -tracing = "0.1.29" +parking_lot = { workspace = true, default-features = true } +schnellru = { workspace = true } +tracing = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sc-executor-common = { path = "common" } -sc-executor-polkavm = { path = "polkavm" } -sc-executor-wasmtime = { path = "wasmtime" } -sp-api = { path = "../../primitives/api" } -sp-core = { path = "../../primitives/core" } 
-sp-externalities = { path = "../../primitives/externalities" } -sp-io = { path = "../../primitives/io" } -sp-panic-handler = { path = "../../primitives/panic-handler" } -sp-runtime-interface = { path = "../../primitives/runtime-interface" } -sp-trie = { path = "../../primitives/trie" } -sp-version = { path = "../../primitives/version" } -sp-wasm-interface = { path = "../../primitives/wasm-interface" } +codec = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-polkavm = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-panic-handler = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1.3.0" -wat = "1.0" -sc-runtime-test = { path = "runtime-test" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-runtime = { path = "../../primitives/runtime" } -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -sc-tracing = { path = "../tracing" } -sp-tracing = { path = "../../primitives/tracing" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +wat = { workspace = true } +sc-runtime-test = { workspace = true } +substrate-test-runtime = { workspace = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } tracing-subscriber = { workspace = true } -paste = "1.0" -regex = "1.6.0" -criterion = "0.5.1" -env_logger = "0.11" -num_cpus = "1.13.1" -tempfile = "3.3.0" +paste = { workspace = true, default-features = true } +regex = { workspace = true } +criterion = { workspace = true, default-features = true } +env_logger = { workspace = true } +num_cpus = { workspace = true } +tempfile = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index 8ff34c3709a5e..e985c75ca908a 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = { workspace = true } -wasm-instrument = "0.4" -sc-allocator = { path = "../../allocator" } -sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +wasm-instrument = { workspace = true, default-features = true } +sc-allocator = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, 
default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } polkavm = { workspace = true } [features] diff --git a/substrate/client/executor/polkavm/Cargo.toml b/substrate/client/executor/polkavm/Cargo.toml index 9d0eb8ccf0ee0..8b849209a07cf 100644 --- a/substrate/client/executor/polkavm/Cargo.toml +++ b/substrate/client/executor/polkavm/Cargo.toml @@ -19,5 +19,5 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } polkavm = { workspace = true } -sc-executor-common = { path = "../common" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +sc-executor-common = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } diff --git a/substrate/client/executor/runtime-test/Cargo.toml b/substrate/client/executor/runtime-test/Cargo.toml index 82610c4f50c28..d132f47ff877f 100644 --- a/substrate/client/executor/runtime-test/Cargo.toml +++ b/substrate/client/executor/runtime-test/Cargo.toml @@ -16,14 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"] } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-core = { workspace = true } +sp-io = { features = ["improved_panic_error_reporting"], workspace = true } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -32,6 +31,5 @@ std = [ "sp-io/std", "sp-runtime-interface/std", "sp-runtime/std", - "sp-std/std", "substrate-wasm-builder", ] diff --git a/substrate/client/executor/runtime-test/src/lib.rs b/substrate/client/executor/runtime-test/src/lib.rs index 40683fbb664aa..08a5e39dff2cf 100644 --- a/substrate/client/executor/runtime-test/src/lib.rs +++ b/substrate/client/executor/runtime-test/src/lib.rs @@ -32,7 +32,10 @@ pub fn wasm_binary_unwrap() -> &'static [u8] { } #[cfg(not(feature = "std"))] -use sp_std::{vec, vec::Vec}; +extern crate alloc; + +#[cfg(not(feature = "std"))] +use alloc::{vec, vec::Vec}; #[cfg(not(feature = "std"))] use sp_core::{ed25519, sr25519}; @@ -332,7 +335,7 @@ sp_core::wasm_export_functions! 
{ let test_message = b"Hello invalid heap memory"; let ptr = (heap_base + offset) as *mut u8; - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + let message_slice = unsafe { alloc::slice::from_raw_parts_mut(ptr, test_message.len()) }; assert_ne!(test_message, message_slice); message_slice.copy_from_slice(test_message); diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index d3d670650db78..e58b19bb12431 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -17,24 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -cfg-if = "1.0" -libc = "0.2.152" -parking_lot = "0.12.1" +cfg-if = { workspace = true } +libc = { workspace = true } +parking_lot = { workspace = true, default-features = true } # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! -wasmtime = { version = "8.0.1", default-features = false, features = [ +wasmtime = { features = [ "cache", "cranelift", "jitdump", "parallel-compilation", "pooling-allocator", -] } -anyhow = "1.0.81" -sc-allocator = { path = "../../allocator" } -sc-executor-common = { path = "../common" } -sp-runtime-interface = { path = "../../../primitives/runtime-interface" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = ["wasmtime"] } +], workspace = true } +anyhow = { workspace = true } +sc-allocator = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } # Here we include the rustix crate in the exactly same semver-compatible version as used by # wasmtime and enable its 'use-libc' flag. @@ -42,13 +42,13 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = [" # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory # problems (since then `mmap` etc. cannot be easily hooked into). 
-rustix = { version = "0.36.7", default-features = false, features = ["fs", "mm", "param", "std", "use-libc"] } +rustix = { features = ["fs", "mm", "param", "std", "use-libc"], workspace = true } [dev-dependencies] -wat = "1.0" -sc-runtime-test = { path = "../runtime-test" } -sp-io = { path = "../../../primitives/io" } -tempfile = "3.3.0" -paste = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.12" } -cargo_metadata = "0.15.4" +wat = { workspace = true } +sc-runtime-test = { workspace = true } +sp-io = { workspace = true, default-features = true } +tempfile = { workspace = true } +paste = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +cargo_metadata = { workspace = true } diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index 191ef5f19f8df..9da2296deee3c 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ansi_term = "0.12.1" -futures = "0.3.30" -futures-timer = "3.0.1" +ansi_term = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -sc-client-api = { path = "../api" } -sc-network-common = { path = "../network/common" } -sc-network-sync = { path = "../network/sync" } -sc-network = { path = "../network" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-runtime = { path = "../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index 443ce3507542c..cf128016370b4 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -parking_lot = "0.12.1" +array-bytes = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-application-crypto = { path = "../../primitives/application-crypto" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } [features] # This feature adds BLS crypto primitives. 
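
Most of the manifest hunks above and below follow one pattern: the concrete version string moves into the root `[workspace.dependencies]` table and each member crate opts in with `workspace = true`, keeping only its own feature selection. A minimal sketch of the two sides of that pattern (the crate names, versions, and features here are illustrative, not taken from this PR):

```toml
# Root Cargo.toml (illustrative entries only).
# The version is declared once, typically with default features disabled so
# that no-std crates can inherit the dependency unchanged.
[workspace.dependencies]
futures = { version = "0.3.30", default-features = false }
log = { version = "0.4.21", default-features = false }

# A member crate's Cargo.toml.
# `workspace = true` inherits the version from the root; the member only adds
# what it needs on top: extra features and, for std-side crates,
# `default-features = true` to re-enable the defaults.
[dependencies]
futures = { workspace = true, default-features = true }
log = { features = ["std"], workspace = true, default-features = true }
```

This is also why `default-features = true` appears explicitly on almost every migrated line: assuming the workspace-level entries disable default features so runtime (no-std) crates can share them, each std-side crate has to turn the defaults back on itself, and the behaviour of the crate stays exactly as it was with the old `path = "..."` declarations.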
diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 3cf3cdd15dad9..e219d36d3f79c 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -14,22 +14,22 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sc-client-api = { path = "../api" } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range" } -sc-offchain = { path = "../offchain" } -sp-runtime = { path = "../../primitives/runtime" } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -sc-block-builder = { path = "../block-builder" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -tokio = "1.37" +parking_lot = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index 25e6e316a8be0..5f856b4069a0f 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -15,14 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } -sp-runtime = { path = "../../../primitives/runtime" } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } 
[dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 1626305639498..2b06c3eca2344 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -arrayvec = "0.7.2" -blake2 = "0.10.4" -bytes = "1" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" +array-bytes = { workspace = true, default-features = true } +arrayvec = { workspace = true } +blake2 = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -mixnet = "0.7.0" -multiaddr = "0.17.1" -parking_lot = "0.12.1" -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-types = { path = "../network/types" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-api = { path = "../../primitives/api" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-mixnet = { path = "../../primitives/mixnet" } -sp-runtime = { path = "../../primitives/runtime" } +mixnet = { workspace = true } +multiaddr = { workspace = true } +parking_lot = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-mixnet = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 3eeea66511861..f1441e4a1cf27 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -17,23 +17,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ahash = "0.8.2" -futures = "0.3.30" -futures-timer = "3.0.1" -libp2p = "0.51.4" +ahash = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } log = { workspace = true, default-features = true } -schnellru = "0.2.1" -tracing = "0.1.29" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-sync = { path = "../network/sync" } -sc-network-types = { path = "../network/types" } -sp-runtime = { path = "../../primitives/runtime" } +schnellru = { workspace = true } +tracing = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features 
= true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -tokio = "1.37" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -quickcheck = { version = "1.0.3", default-features = false } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +tokio = { workspace = true, default-features = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +quickcheck = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index cd344d9196d87..414da9b2a5890 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -348,7 +348,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; + use crate::{ValidationResult, ValidatorContext}; use codec::{DecodeAll, Encode}; use futures::{ channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, @@ -363,6 +363,7 @@ mod tests { }; use sc_network_common::role::ObservedRole; use sc_network_sync::SyncEventStream; + use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ testing::H256, traits::{Block as BlockT, NumberFor}, diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index e04ea2a91e7cb..20d9922200c2c 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -67,9 +67,12 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use sc_network::{multiaddr, types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkPeers}; +use sc_network::{types::ProtocolName, NetworkBlock, NetworkEventStream, NetworkPeers}; use sc_network_sync::SyncEventStream; -use sc_network_types::PeerId; +use sc_network_types::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::iter; @@ -80,8 +83,7 @@ mod validator; /// Abstraction over a network. 
pub trait Network: NetworkPeers + NetworkEventStream { fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { - let addr = - iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let addr = Multiaddr::empty().with(Protocol::P2p(*who.as_ref())); let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 016afa95eceae..ac3f7a1b8c74c 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -542,12 +542,12 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; - use crate::multiaddr::Multiaddr; use futures::prelude::*; use sc_network::{ config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink, NetworkBlock, NetworkEventStream, NetworkPeers, ReputationChange, }; + use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::NumberFor, diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 29b14a4511cac..a0cf42eaab22f 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -17,71 +17,71 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -asynchronous-codec = "0.6" -bytes = "1" -cid = "0.9.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -either = "1.5.3" -fnv = "1.0.6" -futures = "0.3.30" -futures-timer = "3.0.2" -ip_network = "0.4.1" -libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } -linked_hash_set = "0.1.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +asynchronous-codec = { workspace = true } +bytes = { workspace = true, default-features = true } +cid = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +either = { workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +ip_network = { workspace = true } +libp2p = { features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"], workspace = true } +linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -parking_lot = "0.12.1" -partial_sort = "0.2.0" -pin-project = "1.0.12" -rand = "0.8.5" +mockall = { workspace = true } +parking_lot = { workspace = true, default-features = true } +partial_sort = { workspace = true } +pin-project = { workspace = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -smallvec = "1.11.0" +smallvec = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = { version = "1.22.0", features = ["macros", "sync"] } -tokio-stream = "0.1.7" -unsigned-varint = { version = "0.7.2", 
features = ["asynchronous_codec", "futures"] } -zeroize = "1.4.3" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -prost = "0.12.4" -sc-client-api = { path = "../api" } -sc-network-common = { path = "common" } -sc-network-types = { path = "types" } -sc-utils = { path = "../utils" } -sp-arithmetic = { path = "../../primitives/arithmetic" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -wasm-timer = "0.2" -litep2p = "0.5.0" -once_cell = "1.18.0" -void = "1.0.2" -schnellru = "0.2.1" +tokio = { features = ["macros", "sync"], workspace = true, default-features = true } +tokio-stream = { workspace = true } +unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } +zeroize = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +prost = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +wasm-timer = { workspace = true } +litep2p = { workspace = true } +once_cell = { workspace = true } +void = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -assert_matches = "1.3" -mockall = "0.11.3" -multistream-select = "0.12.1" -rand = "0.8.5" -tempfile = "3.1.0" -tokio = { version = "1.22.0", features = ["macros"] } -tokio-util = { version = "0.7.4", features = ["compat"] } -tokio-test = "0.4.2" -sc-block-builder = { path = "../block-builder" } -sc-network-light = { path = "light" } -sc-network-sync = { path = "sync" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-test-primitives = { path = "../../primitives/test-primitives" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +assert_matches = { workspace = true } +mockall = { workspace = true } +multistream-select = { workspace = true } +rand = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +tokio-util = { features = ["compat"], workspace = true } +tokio-test = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-test-primitives = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } [features] default = [] diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 9a1bf5b88ea1a..79326492159b1 100644 --- 
a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -async-trait = "0.1.79" -bitflags = "1.3.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +async-trait = { workspace = true } +bitflags = { workspace = true } +codec = { features = [ "derive", -] } -futures = "0.3.30" -libp2p-identity = { version = "0.1.3", features = ["peerid"] } -sc-consensus = { path = "../../consensus/common" } -sc-network-types = { path = "../types" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-runtime = { path = "../../../primitives/runtime" } +], workspace = true, default-features = true } +futures = { workspace = true } +libp2p-identity = { features = ["peerid"], workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" +tempfile = { workspace = true } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index baaed578b8841..52deaa93852af 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -16,21 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -async-channel = "1.8.0" -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +async-channel = { workspace = true } +array-bytes = { workspace = true, default-features = true } +codec = { features = [ "derive", -] } -futures = "0.3.30" +], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prost = "0.12.4" -sp-blockchain = { path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" } -sc-network-types = { path = "../types" } -sc-network = { path = ".." 
} -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } +prost = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 68deac0f47bc1..68816a10980d4 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -31,8 +31,8 @@ use crate::{ use futures::channel::oneshot; use libp2p::{ - core::Multiaddr, identify::Info as IdentifyInfo, identity::PublicKey, kad::RecordKey, - swarm::NetworkBehaviour, PeerId, + connection_limits::ConnectionLimits, core::Multiaddr, identify::Info as IdentifyInfo, + identity::PublicKey, kad::RecordKey, swarm::NetworkBehaviour, PeerId, StreamProtocol, }; use parking_lot::Mutex; @@ -47,8 +47,10 @@ pub use crate::request_responses::{InboundFailure, OutboundFailure, ResponseFail /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut")] +#[behaviour(to_swarm = "BehaviourOut")] pub struct Behaviour { + /// Connection limits. + connection_limits: libp2p::connection_limits::Behaviour, /// All the substrate-specific protocols. substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a @@ -180,6 +182,7 @@ impl Behaviour { request_response_protocols: Vec, peer_store_handle: Arc, external_addresses: Arc>>, + connection_limits: ConnectionLimits, ) -> Result { Ok(Self { substrate, @@ -193,6 +196,7 @@ impl Behaviour { request_response_protocols.into_iter(), peer_store_handle, )?, + connection_limits: libp2p::connection_limits::Behaviour::new(connection_limits), }) } @@ -267,7 +271,7 @@ impl Behaviour { pub fn add_self_reported_address_to_dht( &mut self, peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], + supported_protocols: &[StreamProtocol], addr: Multiaddr, ) { self.discovery.add_self_reported_address(peer_id, supported_protocols, addr); @@ -376,3 +380,9 @@ impl From for BehaviourOut { } } } + +impl From for BehaviourOut { + fn from(e: void::Void) -> Self { + void::unreachable(e) + } +} diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index 100a1e9dfb38e..e939558b20b8b 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -114,13 +114,13 @@ pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { /// Splits a Multiaddress into a Multiaddress and PeerId. pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { - let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + let multihash = match addr.pop() { + Some(multiaddr::Protocol::P2p(multihash)) => multihash, _ => return Err(ParseErr::PeerIdMissing), }; + let peer_id = PeerId::from_multihash(multihash).map_err(|_| ParseErr::InvalidPeerId)?; - Ok((who, addr)) + Ok((peer_id, addr)) } /// Address of a node, including its identity. 
diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 2c788ec713f34..3145b891a8d3c 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -55,20 +55,20 @@ use ip_network::IpNetwork; use libp2p::{ core::{Endpoint, Multiaddr}, kad::{ - handler::KademliaHandler, + self, record::store::{MemoryStore, RecordStore}, - GetClosestPeersError, GetRecordOk, Kademlia, KademliaBucketInserts, KademliaConfig, - KademliaEvent, QueryId, QueryResult, Quorum, Record, RecordKey, + Behaviour as Kademlia, BucketInserts, Config as KademliaConfig, Event as KademliaEvent, + GetClosestPeersError, GetRecordOk, QueryId, QueryResult, Quorum, Record, RecordKey, }, mdns::{self, tokio::Behaviour as TokioMdns}, multiaddr::Protocol, swarm::{ behaviour::{ toggle::{Toggle, ToggleConnectionHandler}, - DialFailure, FromSwarm, NewExternalAddr, + DialFailure, ExternalAddrConfirmed, FromSwarm, }, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, + StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -105,8 +105,8 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocol: Vec, - kademlia_legacy_protocol: Vec, + kademlia_protocol: Option, + kademlia_legacy_protocol: Option, kademlia_replication_factor: NonZeroUsize, } @@ -122,8 +122,8 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocol: Vec::new(), - kademlia_legacy_protocol: Vec::new(), + kademlia_protocol: None, + kademlia_legacy_protocol: None, kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) .expect("value is a constant; constant is non-zero; qed."), } @@ -179,8 +179,8 @@ impl DiscoveryConfig { fork_id: Option<&str>, protocol_id: &ProtocolId, ) -> &mut Self { - self.kademlia_protocol = kademlia_protocol_name(genesis_hash, fork_id); - self.kademlia_legacy_protocol = legacy_kademlia_protocol_name(protocol_id); + self.kademlia_protocol = Some(kademlia_protocol_name(genesis_hash, fork_id)); + self.kademlia_legacy_protocol = Some(legacy_kademlia_protocol_name(protocol_id)); self } @@ -213,26 +213,31 @@ impl DiscoveryConfig { kademlia_replication_factor, } = self; - let kademlia = if !kademlia_protocol.is_empty() { + let kademlia = if let Some(ref kademlia_protocol) = kademlia_protocol { let mut config = KademliaConfig::default(); config.set_replication_factor(kademlia_replication_factor); // Populate kad with both the legacy and the new protocol names. // Remove the legacy protocol: // https://github.com/paritytech/polkadot-sdk/issues/504 - let kademlia_protocols = [kademlia_protocol.clone(), kademlia_legacy_protocol]; + let kademlia_protocols = if let Some(legacy_protocol) = kademlia_legacy_protocol { + vec![kademlia_protocol.clone(), legacy_protocol] + } else { + vec![kademlia_protocol.clone()] + }; config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); - config.set_record_filtering(libp2p::kad::KademliaStoreInserts::FilterBoth); + config.set_record_filtering(libp2p::kad::StoreInserts::FilterBoth); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. 
In order to control which peer is added, disable the // auto-insertion and instead add peers manually. - config.set_kbucket_inserts(KademliaBucketInserts::Manual); + config.set_kbucket_inserts(BucketInserts::Manual); config.disjoint_query_paths(kademlia_disjoint_query_paths); let store = MemoryStore::new(local_peer_id); let mut kad = Kademlia::with_config(local_peer_id, store, config); + kad.set_mode(Some(kad::Mode::Server)); for (peer_id, addr) in &permanent_addresses { kad.add_address(peer_id, addr.clone()); @@ -323,7 +328,7 @@ pub struct DiscoveryBehaviour { /// /// Remove when all nodes are upgraded to genesis hash and fork ID-based Kademlia: /// . - kademlia_protocol: Vec, + kademlia_protocol: Option, } impl DiscoveryBehaviour { @@ -369,7 +374,7 @@ impl DiscoveryBehaviour { pub fn add_self_reported_address( &mut self, peer_id: &PeerId, - supported_protocols: &[impl AsRef<[u8]>], + supported_protocols: &[StreamProtocol], addr: Multiaddr, ) { if let Some(kademlia) = self.kademlia.as_mut() { @@ -386,10 +391,12 @@ impl DiscoveryBehaviour { // Extract the chain-based Kademlia protocol from `kademlia.protocol_name()` // when all nodes are upgraded to genesis hash and fork ID-based Kademlia: // https://github.com/paritytech/polkadot-sdk/issues/504. - if !supported_protocols - .iter() - .any(|p| p.as_ref() == self.kademlia_protocol.as_slice()) - { + if !supported_protocols.iter().any(|p| { + p == self + .kademlia_protocol + .as_ref() + .expect("kademlia protocol was checked above to be enabled; qed") + }) { trace!( target: "sub-libp2p", "Ignoring self-reported address {} from {} as remote node is not part of the \ @@ -503,7 +510,7 @@ impl DiscoveryBehaviour { #[derive(Debug)] pub enum DiscoveryOut { /// A connection to a peer has been established but the peer has not been - /// added to the routing table because [`KademliaBucketInserts::Manual`] is + /// added to the routing table because [`BucketInserts::Manual`] is /// configured. If the peer is to be included in the routing table, it must /// be explicitly added via /// [`DiscoveryBehaviour::add_self_reported_address`]. @@ -552,8 +559,9 @@ pub enum DiscoveryOut { } impl NetworkBehaviour for DiscoveryBehaviour { - type ConnectionHandler = ToggleConnectionHandler>; - type OutEvent = DiscoveryOut; + type ConnectionHandler = + ToggleConnectionHandler< as NetworkBehaviour>::ConnectionHandler>; + type ToSwarm = DiscoveryOut; fn handle_established_inbound_connection( &mut self, @@ -689,11 +697,11 @@ impl NetworkBehaviour for DiscoveryBehaviour { FromSwarm::ListenerError(e) => { self.kademlia.on_swarm_event(FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => { + FromSwarm::ExternalAddrExpired(e) => { // We intentionally don't remove the element from `known_external_addresses` in // order to not print the log line again. 
- self.kademlia.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); + self.kademlia.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => { self.kademlia.on_swarm_event(FromSwarm::NewListener(e)); @@ -701,8 +709,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { FromSwarm::ExpiredListenAddr(e) => { self.kademlia.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); }, - FromSwarm::NewExternalAddr(e @ NewExternalAddr { addr }) => { - let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into())); + FromSwarm::NewExternalAddrCandidate(e) => { + self.kademlia.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + }, + FromSwarm::AddressChange(e) => { + self.kademlia.on_swarm_event(FromSwarm::AddressChange(e)); + }, + FromSwarm::NewListenAddr(e) => { + self.kademlia.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); + }, + FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { + let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id)); if Self::can_add_to_dht(addr) { // NOTE: we might re-discover the same address multiple times @@ -716,14 +734,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - self.kademlia.on_swarm_event(FromSwarm::NewExternalAddr(e)); - }, - FromSwarm::AddressChange(e) => { - self.kademlia.on_swarm_event(FromSwarm::AddressChange(e)); - }, - FromSwarm::NewListenAddr(e) => { - self.kademlia.on_swarm_event(FromSwarm::NewListenAddr(e)); - self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); }, } } @@ -741,7 +752,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) @@ -963,10 +974,17 @@ impl NetworkBehaviour for DiscoveryBehaviour { ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }), ToSwarm::NotifyHandler { peer_id, handler, event } => return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -979,8 +997,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { continue } - self.pending_events - .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + self.pending_events.extend( + list.into_iter().map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id)), + ); if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) } @@ -990,13 +1009,19 @@ impl NetworkBehaviour for DiscoveryBehaviour { ToSwarm::Dial { .. } => { unreachable!("mDNS never dials!"); }, - ToSwarm::NotifyHandler { event, .. 
} => match event {}, /* `event` is an */ - // enum with no - // variant - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), + // `event` is an enum with no variant + ToSwarm::NotifyHandler { event, .. } => match event {}, ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -1005,21 +1030,24 @@ impl NetworkBehaviour for DiscoveryBehaviour { } /// Legacy (fallback) Kademlia protocol name based on `protocol_id`. -fn legacy_kademlia_protocol_name(id: &ProtocolId) -> Vec { - let mut v = vec![b'/']; - v.extend_from_slice(id.as_ref().as_bytes()); - v.extend_from_slice(b"/kad"); - v +fn legacy_kademlia_protocol_name(id: &ProtocolId) -> StreamProtocol { + let name = format!("/{}/kad", id.as_ref()); + StreamProtocol::try_from_owned(name).expect("protocol name is valid. qed") } /// Kademlia protocol name based on `genesis_hash` and `fork_id`. -fn kademlia_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> Vec { +fn kademlia_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, +) -> StreamProtocol { let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref()); - if let Some(fork_id) = fork_id { - format!("/{}/{}/kad", genesis_hash_hex, fork_id).as_bytes().into() + let name = if let Some(fork_id) = fork_id { + format!("/{genesis_hash_hex}/{fork_id}/kad") } else { - format!("/{}/kad", genesis_hash_hex).as_bytes().into() - } + format!("/{genesis_hash_hex}/kad") + }; + + StreamProtocol::try_from_owned(name).expect("protocol name is valid. qed") } #[cfg(test)] @@ -1036,7 +1064,7 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmBuilder, SwarmEvent}, + swarm::{Executor, Swarm, SwarmEvent}, yamux, Multiaddr, }; use sp_core::hash::H256; @@ -1082,7 +1110,8 @@ mod tests { }; let runtime = tokio::runtime::Runtime::new().unwrap(); - let mut swarm = SwarmBuilder::with_executor( + #[allow(deprecated)] + let mut swarm = libp2p::swarm::SwarmBuilder::with_executor( transport, behaviour, keypair.public().to_peer_id(), diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index d0ccbd8622b88..b518a2094d766 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Network event types. These are are not the part of the protocol, but rather +//! Network event types. These are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. 
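In the discovery changes above, the Kademlia protocol-name helpers now return `StreamProtocol` (built with `StreamProtocol::try_from_owned`) instead of raw byte vectors, but the naming scheme itself is unchanged. A plain-`String` sketch of that scheme; the protocol id, genesis hash and fork id below are placeholder values, not a real chain's:

```rust
// Same format strings as `legacy_kademlia_protocol_name` / `kademlia_protocol_name` above,
// returning `String` instead of libp2p's `StreamProtocol` (which additionally enforces the
// leading '/').
fn legacy_kademlia_protocol_name(protocol_id: &str) -> String {
    format!("/{protocol_id}/kad")
}

fn kademlia_protocol_name(genesis_hash_hex: &str, fork_id: Option<&str>) -> String {
    match fork_id {
        Some(fork_id) => format!("/{genesis_hash_hex}/{fork_id}/kad"),
        None => format!("/{genesis_hash_hex}/kad"),
    }
}

fn main() {
    // Placeholder protocol id, genesis hash and fork id, for illustration only.
    assert_eq!(legacy_kademlia_protocol_name("sup"), "/sup/kad");
    assert_eq!(kademlia_protocol_name("deadbeef", None), "/deadbeef/kad");
    assert_eq!(kademlia_protocol_name("deadbeef", Some("my-fork")), "/deadbeef/my-fork/kad");
}
```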
use crate::types::ProtocolName; diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index ff5f492df246a..6ff05e6af327a 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -33,8 +33,9 @@ use litep2p::{ libp2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ - Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent, - KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, + Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, + IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum, + Record, RecordKey, RecordsType, }, ping::{Config as PingConfig, PingEvent}, }, @@ -52,7 +53,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::Duration, + time::{Duration, Instant}, }; /// Logging target for the file. @@ -138,6 +139,15 @@ pub enum DiscoveryEvent { /// Query ID. query_id: QueryId, }, + + /// Incoming record to store. + IncomingRecord { + /// Record. + record: Record, + }, + + /// Started a random Kademlia query. + RandomKademliaStarted, } /// Discovery. @@ -249,6 +259,7 @@ impl Discovery { KademliaConfigBuilder::new() .with_known_peers(known_peers) .with_protocol_names(protocol_names) + .with_incoming_records_validation_mode(IncomingRecordValidationMode::Manual) .build() }; @@ -295,7 +306,7 @@ impl Discovery { ) { if self.local_protocols.is_disjoint(&supported_protocols) { log::trace!( - target: "sub-libp2p", + target: LOG_TARGET, "Ignoring self-reported address of peer {peer} as remote node is not part of the \ Kademlia DHT supported by the local node.", ); @@ -340,6 +351,30 @@ impl Discovery { .await } + /// Store record in the local DHT store. + pub async fn store_record( + &mut self, + key: KademliaKey, + value: Vec, + publisher: Option, + expires: Option, + ) { + log::debug!( + target: LOG_TARGET, + "Storing DHT record with key {key:?}, originally published by {publisher:?}, \ + expires {expires:?}.", + ); + + self.kademlia_handle + .store_record(Record { + key: RecordKey::new(&key.to_vec()), + value, + publisher: publisher.map(Into::into), + expires, + }) + .await; + } + /// Check if the observed address is a known address. 
fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool { let mut known = known.iter(); @@ -424,6 +459,7 @@ impl Stream for Discovery { match this.kademlia_handle.try_find_node(peer) { Ok(query_id) => { this.find_node_query_id = Some(query_id); + return Poll::Ready(Some(DiscoveryEvent::RandomKademliaStarted)) }, Err(()) => { this.duration_to_next_find_query = cmp::min( @@ -481,6 +517,16 @@ impl Stream for Discovery { false => return Poll::Ready(Some(DiscoveryEvent::QueryFailed { query_id })), } }, + Poll::Ready(Some(KademliaEvent::IncomingRecord { record })) => { + log::trace!( + target: LOG_TARGET, + "incoming `PUT_RECORD` request with key {:?} from publisher {:?}", + record.key, + record.publisher, + ); + + return Poll::Ready(Some(DiscoveryEvent::IncomingRecord { record })) + }, } match Pin::new(&mut this.identify_event_stream).poll_next(cx) { diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index ae287052b2d44..34ca5b716101f 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -58,7 +58,7 @@ use litep2p::{ protocol::{ libp2p::{ bitswap::Config as BitswapConfig, - kademlia::{QueryId, RecordsType}, + kademlia::{QueryId, Record, RecordsType}, }, request_response::ConfigBuilder as RequestResponseConfigBuilder, }, @@ -369,11 +369,13 @@ impl Litep2pNetworkBackend { .with_websocket(WebSocketTransportConfig { listen_addresses: websocket.into_iter().flatten().map(Into::into).collect(), yamux_config: yamux_config.clone(), + nodelay: true, ..Default::default() }) .with_tcp(TcpTransportConfig { listen_addresses: tcp.into_iter().flatten().map(Into::into).collect(), yamux_config, + nodelay: true, ..Default::default() }) } @@ -698,6 +700,9 @@ impl NetworkBackend for Litep2pNetworkBac let query_id = self.discovery.put_value(key.clone(), value).await; self.pending_put_values.insert(query_id, (key, Instant::now())); } + NetworkServiceCommand::StoreRecord { key, value, publisher, expires } => { + self.discovery.store_record(key, value, publisher.map(Into::into), expires).await; + } NetworkServiceCommand::EventStream { tx } => { self.event_streams.push(tx); } @@ -846,6 +851,10 @@ impl NetworkBackend for Litep2pNetworkBac "`PUT_VALUE` for {key:?} ({query_id:?}) succeeded", ); + self.event_streams.send(Event::Dht( + DhtEvent::ValuePut(libp2p::kad::RecordKey::new(&key)) + )); + if let Some(ref metrics) = self.metrics { metrics .kademlia_query_duration @@ -915,6 +924,22 @@ impl NetworkBackend for Litep2pNetworkBac "ping time with {peer:?}: {rtt:?}", ); } + Some(DiscoveryEvent::IncomingRecord { record: Record { key, value, publisher, expires }} ) => { + self.event_streams.send(Event::Dht( + DhtEvent::PutRecordRequest( + libp2p::kad::RecordKey::new(&key), + value, + publisher.map(Into::into), + expires, + ) + )); + }, + + Some(DiscoveryEvent::RandomKademliaStarted) => { + if let Some(metrics) = self.metrics.as_ref() { + metrics.kademlia_random_queries_total.inc(); + } + } }, event = self.litep2p.next_event() => match event { Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }) => { diff --git a/substrate/client/network/src/litep2p/peerstore.rs b/substrate/client/network/src/litep2p/peerstore.rs index dd377ea09af9b..347aa0b90eed5 100644 --- a/substrate/client/network/src/litep2p/peerstore.rs +++ b/substrate/client/network/src/litep2p/peerstore.rs @@ -42,14 +42,20 @@ use std::{ const LOG_TARGET: &str = "sub-libp2p::peerstore"; /// We don't accept nodes whose reputation is under this 
value. -pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); +pub const BANNED_THRESHOLD: i32 = 71 * (i32::MIN / 100); /// Relative decrement of a reputation value that is applied every second. I.e., for inverse -/// decrement of 50 we decrease absolute value of the reputation by 1/50. This corresponds to a -/// factor of `k = 0.98`. It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half, -/// or 34.3 seconds for the values above. In this setup the maximum allowed absolute value of -/// `i32::MAX` becomes 0 in ~1100 seconds (actually less due to integer arithmetic). -const INVERSE_DECREMENT: i32 = 50; +/// decrement of 200 we decrease absolute value of the reputation by 1/200. +/// +/// This corresponds to a factor of `k = 0.995`, where k = 1 - 1 / INVERSE_DECREMENT. +/// +/// It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half, or 138.63 seconds for the +/// values above. +/// +/// In this setup: +/// - `i32::MAX` becomes 0 in exactly 3544 seconds, or approximately 59 minutes +/// - `i32::MIN` escapes the banned threshold in 69 seconds +const INVERSE_DECREMENT: i32 = 200; /// Amount of time between the moment we last updated the [`PeerStore`] entry and the moment we /// remove it, once the reputation value reaches 0. @@ -79,6 +85,11 @@ impl PeerInfo { self.reputation < BANNED_THRESHOLD } + fn add_reputation(&mut self, increment: i32) { + self.reputation = self.reputation.saturating_add(increment); + self.bump_last_updated(); + } + fn decay_reputation(&mut self, seconds_passed: u64) { // Note that decaying the reputation value happens "on its own", // so we don't do `bump_last_updated()`. @@ -97,6 +108,10 @@ impl PeerInfo { } } } + + fn bump_last_updated(&mut self) { + self.last_updated = Instant::now(); + } } #[derive(Debug, Default)] @@ -163,7 +178,7 @@ impl PeerStoreProvider for PeerstoreHandle { match lock.peers.get_mut(&peer) { Some(info) => { - info.reputation = info.reputation.saturating_add(reputation_change.value); + info.add_reputation(reputation_change.value); }, None => { lock.peers.insert( @@ -362,7 +377,7 @@ mod tests { #[test] fn decaying_max_reputation_finally_yields_zero() { const INITIAL_REPUTATION: i32 = i32::MAX; - const SECONDS: u64 = 1000; + const SECONDS: u64 = 3544; let mut peer_info = PeerInfo::default(); peer_info.reputation = INITIAL_REPUTATION; @@ -377,7 +392,7 @@ mod tests { #[test] fn decaying_min_reputation_finally_yields_zero() { const INITIAL_REPUTATION: i32 = i32::MIN; - const SECONDS: u64 = 1000; + const SECONDS: u64 = 3544; let mut peer_info = PeerInfo::default(); peer_info.reputation = INITIAL_REPUTATION; diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 8f36b0828bd39..7d972bbeee5c7 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -76,6 +76,21 @@ pub enum NetworkServiceCommand { value: Vec, }, + /// Store record in the local DHT store. + StoreRecord { + /// Record key. + key: KademliaKey, + + /// Record value. + value: Vec, + + /// Original publisher of the record. + publisher: Option, + + /// Record expiration time as measured by a local, monothonic clock. + expires: Option, + }, + /// Query network status. Status { /// `oneshot::Sender` for sending the status. 
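The reworked `INVERSE_DECREMENT` documentation in the peerstore changes above quotes a reputation half-life of about 138.63 seconds and roughly 69 seconds for `i32::MIN` to climb back above the new 71% ban threshold. Both numbers follow from the stated per-second factor `k = 1 - 1/200`; a quick floating-point check of that arithmetic (ignoring the integer truncation the real `decay_reputation` performs):

```rust
fn main() {
    const INVERSE_DECREMENT: f64 = 200.0;
    const BANNED_FRACTION: f64 = 0.71; // BANNED_THRESHOLD = 71% of i32::MIN

    // Per-second decay factor: each second the reputation loses 1/200 of its value.
    let k = 1.0 - 1.0 / INVERSE_DECREMENT; // 0.995

    // Half-life: k^t = 0.5  =>  t = ln(0.5) / ln(k) ≈ 138.3 s
    // (the source comment approximates ln(k) by -1/200, which gives the quoted 138.63 s).
    let half_life = 0.5_f64.ln() / k.ln();

    // Time for a reputation of i32::MIN to decay back above the banned threshold:
    // k^t = 0.71  =>  t ≈ 68-69 s, matching the "69 seconds" in the comment.
    let unban = BANNED_FRACTION.ln() / k.ln();

    println!("half-life ≈ {half_life:.1} s, unban after ≈ {unban:.1} s");
}
```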
@@ -240,13 +255,17 @@ impl NetworkDHTProvider for Litep2pNetworkService { fn store_record( &self, - _key: KademliaKey, - _value: Vec, - _publisher: Option, - _expires: Option, + key: KademliaKey, + value: Vec, + publisher: Option, + expires: Option, ) { - // Will be added once litep2p is released with: https://github.com/paritytech/litep2p/pull/135 - log::warn!(target: LOG_TARGET, "Store record is not implemented for litep2p"); + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StoreRecord { + key, + value, + publisher, + expires, + }); } } @@ -264,8 +283,20 @@ impl NetworkStatusProvider for Litep2pNetworkService { async fn network_state(&self) -> Result { Ok(NetworkState { peer_id: self.local_peer_id.to_base58(), - listened_addresses: self.listen_addresses.read().iter().cloned().collect(), - external_addresses: self.external_addresses.read().iter().cloned().collect(), + listened_addresses: self + .listen_addresses + .read() + .iter() + .cloned() + .map(|a| Multiaddr::from(a).into()) + .collect(), + external_addresses: self + .external_addresses + .read() + .iter() + .cloned() + .map(|a| Multiaddr::from(a).into()) + .collect(), connected_peers: HashMap::new(), not_connected_peers: HashMap::new(), // TODO: Check what info we can include here. diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index 2735bd873db91..21eeea6bcc0c3 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -31,14 +31,15 @@ use libp2p::{ Info as IdentifyInfo, }, identity::PublicKey, - ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent, Success as PingSuccess}, + ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent}, swarm::{ behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, - ListenFailure, + AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, + ExternalAddrConfirmed, FromSwarm, ListenFailure, }, - ConnectionDenied, ConnectionHandler, ConnectionId, IntoConnectionHandlerSelect, - NetworkBehaviour, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, + NetworkBehaviour, NewExternalAddrCandidate, PollParameters, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; @@ -47,7 +48,7 @@ use parking_lot::Mutex; use smallvec::SmallVec; use std::{ - collections::{hash_map::Entry, HashSet}, + collections::{hash_map::Entry, HashSet, VecDeque}, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -71,6 +72,8 @@ pub struct PeerInfoBehaviour { garbage_collect: Pin + Send>>, /// Record keeping of external addresses. Data is queried by the `NetworkService`. external_addresses: ExternalAddresses, + /// Pending events to emit to [`Swarm`](libp2p::swarm::Swarm). + pending_actions: VecDeque>>, } /// Information about a node we're connected to. @@ -134,6 +137,7 @@ impl PeerInfoBehaviour { nodes_info: FnvHashMap::default(), garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), external_addresses: ExternalAddresses { addresses: external_addresses }, + pending_actions: Default::default(), } } @@ -148,13 +152,18 @@ impl PeerInfoBehaviour { /// Inserts a ping time in the cache. Has no effect if we don't have any entry for that node, /// which shouldn't happen. 
- fn handle_ping_report(&mut self, peer_id: &PeerId, ping_time: Duration) { - trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer_id, ping_time); + fn handle_ping_report( + &mut self, + peer_id: &PeerId, + ping_time: Duration, + connection: ConnectionId, + ) { + trace!(target: "sub-libp2p", "Ping time with {:?} via {:?}: {:?}", peer_id, connection, ping_time); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.latest_ping = Some(ping_time); } else { error!(target: "sub-libp2p", - "Received ping from node we're not connected to {:?}", peer_id); + "Received ping from node we're not connected to {:?} via {:?}", peer_id, connection); } } @@ -208,11 +217,11 @@ pub enum PeerInfoEvent { } impl NetworkBehaviour for PeerInfoBehaviour { - type ConnectionHandler = IntoConnectionHandlerSelect< + type ConnectionHandler = ConnectionHandlerSelect< ::ConnectionHandler, ::ConnectionHandler, >; - type OutEvent = PeerInfoEvent; + type ToSwarm = PeerInfoEvent; fn handle_pending_inbound_connection( &mut self, @@ -378,9 +387,9 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::ListenerError(e)); self.identify.on_swarm_event(FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => { - self.ping.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); - self.identify.on_swarm_event(FromSwarm::ExpiredExternalAddr(e)); + FromSwarm::ExternalAddrExpired(e) => { + self.ping.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); + self.identify.on_swarm_event(FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => { self.ping.on_swarm_event(FromSwarm::NewListener(e)); @@ -391,10 +400,23 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); self.external_addresses.remove(e.addr); }, - FromSwarm::NewExternalAddr(e) => { - self.ping.on_swarm_event(FromSwarm::NewExternalAddr(e)); - self.identify.on_swarm_event(FromSwarm::NewExternalAddr(e)); - self.external_addresses.add(e.addr.clone()); + FromSwarm::NewExternalAddrCandidate(e @ NewExternalAddrCandidate { addr }) => { + self.ping.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + self.identify.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); + + // Manually confirm all external address candidates. + // TODO: consider adding [AutoNAT protocol](https://docs.rs/libp2p/0.52.3/libp2p/autonat/index.html) + // (must go through the polkadot protocol spec) or implemeting heuristics for + // approving external address candidates. This can be done, for example, by + // approving only addresses reported by multiple peers. + // See also https://github.com/libp2p/rust-libp2p/pull/4721 introduced + // in libp2p v0.53 for heuristics approach. + self.pending_actions.push_back(ToSwarm::ExternalAddrConfirmed(addr.clone())); + }, + FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { + self.ping.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); + self.identify.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); + self.external_addresses.add(addr.clone()); }, FromSwarm::AddressChange(e @ AddressChange { peer_id, old, new, .. 
}) => { self.ping.on_swarm_event(FromSwarm::AddressChange(e)); @@ -437,13 +459,17 @@ impl NetworkBehaviour for PeerInfoBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { + if let Some(event) = self.pending_actions.pop_front() { + return Poll::Ready(event) + } + loop { match self.ping.poll(cx, params) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(ev)) => { - if let PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } = ev { - self.handle_ping_report(&peer, rtt) + if let PingEvent { peer, result: Ok(rtt), connection } = ev { + self.handle_ping_report(&peer, rtt, connection) } }, Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), @@ -453,10 +479,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: Either::Left(event), }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } @@ -482,10 +516,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: Either::Right(event), }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 2e57ff1b6a86f..977c4c4de6632 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -163,9 +163,6 @@ impl Protocol { pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - // Note: no need to remove a peer from `self.peers` if we are dealing with sync - // protocol, because it will be done when handling - // `NotificationsOut::CustomProtocolClosed`. 
self.behaviour.disconnect_peer(peer_id, SetId::from(position)); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") @@ -229,7 +226,7 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol { type ConnectionHandler = ::ConnectionHandler; - type OutEvent = CustomMessageOutcome; + type ToSwarm = CustomMessageOutcome; fn handle_established_inbound_connection( &mut self, @@ -290,17 +287,25 @@ impl NetworkBehaviour for Protocol { &mut self, cx: &mut std::task::Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - Poll::Ready(ToSwarm::ReportObservedAddr { address, score }) => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + Poll::Ready(ToSwarm::ListenOn { opts }) => + return Poll::Ready(ToSwarm::ListenOn { opts }), + Poll::Ready(ToSwarm::RemoveListener { id }) => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; let outcome = match event { diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index 03ba437a66726..cb4f089995e3c 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -1198,7 +1198,7 @@ impl Notifications { impl NetworkBehaviour for Notifications { type ConnectionHandler = NotifsHandler; - type OutEvent = NotificationsOut; + type ToSwarm = NotificationsOut; fn handle_pending_inbound_connection( &mut self, @@ -1678,10 +1678,11 @@ impl NetworkBehaviour for Notifications { FromSwarm::ListenerClosed(_) => {}, FromSwarm::ListenFailure(_) => {}, FromSwarm::ListenerError(_) => {}, - FromSwarm::ExpiredExternalAddr(_) => {}, + FromSwarm::ExternalAddrExpired(_) => {}, FromSwarm::NewListener(_) => {}, FromSwarm::ExpiredListenAddr(_) => {}, - FromSwarm::NewExternalAddr(_) => {}, + FromSwarm::NewExternalAddrCandidate(_) => {}, + FromSwarm::ExternalAddrConfirmed(_) => {}, FromSwarm::AddressChange(_) => {}, FromSwarm::NewListenAddr(_) => {}, } @@ -2239,7 +2240,7 @@ impl NetworkBehaviour for Notifications { &mut self, cx: &mut Context, _params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event) } @@ -2382,7 +2383,6 @@ mod tests { protocol::notifications::handler::tests::*, protocol_controller::{IncomingIndex, ProtoSetConfig, ProtocolController}, }; - use libp2p::swarm::AddressRecord; use sc_utils::mpsc::tracing_unbounded; use std::{collections::HashSet, iter}; @@ -2402,31 +2402,14 @@ mod tests { } #[derive(Clone)] - struct MockPollParams { - peer_id: PeerId, - addr: Multiaddr, - } + struct MockPollParams 
{} impl PollParameters for MockPollParams { type SupportedProtocolsIter = std::vec::IntoIter>; - type ListenedAddressesIter = std::vec::IntoIter; - type ExternalAddressesIter = std::vec::IntoIter; fn supported_protocols(&self) -> Self::SupportedProtocolsIter { vec![].into_iter() } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - vec![self.addr.clone()].into_iter() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - vec![].into_iter() - } - - fn local_peer_id(&self) -> &PeerId { - &self.peer_id - } } fn development_notifs( @@ -3331,7 +3314,7 @@ mod tests { notif.on_swarm_event(FromSwarm::DialFailure(libp2p::swarm::behaviour::DialFailure { peer_id: Some(peer), - error: &libp2p::swarm::DialError::Banned, + error: &libp2p::swarm::DialError::Aborted, connection_id: ConnectionId::new_unchecked(1337), })); @@ -3877,7 +3860,7 @@ mod tests { let now = Instant::now(); notif.on_swarm_event(FromSwarm::DialFailure(libp2p::swarm::behaviour::DialFailure { peer_id: Some(peer), - error: &libp2p::swarm::DialError::Banned, + error: &libp2p::swarm::DialError::Aborted, connection_id: ConnectionId::new_unchecked(0), })); @@ -4003,7 +3986,7 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_some()); if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams { peer_id: PeerId::random(), addr: Multiaddr::empty() }; + let mut params = MockPollParams {}; loop { futures::future::poll_fn(|cx| { @@ -4115,7 +4098,7 @@ mod tests { // verify that the code continues to keep the peer disabled by resetting the timer // after the first one expired. if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams { peer_id: PeerId::random(), addr: Multiaddr::empty() }; + let mut params = MockPollParams {}; loop { futures::future::poll_fn(|cx| { diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index cb09583b73a2d..967ef614c5560 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -75,8 +75,8 @@ use futures::{ use libp2p::{ core::ConnectedPoint, swarm::{ - handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, - NegotiatedSubstream, SubstreamProtocol, + handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, + SubstreamProtocol, }, PeerId, }; @@ -199,7 +199,7 @@ enum State { /// emitted. OpenDesiredByRemote { /// Substream opened by the remote and that hasn't been accepted/rejected yet. - in_substream: NotificationsInSubstream, + in_substream: NotificationsInSubstream, /// See [`State::Closed::pending_opening`]. pending_opening: bool, @@ -212,7 +212,7 @@ enum State { /// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`]. Opening { /// Substream opened by the remote. If `Some`, has been accepted. - in_substream: Option>, + in_substream: Option>, /// Is the connection inbound. inbound: bool, }, @@ -236,14 +236,14 @@ enum State { /// Always `Some` on transition to [`State::Open`]. Switched to `None` only if the remote /// closed the substream. If `None`, a [`NotifsHandlerOut::CloseDesired`] event has been /// emitted. - out_substream: Option>, + out_substream: Option>, /// Substream opened by the remote. /// /// Contrary to the `out_substream` field, operations continue as normal even if the /// substream has been closed by the remote. 
A `None` is treated the same way as if there /// was an idle substream. - in_substream: Option>, + in_substream: Option>, }, } @@ -481,8 +481,8 @@ pub enum NotifsHandlerError { } impl ConnectionHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; + type FromBehaviour = NotifsHandlerIn; + type ToBehaviour = NotifsHandlerOut; type Error = NotifsHandlerError; type InboundProtocol = UpgradeCollec; type OutboundProtocol = NotificationsOut; @@ -517,7 +517,7 @@ impl ConnectionHandler for NotifsHandler { match protocol_info.state { State::Closed { pending_opening } => { - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake: in_substream_open.handshake, @@ -586,7 +586,7 @@ impl ConnectionHandler for NotifsHandler { in_substream: in_substream.take(), }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: new_open.negotiated_fallback, @@ -600,6 +600,8 @@ impl ConnectionHandler for NotifsHandler { } }, ConnectionEvent::AddressChange(_address_change) => {}, + ConnectionEvent::LocalProtocolsChange(_) => {}, + ConnectionEvent::RemoteProtocolsChange(_) => {}, ConnectionEvent::DialUpgradeError(dial_upgrade_error) => match self.protocols [dial_upgrade_error.info] .state @@ -614,7 +616,7 @@ impl ConnectionHandler for NotifsHandler { self.protocols[dial_upgrade_error.info].state = State::Closed { pending_opening: false }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultErr { protocol_index: dial_upgrade_error.info }, )); }, @@ -701,7 +703,7 @@ impl ConnectionHandler for NotifsHandler { self.protocols[protocol_index].state = State::Closed { pending_opening: true }; - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenResultErr { protocol_index }, )); }, @@ -711,7 +713,7 @@ impl ConnectionHandler for NotifsHandler { State::Closed { .. } => {}, } - self.events_queue.push_back(ConnectionHandlerEvent::Custom( + self.events_queue.push_back(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::CloseResult { protocol_index }, )); }, @@ -726,9 +728,11 @@ impl ConnectionHandler for NotifsHandler { // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote // to express desire to open substreams. + #[allow(deprecated)] KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) } + #[allow(deprecated)] fn poll( &mut self, cx: &mut Context, @@ -736,7 +740,7 @@ impl ConnectionHandler for NotifsHandler { ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::OutEvent, + Self::ToBehaviour, Self::Error, >, > { @@ -755,6 +759,7 @@ impl ConnectionHandler for NotifsHandler { // Only proceed with `out_substream.poll_ready_unpin` if there is an element // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. 
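The handler comment just above (kept from the existing code) describes a wake-up optimisation: the outbound substream's readiness is only polled once `poll_peek` shows that a notification is actually queued. A self-contained sketch of that ordering; the `Message` alias and the `sink_ready` closure are illustrative stand-ins for the real notification type and `out_substream.poll_ready_unpin`:

```rust
use futures::{channel::mpsc, stream::Peekable, Stream};
use std::{
    pin::Pin,
    task::{Context, Poll},
};

type Message = Vec<u8>;

// Returns the next queued message once the sink is ready, but never registers a wake-up
// on the sink while the queue is empty.
fn poll_next_message(
    rx: &mut Peekable<mpsc::UnboundedReceiver<Message>>,
    mut sink_ready: impl FnMut(&mut Context<'_>) -> Poll<()>,
    cx: &mut Context<'_>,
) -> Poll<Option<Message>> {
    // Cheap check first: is anything queued at all?
    match Pin::new(&mut *rx).poll_peek(cx) {
        Poll::Ready(Some(_)) => {},
        Poll::Ready(None) => return Poll::Ready(None), // queue closed
        Poll::Pending => return Poll::Pending,         // nothing to send; don't touch the sink
    }
    // Only now ask whether the substream can accept a message.
    match sink_ready(cx) {
        Poll::Ready(()) => Pin::new(rx).poll_next(cx),
        Poll::Pending => Poll::Pending,
    }
}

fn main() {
    use futures::{task::noop_waker, StreamExt};

    let (tx, rx) = mpsc::unbounded::<Message>();
    let mut rx = rx.peekable();
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);

    // Empty queue: we return `Pending` without ever touching the sink.
    assert!(poll_next_message(&mut rx, |_| Poll::Ready(()), &mut cx).is_pending());

    // Queue a message (cannot fail while the receiver is alive) and try again.
    let _ = tx.unbounded_send(b"ping".to_vec());
    assert_eq!(
        poll_next_message(&mut rx, |_| Poll::Ready(()), &mut cx),
        Poll::Ready(Some(b"ping".to_vec()))
    );
}
```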
+ #[allow(deprecated)] match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => return Poll::Ready(ConnectionHandlerEvent::Close( @@ -808,7 +813,7 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { *out_substream = None; let event = NotifsHandlerOut::CloseDesired { protocol_index }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) }, }; }, @@ -830,11 +835,14 @@ impl ConnectionHandler for NotifsHandler { State::Opening { in_substream: None, .. } => {}, State::Open { in_substream: in_substream @ Some(_), .. } => - match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { + match futures::prelude::stream::Stream::poll_next( + Pin::new(in_substream.as_mut().unwrap()), + cx, + ) { Poll::Pending => {}, Poll::Ready(Some(Ok(message))) => { let event = NotifsHandlerOut::Notification { protocol_index, message }; - return Poll::Ready(ConnectionHandlerEvent::Custom(event)) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) }, Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None, }, @@ -846,7 +854,7 @@ impl ConnectionHandler for NotifsHandler { Poll::Ready(Err(_)) => { self.protocols[protocol_index].state = State::Closed { pending_opening: *pending_opening }; - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::CloseDesired { protocol_index }, )) }, @@ -880,8 +888,8 @@ pub mod tests { use asynchronous_codec::Framed; use libp2p::{ core::muxing::SubstreamBox, - swarm::{handler, ConnectionHandlerUpgrErr}, - Multiaddr, + swarm::handler::{self, StreamUpgradeError}, + Multiaddr, Stream, }; use multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version}; use std::{ @@ -972,6 +980,7 @@ pub mod tests { .await } } + struct MockSubstream { pub rx: mpsc::Receiver>, pub tx: mpsc::Sender>, @@ -991,18 +1000,35 @@ pub mod tests { } /// Create new negotiated substream pair. - pub async fn negotiated() -> (Negotiated, Negotiated) { + pub async fn negotiated() -> (Stream, Stream) { let (socket1, socket2) = Self::new(); let socket1 = SubstreamBox::new(socket1); let socket2 = SubstreamBox::new(socket2); - let protos = vec![b"/echo/1.0.0", b"/echo/2.5.0"]; + let protos = vec!["/echo/1.0.0", "/echo/2.5.0"]; let (res1, res2) = tokio::join!( dialer_select_proto(socket1, protos.clone(), Version::V1), listener_select_proto(socket2, protos), ); - (res1.unwrap().1, res2.unwrap().1) + (Self::stream_new(res1.unwrap().1), Self::stream_new(res2.unwrap().1)) + } + + /// Unsafe substitute for `Stream::new` private constructor. + fn stream_new(stream: Negotiated) -> Stream { + // Static asserts to make sure this doesn't break. 
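The `stream_new` test helper introduced here transmutes a `Negotiated<SubstreamBox>` into libp2p's `Stream` because `Stream::new` is private, and guards the hack with compile-time layout checks. The guard pattern in isolation, with stand-in types; note that the stand-ins use `#[repr(transparent)]`, which is what makes such a transmute sound, a guarantee the real helper does not have:

```rust
use core::mem::{align_of, size_of, transmute};

// Stand-in pair: `Wrapper` plays the role of libp2p's `Stream` and `Inner` the role of
// `Negotiated<SubstreamBox>`. Here `#[repr(transparent)]` makes the layouts identical
// by construction.
struct Inner {
    value: u64,
}

#[repr(transparent)]
struct Wrapper(Inner);

// Compile-time guard: the build fails if the two types ever diverge in size or alignment.
const _: () = {
    assert!(size_of::<Wrapper>() == size_of::<Inner>());
    assert!(align_of::<Wrapper>() == align_of::<Inner>());
};

fn wrap(inner: Inner) -> Wrapper {
    // Sound here thanks to `repr(transparent)`; the real test helper has no such guarantee
    // and uses the asserts above only to catch accidental layout changes early.
    unsafe { transmute(inner) }
}

fn main() {
    let wrapped = wrap(Inner { value: 42 });
    assert_eq!(wrapped.0.value, 42);
}
```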
+ const _: () = { + assert!( + core::mem::size_of::() == + core::mem::size_of::>() + ); + assert!( + core::mem::align_of::() == + core::mem::align_of::>() + ); + }; + + unsafe { core::mem::transmute(stream) } } } @@ -1504,7 +1530,7 @@ pub mod tests { // inject dial failure to an already closed substream and verify outbound state is reset handler.on_connection_event(handler::ConnectionEvent::DialUpgradeError( - handler::DialUpgradeError { info: 0, error: ConnectionHandlerUpgrErr::Timeout }, + handler::DialUpgradeError { info: 0, error: StreamUpgradeError::Timeout }, )); assert!(std::matches!( handler.protocols[0].state, @@ -1574,7 +1600,7 @@ pub mod tests { // inject dial failure to an already closed substream and verify outbound state is reset handler.on_connection_event(handler::ConnectionEvent::DialUpgradeError( - handler::DialUpgradeError { info: 0, error: ConnectionHandlerUpgrErr::Timeout }, + handler::DialUpgradeError { info: 0, error: StreamUpgradeError::Timeout }, )); assert!(std::matches!( handler.protocols[0].state, @@ -1610,6 +1636,7 @@ pub mod tests { notifications_sink.send_sync_notification(vec![1, 3, 3, 9]); notifications_sink.send_sync_notification(vec![1, 3, 4, 0]); + #[allow(deprecated)] futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), @@ -1648,15 +1675,15 @@ pub mod tests { futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Custom( + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0, .. }, )) )); assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Custom(NotifsHandlerOut::CloseDesired { - protocol_index: 0 - },)) + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::CloseDesired { protocol_index: 0 }, + )) )); Poll::Ready(()) }) diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index a72b5b4a6748f..90c9cc5b7cde3 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -33,9 +33,8 @@ use libp2p::{ core::{transport::MemoryTransport, upgrade, Endpoint}, identity, noise, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, - PollParameters, Swarm, SwarmBuilder, SwarmEvent, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + self, behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, + PollParameters, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, yamux, Multiaddr, PeerId, Transport, }; @@ -141,13 +140,12 @@ fn build_nodes() -> (Swarm, Swarm) { } }); - let mut swarm = SwarmBuilder::with_executor( + let mut swarm = Swarm::new( transport, behaviour, keypairs[index].public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + swarm::Config::with_executor(TokioExecutor(runtime)), + ); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -183,7 +181,7 @@ impl std::ops::DerefMut for CustomProtoWithAddr { impl NetworkBehaviour for CustomProtoWithAddr { type ConnectionHandler = ::ConnectionHandler; - type OutEvent = ::OutEvent; + type ToSwarm = ::ToSwarm; fn handle_pending_inbound_connection( &mut self, @@ -261,7 +259,7 @@ impl NetworkBehaviour for CustomProtoWithAddr { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { let _ = 
self.peer_store_future.poll_unpin(cx); let _ = self.protocol_controller_future.poll_unpin(cx); self.inner.poll(cx, params) diff --git a/substrate/client/network/src/protocol/notifications/upgrade.rs b/substrate/client/network/src/protocol/notifications/upgrade.rs index 8fd837f949d8a..72e0c2d103962 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade.rs @@ -20,6 +20,7 @@ pub(crate) use self::notifications::{ NotificationsInOpen, NotificationsInSubstreamHandshake, NotificationsOutOpen, }; + pub use self::{ collec::UpgradeCollec, notifications::{ diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs index 33c090ae50e9d..ab0f87215cca3 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use futures::prelude::*; -use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use libp2p::core::upgrade::{InboundUpgrade, UpgradeInfo}; use std::{ pin::Pin, task::{Context, Poll}, @@ -75,9 +75,9 @@ where #[derive(Debug, Clone, PartialEq)] pub struct ProtoNameWithUsize(T, usize); -impl ProtocolName for ProtoNameWithUsize { - fn protocol_name(&self) -> &[u8] { - self.0.protocol_name() +impl> AsRef for ProtoNameWithUsize { + fn as_ref(&self) -> &str { + self.0.as_ref() } } @@ -103,13 +103,13 @@ impl>, O, E> Future for FutWithUsize { mod tests { use super::*; use crate::types::ProtocolName as ProtoName; - use libp2p::core::upgrade::{ProtocolName, UpgradeInfo}; + use libp2p::core::upgrade::UpgradeInfo; // TODO: move to mocks mockall::mock! { pub ProtocolUpgrade {} - impl UpgradeInfo for ProtocolUpgrade { + impl> UpgradeInfo for ProtocolUpgrade { type Info = T; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> vec::IntoIter; diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index c760b7a963fc1..a8a9e453a7bb0 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -513,45 +513,99 @@ pub enum NotificationsOutError { #[cfg(test)] mod tests { + use crate::ProtocolName; + use super::{ - NotificationsIn, NotificationsInOpen, NotificationsOut, NotificationsOutError, - NotificationsOutOpen, + NotificationsHandshakeError, NotificationsIn, NotificationsInOpen, + NotificationsInSubstream, NotificationsOut, NotificationsOutError, NotificationsOutOpen, + NotificationsOutSubstream, }; - use futures::{channel::oneshot, future, prelude::*}; - use libp2p::core::upgrade; + use futures::{channel::oneshot, future, prelude::*, SinkExt, StreamExt}; + use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use std::{pin::Pin, task::Poll}; use tokio::net::{TcpListener, TcpStream}; use tokio_util::compat::TokioAsyncReadCompatExt; + /// Opens a substream to the given address, negotiates the protocol, and returns the substream + /// along with the handshake message. 
+ async fn dial( + addr: std::net::SocketAddr, + handshake: impl Into>, + ) -> Result< + ( + Vec, + NotificationsOutSubstream< + multistream_select::Negotiated>, + >, + ), + NotificationsHandshakeError, + > { + let socket = TcpStream::connect(addr).await.unwrap(); + let notifs_out = NotificationsOut::new("/test/proto/1", Vec::new(), handshake, 1024 * 1024); + let (_, substream) = multistream_select::dialer_select_proto( + socket.compat(), + notifs_out.protocol_info(), + upgrade::Version::V1, + ) + .await + .unwrap(); + let NotificationsOutOpen { handshake, substream, .. } = + >::upgrade_outbound( + notifs_out, + substream, + "/test/proto/1".into(), + ) + .await?; + Ok((handshake, substream)) + } + + /// Listens on a localhost, negotiates the protocol, and returns the substream along with the + /// handshake message. + /// + /// Also sends the listener address through the given channel. + async fn listen_on_localhost( + listener_addr_tx: oneshot::Sender, + ) -> Result< + ( + Vec, + NotificationsInSubstream< + multistream_select::Negotiated>, + >, + ), + NotificationsHandshakeError, + > { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let notifs_in = NotificationsIn::new("/test/proto/1", Vec::new(), 1024 * 1024); + let (_, substream) = + multistream_select::listener_select_proto(socket.compat(), notifs_in.protocol_info()) + .await + .unwrap(); + let NotificationsInOpen { handshake, substream, .. } = + >::upgrade_inbound( + notifs_in, + substream, + "/test/proto/1".into(), + ) + .await?; + Ok((handshake, substream)) + } + #[tokio::test] async fn basic_works() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let (handshake, mut substream) = + dial(listener_addr_rx.await.unwrap(), &b"initial message"[..]).await.unwrap(); assert_eq!(handshake, b"hello world"); substream.send(b"test message".to_vec()).await.unwrap(); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"initial message"); substream.send_handshake(&b"hello world"[..]); @@ -566,33 +620,17 @@ mod tests { async fn empty_handshake() { // Check that everything still works when the handshake messages are empty. - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. 
} = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), vec![], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let (handshake, mut substream) = + dial(listener_addr_rx.await.unwrap(), vec![]).await.unwrap(); assert!(handshake.is_empty()); substream.send(Default::default()).await.unwrap(); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert!(handshake.is_empty()); substream.send_handshake(vec![]); @@ -605,17 +643,10 @@ mod tests { #[tokio::test] async fn refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let outcome = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"hello"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await; + let outcome = dial(listener_addr_rx.await.unwrap(), &b"hello"[..]).await; // Despite the protocol negotiation being successfully conducted on the listener // side, we have to receive an error here because the listener didn't send the @@ -623,17 +654,7 @@ mod tests { assert!(outcome.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); - + let (handshake, substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"hello"); // We successfully upgrade to the protocol, but then close the substream. @@ -644,66 +665,29 @@ mod tests { #[tokio::test] async fn large_initial_message_refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket.compat(), - // We check that an initial message that is too large gets refused. 
- NotificationsOut::new( - PROTO_NAME, - Vec::new(), - (0..32768).map(|_| 0).collect::>(), - 1024 * 1024, - ), - upgrade::Version::V1, - ) - .await; + let ret = + dial(listener_addr_rx.await.unwrap(), (0..32768).map(|_| 0).collect::>()) + .await; assert!(ret.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let ret = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await; - assert!(ret.is_err()); - + let _ret = listen_on_localhost(listener_addr_tx).await; client.await.unwrap(); } #[tokio::test] async fn large_handshake_refused() { - const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = tokio::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await; + let ret = dial(listener_addr_rx.await.unwrap(), &b"initial message"[..]).await; assert!(ret.is_err()); }); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), - ) - .await - .unwrap(); + let (handshake, mut substream) = listen_on_localhost(listener_addr_tx).await.unwrap(); assert_eq!(handshake, b"initial message"); // We check that a handshake that is too large gets refused. @@ -720,10 +704,10 @@ mod tests { let client = tokio::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, .. } = upgrade::apply_outbound( - socket.compat(), + let NotificationsOutOpen { handshake, .. } = OutboundUpgrade::upgrade_outbound( NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); @@ -735,9 +719,10 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), + let NotificationsInOpen { handshake, mut substream, .. } = InboundUpgrade::upgrade_inbound( NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); @@ -758,13 +743,19 @@ mod tests { let client = tokio::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( - socket.compat(), - NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1, - ) - .await - .unwrap(); + let NotificationsOutOpen { handshake, mut substream, .. 
} = + OutboundUpgrade::upgrade_outbound( + NotificationsOut::new( + PROTO_NAME, + Vec::new(), + &b"initial message"[..], + 1024 * 1024, + ), + socket.compat(), + ProtocolName::Static(PROTO_NAME), + ) + .await + .unwrap(); assert_eq!(handshake, b"hello world"); @@ -786,9 +777,10 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( - socket.compat(), + let NotificationsInOpen { handshake, mut substream, .. } = InboundUpgrade::upgrade_inbound( NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + socket.compat(), + ProtocolName::Static(PROTO_NAME), ) .await .unwrap(); diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index fbf050a65713d..3671d76ea630b 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -318,7 +318,6 @@ impl RequestResponsesBehaviour { let mut protocols = HashMap::new(); for protocol in list { let mut cfg = Config::default(); - cfg.set_connection_keep_alive(Duration::from_secs(10)); cfg.set_request_timeout(protocol.request_timeout); let protocol_support = if protocol.inbound_queue.is_some() { @@ -327,13 +326,13 @@ impl RequestResponsesBehaviour { ProtocolSupport::Outbound }; - let rq_rp = Behaviour::new( + let rq_rp = Behaviour::with_codec( GenericCodec { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once(protocol.name.as_bytes().to_vec()) - .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) + iter::once(protocol.name.clone()) + .chain(protocol.fallback_names) .zip(iter::repeat(protocol_support)), cfg, ); @@ -427,7 +426,7 @@ impl RequestResponsesBehaviour { impl NetworkBehaviour for RequestResponsesBehaviour { type ConnectionHandler = MultiHandler as NetworkBehaviour>::ConnectionHandler>; - type OutEvent = Event; + type ToSwarm = Event; fn handle_pending_inbound_connection( &mut self, @@ -543,9 +542,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerError(e)); }, - FromSwarm::ExpiredExternalAddr(e) => + FromSwarm::ExternalAddrExpired(e) => for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredExternalAddr(e)); + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrExpired(e)); }, FromSwarm::NewListener(e) => for (p, _) in self.protocols.values_mut() { @@ -555,9 +554,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredListenAddr(e)); }, - FromSwarm::NewExternalAddr(e) => + FromSwarm::NewExternalAddrCandidate(e) => for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddr(e)); + NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddrCandidate(e)); + }, + FromSwarm::ExternalAddrConfirmed(e) => + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrConfirmed(e)); }, FromSwarm::AddressChange(e) => for (p, _) in self.protocols.values_mut() { @@ -592,7 +595,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { 'poll_all: loop { // Poll to 
see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { @@ -663,10 +666,18 @@ impl NetworkBehaviour for RequestResponsesBehaviour { handler, event: ((*protocol).to_string(), event), }), - ToSwarm::ReportObservedAddr { address, score } => - return Poll::Ready(ToSwarm::ReportObservedAddr { address, score }), ToSwarm::CloseConnection { peer_id, connection } => return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), + ToSwarm::NewExternalAddrCandidate(observed) => + return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), + ToSwarm::ExternalAddrConfirmed(addr) => + return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), + ToSwarm::ExternalAddrExpired(addr) => + return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), + ToSwarm::ListenOn { opts } => + return Poll::Ready(ToSwarm::ListenOn { opts }), + ToSwarm::RemoveListener { id } => + return Poll::Ready(ToSwarm::RemoveListener { id }), }; match ev { @@ -950,7 +961,7 @@ pub struct GenericCodec { #[async_trait::async_trait] impl Codec for GenericCodec { - type Protocol = Vec; + type Protocol = ProtocolName; type Request = Vec; type Response = Result, ()>; @@ -1078,7 +1089,7 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmBuilder, SwarmEvent}, + swarm::{Config as SwarmConfig, Executor, Swarm, SwarmEvent}, Multiaddr, }; use std::{iter, time::Duration}; @@ -1104,16 +1115,18 @@ mod tests { let behaviour = RequestResponsesBehaviour::new(list, Arc::new(MockPeerStore {})).unwrap(); let runtime = tokio::runtime::Runtime::new().unwrap(); - let mut swarm = SwarmBuilder::with_executor( + + let mut swarm = Swarm::new( transport, behaviour, keypair.public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + SwarmConfig::with_executor(TokioExecutor(runtime)), + ); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); swarm.listen_on(listen_addr.clone()).unwrap(); + (swarm, listen_addr) } diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 2cf4564e312c6..3a685787c48e6 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -61,18 +61,18 @@ use crate::{ use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; -use libp2p::identity::ed25519; #[allow(deprecated)] +use libp2p::swarm::THandlerErr; use libp2p::{ - connection_limits::Exceeded, + connection_limits::{ConnectionLimits, Exceeded}, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, + identity::ed25519, kad::record::Key as KademliaKey, multiaddr::{self, Multiaddr}, - ping::Failure as PingFailure, swarm::{ - AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor, - ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, + Config as SwarmConfig, ConnectionError, ConnectionId, DialError, Executor, ListenError, + NetworkBehaviour, Swarm, SwarmEvent, }, PeerId, }; @@ -274,10 +274,6 @@ where let local_identity: ed25519::Keypair = local_identity.into(); let local_public: ed25519::PublicKey = local_public.into(); let local_peer_id: PeerId = local_peer_id.into(); - let listen_addresses: Vec = - network_config.listen_addresses.iter().cloned().map(Into::into).collect(); - let public_addresses: Vec = - network_config.public_addresses.iter().cloned().map(Into::into).collect(); network_config.boot_nodes = network_config .boot_nodes @@ -559,6 +555,11 @@ where 
request_response_protocols, Arc::clone(&peer_store_handle), external_addresses.clone(), + ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some( + crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, + )), ); match result { @@ -568,37 +569,27 @@ where } }; - let builder = { + let swarm = { struct SpawnImpl(F); impl + Send>>)> Executor for SpawnImpl { fn exec(&self, f: Pin + Send>>) { (self.0)(f) } } - SwarmBuilder::with_executor( - transport, - behaviour, - local_peer_id, - SpawnImpl(params.executor), - ) + + let config = SwarmConfig::with_executor(SpawnImpl(params.executor)) + .with_substream_upgrade_protocol_override(upgrade::Version::V1) + .with_notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) + // NOTE: 24 is somewhat arbitrary and should be tuned in the future if + // necessary. See + .with_per_connection_event_buffer_size(24) + .with_max_negotiating_inbound_streams(2048) + .with_idle_connection_timeout(Duration::from_secs(10)); + + Swarm::new(transport, behaviour, local_peer_id, config) }; - #[allow(deprecated)] - let builder = builder - .connection_limits( - ConnectionLimits::default() - .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) - .with_max_established_incoming(Some( - crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, - )), - ) - .substream_upgrade_protocol_override(upgrade::Version::V1) - .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) - // NOTE: 24 is somewhat arbitrary and should be tuned in the future if necessary. - // See - .per_connection_event_buffer_size(24) - .max_negotiating_inbound_streams(2048); - - (builder.build(), Arc::new(Libp2pBandwidthSink { sink: bandwidth })) + + (swarm, Arc::new(Libp2pBandwidthSink { sink: bandwidth })) }; // Initialize the metrics. @@ -614,19 +605,15 @@ where }; // Listen on multiaddresses. - for addr in &listen_addresses { - if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { + for addr in &network_config.listen_addresses { + if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone().into()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. - for addr in &public_addresses { - Swarm::>::add_external_address( - &mut swarm, - addr.clone(), - AddressScore::Infinite, - ); + for addr in &network_config.public_addresses { + Swarm::>::add_external_address(&mut swarm, addr.clone().into()); } let listen_addresses_set = Arc::new(Mutex::new(HashSet::new())); @@ -807,7 +794,7 @@ where let peer_id = Swarm::>::local_peer_id(swarm).to_base58(); let listened_addresses = swarm.listeners().cloned().collect(); - let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect(); + let external_addresses = swarm.external_addresses().cloned().collect(); NetworkState { peer_id, @@ -867,8 +854,7 @@ impl NetworkService { .into_iter() .map(|mut addr| { let peer = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| "Invalid PeerId format".to_string())?, + Some(multiaddr::Protocol::P2p(peer_id)) => peer_id, _ => return Err("Missing PeerId from address".to_string()), }; @@ -1492,6 +1478,7 @@ where } /// Process the next event coming from `Swarm`. + #[allow(deprecated)] fn handle_swarm_event(&mut self, event: SwarmEvent>>) { match event { SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
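Pulled together from the hunk above: swarm tuning now lives entirely on the swarm `Config` instead of the removed `SwarmBuilder`, while the connection limits move into the `ConnectionLimits` argument passed to the behaviour. A sketch of the same configuration chain as a free function, assuming the libp2p version this diff targets (the `swarm_config` helper name is illustrative):

```rust
use std::{num::NonZeroUsize, time::Duration};

use libp2p::{
    core::upgrade,
    swarm::{Config as SwarmConfig, Executor},
};

// Mirrors the values used in the hunk above; pass the result to `Swarm::new`.
fn swarm_config(executor: impl Executor + Send + 'static) -> SwarmConfig {
    SwarmConfig::with_executor(executor)
        .with_substream_upgrade_protocol_override(upgrade::Version::V1)
        .with_notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed"))
        // 24 is somewhat arbitrary and may need tuning, as noted in the hunk above.
        .with_per_connection_event_buffer_size(24)
        .with_max_negotiating_inbound_streams(2048)
        .with_idle_connection_timeout(Duration::from_secs(10))
}
```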
}) => { @@ -1586,9 +1573,11 @@ where listen_addrs.truncate(30); } for addr in listen_addrs { - self.network_service - .behaviour_mut() - .add_self_reported_address_to_dht(&peer_id, &protocols, addr); + self.network_service.behaviour_mut().add_self_reported_address_to_dht( + &peer_id, + &protocols, + addr.clone(), + ); } self.peer_store_handle.add_known_peer(peer_id.into()); }, @@ -1705,8 +1694,14 @@ where } } }, - SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established } => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + SwarmEvent::ConnectionClosed { + connection_id, + peer_id, + cause, + endpoint, + num_established, + } => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({peer_id:?} via {connection_id:?}, {cause:?})"); if let Some(metrics) = self.metrics.as_ref() { let direction = match endpoint { ConnectedPoint::Dialer { .. } => "out", @@ -1715,11 +1710,13 @@ where let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Right(Either::Left(PingFailure::Timeout)), - )))) => "ping-timeout", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Left(NotifsHandlerError::SyncNotificationsClogged), + Either::Left(Either::Right( + NotifsHandlerError::SyncNotificationsClogged, + )), )))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(Either::Left(Either::Left( + Either::Right(Either::Left(_)), + )))) => "ping-timeout", Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", None => "actively-closed", @@ -1746,12 +1743,11 @@ where } self.listen_addresses.lock().remove(&address); }, - SwarmEvent::OutgoingConnectionError { peer_id, error } => { + SwarmEvent::OutgoingConnectionError { connection_id, peer_id, error } => { if let Some(peer_id) = peer_id { trace!( target: "sub-libp2p", - "Libp2p => Failed to reach {:?}: {}", - peer_id, error, + "Libp2p => Failed to reach {peer_id:?} via {connection_id:?}: {error}", ); let not_reported = !self.reported_invalid_boot_nodes.contains(&peer_id); @@ -1789,12 +1785,9 @@ where } else { None }, - DialError::ConnectionLimit(_) => Some("limit-reached"), - DialError::InvalidPeerId(_) | - DialError::WrongPeerId { .. } | - DialError::LocalPeerId { .. } => Some("invalid-peer-id"), + DialError::LocalPeerId { .. } => Some("local-peer-id"), + DialError::WrongPeerId { .. 
} => Some("invalid-peer-id"), DialError::Transport(_) => Some("transport-error"), - DialError::Banned | DialError::NoAddresses | DialError::DialPeerConditionFalse(_) | DialError::Aborted => None, // ignore them @@ -1804,21 +1797,24 @@ where } } }, - SwarmEvent::Dialing(peer_id) => { - trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id) + SwarmEvent::Dialing { connection_id, peer_id } => { + trace!(target: "sub-libp2p", "Libp2p => Dialing({peer_id:?}) via {connection_id:?}") }, - SwarmEvent::IncomingConnection { local_addr, send_back_addr } => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", - local_addr, send_back_addr); + SwarmEvent::IncomingConnection { connection_id, local_addr, send_back_addr } => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({local_addr},{send_back_addr} via {connection_id:?}))"); if let Some(metrics) = self.metrics.as_ref() { metrics.incoming_connections_total.inc(); } }, - SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error } => { + SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error, + } => { debug!( target: "sub-libp2p", - "Libp2p => IncomingConnectionError({},{}): {}", - local_addr, send_back_addr, error, + "Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}" ); if let Some(metrics) = self.metrics.as_ref() { #[allow(deprecated)] @@ -1829,7 +1825,6 @@ where } else { None }, - ListenError::ConnectionLimit(_) => Some("limit-reached"), ListenError::WrongPeerId { .. } | ListenError::LocalPeerId { .. } => Some("invalid-peer-id"), ListenError::Transport(_) => Some("transport-error"), @@ -1844,17 +1839,6 @@ where } } }, - #[allow(deprecated)] - SwarmEvent::BannedPeer { peer_id, endpoint } => { - debug!( - target: "sub-libp2p", - "Libp2p => BannedPeer({}). Connected via {:?}.", - peer_id, endpoint, - ); - if let Some(metrics) = self.metrics.as_ref() { - metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); - } - }, SwarmEvent::ListenerClosed { reason, addresses, .. } => { if let Some(metrics) = self.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index 4136b34fc0e8e..ed7e7c574e16f 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -57,7 +57,7 @@ pub fn build_transport( // Main transport: DNS(TCP) let tcp_config = tcp::Config::new().nodelay(true); let tcp_trans = tcp::tokio::Transport::new(tcp_config.clone()); - let dns_init = dns::TokioDnsConfig::system(tcp_trans); + let dns_init = dns::tokio::Transport::system(tcp_trans); Either::Left(if let Ok(dns) = dns_init { // WS + WSS transport @@ -66,7 +66,7 @@ pub fn build_transport( // unresolved addresses (BUT WSS transport itself needs an instance of DNS transport to // resolve and dial addresses). let tcp_trans = tcp::tokio::Transport::new(tcp_config); - let dns_for_wss = dns::TokioDnsConfig::system(tcp_trans) + let dns_for_wss = dns::tokio::Transport::system(tcp_trans) .expect("same system_conf & resolver to work"); Either::Left(websocket::WsConfig::new(dns_for_wss).or_transport(dns)) } else { diff --git a/substrate/client/network/src/types.rs b/substrate/client/network/src/types.rs index 25517599469e6..0652bbcdddecf 100644 --- a/substrate/client/network/src/types.rs +++ b/substrate/client/network/src/types.rs @@ -18,8 +18,6 @@ //! 
`sc-network` type definitions -use libp2p::core::upgrade; - use std::{ borrow::Borrow, fmt, @@ -94,9 +92,9 @@ impl fmt::Display for ProtocolName { } } -impl upgrade::ProtocolName for ProtocolName { - fn protocol_name(&self) -> &[u8] { - (self as &str).as_bytes() +impl AsRef for ProtocolName { + fn as_ref(&self) -> &str { + self as &str } } diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index 0dfaa491b65c9..4cced49fee791 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -16,17 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-network-common = { path = "../common" } -sc-network-sync = { path = "../sync" } -sc-network-types = { path = "../types" } -sc-network = { path = ".." } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 964090444b22a..17e3e2119d7e8 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -16,43 +16,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.12.4" +prost-build = { workspace = true } [dependencies] -array-bytes = "6.2.2" -async-channel = "1.8.0" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -futures-timer = "3.0.2" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +libp2p = { workspace = true } log = { workspace = true, default-features = true } -mockall = "0.11.3" -prost = "0.12.4" -schnellru = "0.2.1" -smallvec = "1.11.0" +mockall = { workspace = true } +prost = { workspace = true } +schnellru = { workspace = true } +smallvec = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio-stream = "0.1.14" -tokio = { version = "1.32.0", features = ["macros", "time"] } -fork-tree = { path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = 
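The `AsRef<str>` impl replaces the removed `upgrade::ProtocolName` trait because the newer libp2p APIs (including the request-response codec above, which now uses `ProtocolName` as its `Protocol` type) take protocol names as `impl AsRef<str>` rather than raw bytes. A self-contained sketch of the idea with a stand-in `Name` type (hypothetical, not the real `ProtocolName`):

```rust
use std::sync::Arc;

// Stand-in for a protocol-name type with static and heap-allocated variants.
#[derive(Clone, Debug)]
enum Name {
    Static(&'static str),
    OnHeap(Arc<str>),
}

impl AsRef<str> for Name {
    fn as_ref(&self) -> &str {
        match self {
            Name::Static(name) => name,
            Name::OnHeap(name) => name,
        }
    }
}

// Anything `AsRef<str>` can be handed to an API expecting a protocol name.
fn negotiate(protocol: impl AsRef<str>) -> String {
    format!("negotiating {}", protocol.as_ref())
}

fn main() {
    assert_eq!(negotiate(Name::Static("/test/proto/1")), "negotiating /test/proto/1");
    assert_eq!(negotiate(Name::OnHeap(Arc::from("/block-announces/1"))), "negotiating /block-announces/1");
}
```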
"substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../../consensus/common" } -sc-network = { path = ".." } -sc-network-common = { path = "../common" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sp-arithmetic = { path = "../../../primitives/arithmetic" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sp-runtime = { path = "../../../primitives/runtime" } +tokio-stream = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -mockall = "0.11.3" -quickcheck = { version = "1.0.3", default-features = false } -sc-block-builder = { path = "../../block-builder" } -sp-test-primitives = { path = "../../../primitives/test-primitives" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +mockall = { workspace = true } +quickcheck = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sp-test-primitives = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index f70e4847f59f3..74b43173508be 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,28 +16,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tokio = "1.37" -async-trait = "0.1.79" -futures = "0.3.30" -futures-timer = "3.0.1" -libp2p = "0.51.4" +tokio = { workspace = true, default-features = true } +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +libp2p = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -rand = "0.8.5" -sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-consensus = { path = "../../consensus/common" } -sc-network = { path = ".." 
} -sc-network-common = { path = "../common" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sc-network-light = { path = "../light" } -sc-network-sync = { path = "../sync" } -sc-service = { path = "../../service", default-features = false, features = ["test-helpers"] } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-tracing = { path = "../../../primitives/tracing" } -substrate-test-runtime = { path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 8a8f9608051af..221c8515d6d41 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -114,7 +114,7 @@ impl PassThroughVerifier { #[async_trait::async_trait] impl Verifier for PassThroughVerifier { async fn verify( - &mut self, + &self, mut block: BlockImportParams, ) -> Result, String> { if block.fork_choice.is_none() { @@ -210,7 +210,7 @@ impl BlockImport for PeersClient { type Error = ConsensusError; async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.client.check_block(block).await @@ -600,7 +600,7 @@ where type Error = ConsensusError; async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { self.inner.check_block(block).await @@ -622,10 +622,7 @@ struct VerifierAdapter { #[async_trait::async_trait] impl Verifier for VerifierAdapter { - async fn verify( - &mut self, - block: BlockImportParams, - ) -> Result, String> { + async fn verify(&self, block: BlockImportParams) -> Result, String> { let hash = block.header.hash(); self.verifier.lock().await.verify(block).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs index 150c1db7560e6..c4a2b261081e6 100644 --- a/substrate/client/network/test/src/service.rs +++ b/substrate/client/network/test/src/service.rs @@ -134,7 +134,7 @@ impl TestNetworkBuilder { #[async_trait::async_trait] impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( 
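The `&mut self` to `&self` change on `Verifier::verify` and `BlockImport::check_block` in the test crate below means implementations that keep per-call state now need interior mutability, which is why `VerifierAdapter` holds its inner verifier behind a lock. A simplified, self-contained sketch of that pattern (the trait here is a stand-in, not the real `sc_consensus::Verifier`):

```rust
use std::sync::Mutex;

use async_trait::async_trait;

#[async_trait]
trait Verifier: Send + Sync {
    async fn verify(&self, block: Vec<u8>) -> Result<Vec<u8>, String>;
}

struct CountingVerifier {
    // Shared `&self` access only, so mutable state sits behind a `Mutex`.
    verified: Mutex<usize>,
}

#[async_trait]
impl Verifier for CountingVerifier {
    async fn verify(&self, block: Vec<u8>) -> Result<Vec<u8>, String> {
        *self.verified.lock().expect("not poisoned") += 1;
        Ok(block)
    }
}

#[tokio::main]
async fn main() {
    let verifier = CountingVerifier { verified: Mutex::new(0) };
    verifier.verify(b"block".to_vec()).await.unwrap();
    assert_eq!(*verifier.verified.lock().unwrap(), 1);
}
```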
- &mut self, + &self, mut block: sc_consensus::BlockImportParams, ) -> Result, String> { block.finalized = self.0; diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index d871b59b37bb1..eb907b606d584 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -16,16 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -futures = "0.3.30" -libp2p = "0.51.4" +array-bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } -sc-network = { path = ".." } -sc-network-common = { path = "../common" } -sc-network-sync = { path = "../sync" } -sc-network-types = { path = "../types" } -sc-utils = { path = "../../utils" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-consensus = { path = "../../../primitives/consensus/common" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index 3384aab5149dc..31ad0781035e5 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -368,7 +368,8 @@ where { self.on_transactions(peer, m); } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); + warn!(target: "sub-libp2p", "Failed to decode transactions list from peer {peer}"); + self.network.report_peer(peer, rep::BAD_TRANSACTION); } }, } diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index a9334aaa17059..811ccddbef930 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -10,15 +10,16 @@ repository.workspace = true documentation = "https://docs.rs/sc-network-types" [dependencies] -bs58 = "0.5.0" -ed25519-dalek = "2.1" -libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = "0.5.0" -multiaddr = "0.17.0" -multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } -rand = "0.8.5" -thiserror = "1.0.48" -zeroize = { version = "1.7.0", default-features = false } +bs58 = { workspace = true, default-features = true } +ed25519-dalek = { workspace = true, default-features = true } +libp2p-identity = { features = ["ed25519", "peerid", "rand"], workspace = true } +litep2p = { workspace = true } +log = { workspace = true, default-features = true } +multiaddr = { workspace = true } +multihash = { workspace = true } +rand = { workspace = true, default-features = true } +thiserror = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -quickcheck = 
"1.0.3" +quickcheck = { workspace = true, default-features = true } diff --git a/substrate/client/network/types/src/ed25519.rs b/substrate/client/network/types/src/ed25519.rs index e85f405b13066..acaa01759e5cf 100644 --- a/substrate/client/network/types/src/ed25519.rs +++ b/substrate/client/network/types/src/ed25519.rs @@ -82,14 +82,14 @@ impl fmt::Debug for Keypair { impl From for Keypair { fn from(kp: litep2p_ed25519::Keypair) -> Self { - Self::try_from_bytes(&mut kp.encode()) + Self::try_from_bytes(&mut kp.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } impl From for litep2p_ed25519::Keypair { fn from(kp: Keypair) -> Self { - Self::decode(&mut kp.to_bytes()) + Self::try_from_bytes(&mut kp.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } @@ -191,14 +191,14 @@ impl PublicKey { impl From for PublicKey { fn from(k: litep2p_ed25519::PublicKey) -> Self { - Self::try_from_bytes(&k.encode()) + Self::try_from_bytes(&k.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } impl From for litep2p_ed25519::PublicKey { fn from(k: PublicKey) -> Self { - Self::decode(&k.to_bytes()) + Self::try_from_bytes(&k.to_bytes()) .expect("ed25519_dalek in substrate & litep2p to use the same format") } } @@ -272,7 +272,7 @@ impl From for SecretKey { impl From for litep2p_ed25519::SecretKey { fn from(sk: SecretKey) -> Self { - Self::from_bytes(&mut sk.to_bytes()) + Self::try_from_bytes(&mut sk.to_bytes()) .expect("litep2p `SecretKey` to accept 32 bytes as Ed25519 key") } } @@ -357,10 +357,10 @@ mod tests { let kp1: libp2p_ed25519::Keypair = kp.clone().into(); let kp2: litep2p_ed25519::Keypair = kp.clone().into(); let kp3 = libp2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); - let kp4 = litep2p_ed25519::Keypair::decode(&mut kp_bytes.clone()).unwrap(); + let kp4 = litep2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); assert_eq!(kp_bytes, kp1.to_bytes()); - assert_eq!(kp_bytes, kp2.encode()); + assert_eq!(kp_bytes, kp2.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -389,9 +389,9 @@ mod tests { fn litep2p_kp_to_substrate_kp() { let kp = litep2p_ed25519::Keypair::generate(); let kp1: Keypair = kp.clone().into(); - let kp2 = Keypair::try_from_bytes(&mut kp.encode()).unwrap(); + let kp2 = Keypair::try_from_bytes(&mut kp.to_bytes()).unwrap(); - assert_eq!(kp.encode(), kp1.to_bytes()); + assert_eq!(kp.to_bytes(), kp1.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -439,10 +439,10 @@ mod tests { let pk1: libp2p_ed25519::PublicKey = pk.clone().into(); let pk2: litep2p_ed25519::PublicKey = pk.clone().into(); let pk3 = libp2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); - let pk4 = litep2p_ed25519::PublicKey::decode(&pk_bytes).unwrap(); + let pk4 = litep2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); assert_eq!(pk_bytes, pk1.to_bytes()); - assert_eq!(pk_bytes, pk2.encode()); + assert_eq!(pk_bytes, pk2.to_bytes()); let msg = "hello world".as_bytes(); let sig = kp.sign(msg); @@ -458,7 +458,7 @@ mod tests { fn litep2p_pk_to_substrate_pk() { let kp = litep2p_ed25519::Keypair::generate(); let pk = kp.public(); - let pk_bytes = pk.clone().encode(); + let pk_bytes = pk.clone().to_bytes(); let pk1: PublicKey = pk.clone().into(); let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); @@ -497,7 +497,7 @@ mod tests { let sk1: libp2p_ed25519::SecretKey = sk.clone().into(); let sk2: litep2p_ed25519::SecretKey = 
sk.clone().into(); let sk3 = libp2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); - let sk4 = litep2p_ed25519::SecretKey::from_bytes(&mut sk_bytes.clone()).unwrap(); + let sk4 = litep2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); let kp: Keypair = sk.into(); let kp1: libp2p_ed25519::Keypair = sk1.into(); diff --git a/substrate/client/network/types/src/multiaddr.rs b/substrate/client/network/types/src/multiaddr.rs index 312bef9baab12..925e24fe70d6d 100644 --- a/substrate/client/network/types/src/multiaddr.rs +++ b/substrate/client/network/types/src/multiaddr.rs @@ -20,8 +20,10 @@ use litep2p::types::multiaddr::{ Error as LiteP2pError, Iter as LiteP2pIter, Multiaddr as LiteP2pMultiaddr, Protocol as LiteP2pProtocol, }; +use multiaddr::Multiaddr as LibP2pMultiaddr; use std::{ fmt::{self, Debug, Display}, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; @@ -102,6 +104,39 @@ impl From for LiteP2pMultiaddr { } } +impl From for Multiaddr { + fn from(multiaddr: LibP2pMultiaddr) -> Self { + multiaddr.into_iter().map(Into::into).collect() + } +} + +impl From for LibP2pMultiaddr { + fn from(multiaddr: Multiaddr) -> Self { + multiaddr.into_iter().map(Into::into).collect() + } +} + +impl From for Multiaddr { + fn from(v: IpAddr) -> Multiaddr { + match v { + IpAddr::V4(a) => a.into(), + IpAddr::V6(a) => a.into(), + } + } +} + +impl From for Multiaddr { + fn from(v: Ipv4Addr) -> Multiaddr { + Protocol::Ip4(v).into() + } +} + +impl From for Multiaddr { + fn from(v: Ipv6Addr) -> Multiaddr { + Protocol::Ip6(v).into() + } +} + impl TryFrom> for Multiaddr { type Error = ParseError; diff --git a/substrate/client/network/types/src/multiaddr/protocol.rs b/substrate/client/network/types/src/multiaddr/protocol.rs index 800d08fe36bd6..aca3a31136860 100644 --- a/substrate/client/network/types/src/multiaddr/protocol.rs +++ b/substrate/client/network/types/src/multiaddr/protocol.rs @@ -17,12 +17,18 @@ // along with this program. If not, see . use crate::multihash::Multihash; +use libp2p_identity::PeerId; use litep2p::types::multiaddr::Protocol as LiteP2pProtocol; +use multiaddr::Protocol as LibP2pProtocol; use std::{ borrow::Cow, - net::{Ipv4Addr, Ipv6Addr}, + fmt::{self, Debug, Display}, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, }; +// Log target for this file. +const LOG_TARGET: &str = "sub-libp2p"; + /// [`Protocol`] describes all possible multiaddress protocols. 
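The litep2p key types renamed `encode`/`decode` to `to_bytes`/`try_from_bytes` in the ed25519 hunks above; the `From` impls keep working because both sides serialize a keypair as the same 64-byte `secret || public` layout. A round-trip sketch of that byte format using `ed25519-dalek` 2.x directly (the fixed seed is illustrative only):

```rust
use ed25519_dalek::SigningKey;

fn main() {
    let signing = SigningKey::from_bytes(&[7u8; 32]);
    // 64 bytes: the secret seed followed by the public key.
    let keypair_bytes = signing.to_keypair_bytes();
    let restored = SigningKey::from_keypair_bytes(&keypair_bytes).expect("valid keypair bytes");
    assert_eq!(signing.to_bytes(), restored.to_bytes());
}
```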
#[derive(PartialEq, Eq, Clone, Debug)] pub enum Protocol<'a> { @@ -60,6 +66,37 @@ pub enum Protocol<'a> { Wss(Cow<'a, str>), } +impl<'a> Display for Protocol<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let protocol = LiteP2pProtocol::from(self.clone()); + Display::fmt(&protocol, f) + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: IpAddr) -> Self { + match addr { + IpAddr::V4(addr) => Protocol::Ip4(addr), + IpAddr::V6(addr) => Protocol::Ip6(addr), + } + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: Ipv4Addr) -> Self { + Protocol::Ip4(addr) + } +} + +impl<'a> From for Protocol<'a> { + #[inline] + fn from(addr: Ipv6Addr) -> Self { + Protocol::Ip6(addr) + } +} + impl<'a> From> for Protocol<'a> { fn from(protocol: LiteP2pProtocol<'a>) -> Self { match protocol { @@ -136,3 +173,102 @@ impl<'a> From> for LiteP2pProtocol<'a> { } } } + +impl<'a> From> for Protocol<'a> { + fn from(protocol: LibP2pProtocol<'a>) -> Self { + match protocol { + LibP2pProtocol::Dccp(port) => Protocol::Dccp(port), + LibP2pProtocol::Dns(str) => Protocol::Dns(str), + LibP2pProtocol::Dns4(str) => Protocol::Dns4(str), + LibP2pProtocol::Dns6(str) => Protocol::Dns6(str), + LibP2pProtocol::Dnsaddr(str) => Protocol::Dnsaddr(str), + LibP2pProtocol::Http => Protocol::Http, + LibP2pProtocol::Https => Protocol::Https, + LibP2pProtocol::Ip4(ipv4_addr) => Protocol::Ip4(ipv4_addr), + LibP2pProtocol::Ip6(ipv6_addr) => Protocol::Ip6(ipv6_addr), + LibP2pProtocol::P2pWebRtcDirect => Protocol::P2pWebRtcDirect, + LibP2pProtocol::P2pWebRtcStar => Protocol::P2pWebRtcStar, + LibP2pProtocol::Certhash(multihash) => Protocol::Certhash(multihash.into()), + LibP2pProtocol::P2pWebSocketStar => Protocol::P2pWebSocketStar, + LibP2pProtocol::Memory(port) => Protocol::Memory(port), + LibP2pProtocol::Onion(str, port) => Protocol::Onion(str, port), + LibP2pProtocol::Onion3(addr) => Protocol::Onion3(Cow::Owned(*addr.hash()), addr.port()), + LibP2pProtocol::P2p(peer_id) => Protocol::P2p((*peer_id.as_ref()).into()), + LibP2pProtocol::P2pCircuit => Protocol::P2pCircuit, + LibP2pProtocol::Quic => Protocol::Quic, + LibP2pProtocol::QuicV1 => Protocol::QuicV1, + LibP2pProtocol::Sctp(port) => Protocol::Sctp(port), + LibP2pProtocol::Tcp(port) => Protocol::Tcp(port), + LibP2pProtocol::Tls => Protocol::Tls, + LibP2pProtocol::Noise => Protocol::Noise, + LibP2pProtocol::Udp(port) => Protocol::Udp(port), + LibP2pProtocol::Udt => Protocol::Udt, + LibP2pProtocol::Unix(str) => Protocol::Unix(str), + LibP2pProtocol::Utp => Protocol::Utp, + LibP2pProtocol::Ws(str) => Protocol::Ws(str), + LibP2pProtocol::Wss(str) => Protocol::Wss(str), + protocol => { + log::error!( + target: LOG_TARGET, + "Got unsupported multiaddr protocol '{}'", + protocol.tag(), + ); + // Strictly speaking, this conversion is incorrect. But making protocol conversion + // fallible would significantly complicate the client code. As DCCP transport is not + // used by substrate, this conversion should be safe. + // Also, as of `multiaddr-18.1`, all enum variants are actually covered. 
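With the new `From<IpAddr>`/`From<Ipv4Addr>`/`From<Ipv6Addr>` impls on both `Multiaddr` and `Protocol`, a multiaddress can be built straight from a standard-library address. A small usage sketch, assuming `sc-network-types` is in scope as `sc_network_types` and that its `Multiaddr` renders the canonical textual form via `Display`:

```rust
use std::net::{IpAddr, Ipv4Addr};

use sc_network_types::multiaddr::Multiaddr;

fn main() {
    // `From<IpAddr>` picks the `/ip4` or `/ip6` component automatically.
    let addr: Multiaddr = IpAddr::V4(Ipv4Addr::LOCALHOST).into();
    assert_eq!(addr.to_string(), "/ip4/127.0.0.1");
}
```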
+ Protocol::Dccp(0) + }, + } + } +} + +impl<'a> From> for LibP2pProtocol<'a> { + fn from(protocol: Protocol<'a>) -> Self { + match protocol { + Protocol::Dccp(port) => LibP2pProtocol::Dccp(port), + Protocol::Dns(str) => LibP2pProtocol::Dns(str), + Protocol::Dns4(str) => LibP2pProtocol::Dns4(str), + Protocol::Dns6(str) => LibP2pProtocol::Dns6(str), + Protocol::Dnsaddr(str) => LibP2pProtocol::Dnsaddr(str), + Protocol::Http => LibP2pProtocol::Http, + Protocol::Https => LibP2pProtocol::Https, + Protocol::Ip4(ipv4_addr) => LibP2pProtocol::Ip4(ipv4_addr), + Protocol::Ip6(ipv6_addr) => LibP2pProtocol::Ip6(ipv6_addr), + Protocol::P2pWebRtcDirect => LibP2pProtocol::P2pWebRtcDirect, + Protocol::P2pWebRtcStar => LibP2pProtocol::P2pWebRtcStar, + // Protocol #280 is called `WebRTC` in multiaddr-17.0 and `WebRTCDirect` in + // multiaddr-18.1. + Protocol::WebRTC => LibP2pProtocol::WebRTCDirect, + Protocol::Certhash(multihash) => LibP2pProtocol::Certhash(multihash.into()), + Protocol::P2pWebSocketStar => LibP2pProtocol::P2pWebSocketStar, + Protocol::Memory(port) => LibP2pProtocol::Memory(port), + Protocol::Onion(str, port) => LibP2pProtocol::Onion(str, port), + Protocol::Onion3(str, port) => LibP2pProtocol::Onion3((str.into_owned(), port).into()), + Protocol::P2p(multihash) => + LibP2pProtocol::P2p(PeerId::from_multihash(multihash.into()).unwrap_or_else(|_| { + // This is better than making conversion fallible and complicating the + // client code. + log::error!( + target: LOG_TARGET, + "Received multiaddr with p2p multihash which is not a valid \ + peer_id. Replacing with random peer_id." + ); + PeerId::random() + })), + Protocol::P2pCircuit => LibP2pProtocol::P2pCircuit, + Protocol::Quic => LibP2pProtocol::Quic, + Protocol::QuicV1 => LibP2pProtocol::QuicV1, + Protocol::Sctp(port) => LibP2pProtocol::Sctp(port), + Protocol::Tcp(port) => LibP2pProtocol::Tcp(port), + Protocol::Tls => LibP2pProtocol::Tls, + Protocol::Noise => LibP2pProtocol::Noise, + Protocol::Udp(port) => LibP2pProtocol::Udp(port), + Protocol::Udt => LibP2pProtocol::Udt, + Protocol::Unix(str) => LibP2pProtocol::Unix(str), + Protocol::Utp => LibP2pProtocol::Utp, + Protocol::Ws(str) => LibP2pProtocol::Ws(str), + Protocol::Wss(str) => LibP2pProtocol::Wss(str), + } + } +} diff --git a/substrate/client/network/types/src/multihash.rs b/substrate/client/network/types/src/multihash.rs index 91f5b6353a718..321211c598d38 100644 --- a/substrate/client/network/types/src/multihash.rs +++ b/substrate/client/network/types/src/multihash.rs @@ -156,22 +156,20 @@ impl From for LiteP2pMultihash { } } -// TODO: uncomment this after upgrading `multihash` crate to v0.19.1. 
-// -// impl From> for Multihash { -// fn from(generic: multihash::MultihashGeneric<64>) -> Self { -// LiteP2pMultihash::wrap(generic.code(), generic.digest()) -// .expect("both have size 64; qed") -// .into() -// } -// } -// -// impl From for multihash::Multihash<64> { -// fn from(multihash: Multihash) -> Self { -// multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest()) -// .expect("both have size 64; qed") -// } -// } +impl From> for Multihash { + fn from(generic: multihash::Multihash<64>) -> Self { + LiteP2pMultihash::wrap(generic.code(), generic.digest()) + .expect("both have size 64; qed") + .into() + } +} + +impl From for multihash::Multihash<64> { + fn from(multihash: Multihash) -> Self { + multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest()) + .expect("both have size 64; qed") + } +} #[cfg(test)] mod tests { diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index 2944ff7f4f49d..12e017317b223 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -16,46 +16,45 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -bytes = "1.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -fnv = "1.0.6" -futures = "0.3.30" -futures-timer = "3.0.2" -hyper = { version = "0.14.16", features = ["http2", "stream"] } -hyper-rustls = { version = "0.24.0", features = ["http2"] } -libp2p = "0.51.4" -num_cpus = "1.13" -once_cell = "1.19" -parking_lot = "0.12.1" -rand = "0.8.5" -threadpool = "1.7" -tracing = "0.1.29" -sc-client-api = { path = "../api" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-types = { path = "../network/types" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-core = { path = "../../primitives/core" } -sp-offchain = { path = "../../primitives/offchain" } -sp-runtime = { path = "../../primitives/runtime" } -sp-keystore = { path = "../../primitives/keystore" } -sp-externalities = { path = "../../primitives/externalities" } +array-bytes = { workspace = true, default-features = true } +bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +fnv = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +hyperv14 = { features = ["http2", "stream"], workspace = true, default-features = true } +hyper-rustls = { features = ["http2"], workspace = true } +num_cpus = { workspace = true } +once_cell = { workspace = true } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +threadpool = { workspace = true } +tracing = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } 
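The previously commented-out conversions could be enabled because both multihash wrappers now sit on top of the same `multihash` 0.19-style generic type. The `wrap`/`code`/`digest` round-trip they rely on, shown with the `multihash` crate alone (code `0x00` is the identity hasher; the digest bytes are arbitrary):

```rust
use multihash::Multihash;

fn main() {
    let digest = b"hello world";
    let mh = Multihash::<64>::wrap(0x00, digest).expect("11 bytes fit into 64; qed");
    assert_eq!(mh.code(), 0x00);
    assert_eq!(mh.digest(), &digest[..]);

    // Re-wrapping the extracted parts reproduces the same multihash, which is exactly
    // what the conversions between the two wrapper types do.
    let round_trip = Multihash::<64>::wrap(mh.code(), mh.digest()).expect("same size; qed");
    assert_eq!(round_trip, mh);
}
```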
+sp-keystore = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } log = { workspace = true, default-features = true } [dev-dependencies] -async-trait = "0.1" -lazy_static = "1.4.0" -tokio = "1.37" -sc-block-builder = { path = "../block-builder" } -sc-client-db = { path = "../db", default-features = true } -sc-transaction-pool = { path = "../transaction-pool" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +async-trait = { workspace = true } +lazy_static = { workspace = true } +tokio = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-db = { default-features = true, workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] default = [] diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 46f573341c579..fda5728b0d03e 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -27,6 +27,8 @@ //! (i.e.: the socket should continue being processed) in the background even if the runtime isn't //! actively calling any function. +use hyperv14 as hyper; + use crate::api::timestamp; use bytes::buf::{Buf, Reader}; use fnv::FnvHashMap; diff --git a/substrate/client/proposer-metrics/Cargo.toml b/substrate/client/proposer-metrics/Cargo.toml index f560ce2d65e6e..98064049b297f 100644 --- a/substrate/client/proposer-metrics/Cargo.toml +++ b/substrate/client/proposer-metrics/Cargo.toml @@ -17,4 +17,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } +prometheus-endpoint = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index d8f833e2b8d45..fda81b31ee502 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-chain-spec = { path = "../chain-spec" } -sc-mixnet = { path = "../mixnet" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-core = { path = "../../primitives/core" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } -sp-version = { path = "../../primitives/version" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +sc-chain-spec = { workspace = true, default-features = true } +sc-mixnet = 
{ workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 7837c852a1c9b..2f51d42bc1504 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -16,16 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -forwarded-header-value = "0.1.1" -futures = "0.3.30" -governor = "0.6.0" -http = "0.2.8" -hyper = "0.14.27" -ip_network = "0.4.1" -jsonrpsee = { version = "0.22", features = ["server"] } +forwarded-header-value = { workspace = true } +futures = { workspace = true } +governor = { workspace = true } +http = { workspace = true } +http-body-util = { workspace = true } +ip_network = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } +prometheus-endpoint = { workspace = true, default-features = true } +serde = { workspace = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.22.0", features = ["parking_lot"] } -tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.0", features = ["cors"] } +tokio = { features = ["parking_lot"], workspace = true, default-features = true } +tower = { workspace = true, features = ["util"] } +tower-http = { workspace = true, features = ["cors"] } + +# Dependencies outside the polkadot-sdk workspace +# which requires hyper v1 +hyper = "1.3" diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index ba1fcf5e36771..0bae16b113dff 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -23,21 +23,16 @@ pub mod middleware; pub mod utils; -use std::{ - convert::Infallible, error::Error as StdError, net::SocketAddr, num::NonZeroU32, time::Duration, -}; +use std::{error::Error as StdError, net::SocketAddr, num::NonZeroU32, sync::Arc, time::Duration}; -use hyper::{ - server::conn::AddrStream, - service::{make_service_fn, service_fn}, -}; use jsonrpsee::{ + core::BoxError, server::{ - middleware::http::ProxyGetRequestLayer, stop_channel, ws, PingConfig, StopHandle, - TowerServiceBuilder, + serve_with_graceful_shutdown, stop_channel, ws, PingConfig, StopHandle, TowerServiceBuilder, }, Methods, RpcModule, }; +use middleware::NodeHealthProxyLayer; use tokio::net::TcpListener; use tower::Service; use utils::{build_rpc_api, format_cors, get_proxy_ip, host_filtering, try_into_cors}; @@ -99,6 +94,7 @@ struct PerConnection { metrics: Option, tokio_handle: tokio::runtime::Handle, service_builder: TowerServiceBuilder, + rate_limit_whitelisted_ips: Arc>, } /// Start RPC server listening on given address. 
@@ -126,14 +122,14 @@ where rate_limit_trust_proxy_headers, } = config; - let std_listener = TcpListener::bind(addrs.as_slice()).await?.into_std()?; - let local_addr = std_listener.local_addr().ok(); + let listener = TcpListener::bind(addrs.as_slice()).await?; + let local_addr = listener.local_addr().ok(); let host_filter = host_filtering(cors.is_some(), local_addr); let http_middleware = tower::ServiceBuilder::new() .option_layer(host_filter) - // Proxy `GET /health` requests to internal `system_health` method. - .layer(ProxyGetRequestLayer::new("/health", "system_health")?) + // Proxy `GET /health, /health/readiness` requests to the internal `system_health` method. + .layer(NodeHealthProxyLayer::default()) .layer(try_into_cors(cors)?); let mut builder = jsonrpsee::server::Server::builder() @@ -163,20 +159,38 @@ where methods: build_rpc_api(rpc_api).into(), service_builder: builder.to_service_builder(), metrics, - tokio_handle, - stop_handle: stop_handle.clone(), + tokio_handle: tokio_handle.clone(), + stop_handle, + rate_limit_whitelisted_ips: Arc::new(rate_limit_whitelisted_ips), }; - let make_service = make_service_fn(move |addr: &AddrStream| { - let cfg = cfg.clone(); - let rate_limit_whitelisted_ips = rate_limit_whitelisted_ips.clone(); - let ip = addr.remote_addr().ip(); - - async move { - let cfg = cfg.clone(); - let rate_limit_whitelisted_ips = rate_limit_whitelisted_ips.clone(); + tokio_handle.spawn(async move { + loop { + let (sock, remote_addr) = tokio::select! { + res = listener.accept() => { + match res { + Ok(s) => s, + Err(e) => { + log::debug!(target: "rpc", "Failed to accept ipv4 connection: {:?}", e); + continue; + } + } + } + _ = cfg.stop_handle.clone().shutdown() => break, + }; + + let ip = remote_addr.ip(); + let cfg2 = cfg.clone(); + let svc = tower::service_fn(move |req: http::Request| { + let PerConnection { + methods, + service_builder, + metrics, + tokio_handle, + stop_handle, + rate_limit_whitelisted_ips, + } = cfg2.clone(); - Ok::<_, Infallible>(service_fn(move |req| { let proxy_ip = if rate_limit_trust_proxy_headers { get_proxy_ip(&req) } else { None }; @@ -193,9 +207,6 @@ where rate_limit }; - let PerConnection { service_builder, metrics, tokio_handle, stop_handle, methods } = - cfg.clone(); - let is_websocket = ws::is_upgrade_request(&req); let transport_label = if is_websocket { "ws" } else { "http" }; @@ -213,9 +224,9 @@ where ), }; - let rpc_middleware = - RpcServiceBuilder::new().option_layer(middleware_layer.clone()); - + let rpc_middleware = RpcServiceBuilder::new() + .rpc_logger(1024) + .option_layer(middleware_layer.clone()); let mut svc = service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle); @@ -232,17 +243,19 @@ where }); } - svc.call(req).await + // https://github.com/rust-lang/rust/issues/102211 the error type can't be inferred + // to be `Box` so we need to convert it to + // a concrete type as workaround. 
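With the move to hyper 1.x there is no `make_service_fn`/`Server::from_tcp` pair any more, so the server below runs its own accept loop and hands each socket to `serve_with_graceful_shutdown`. A standalone sketch of that loop shape, with the jsonrpsee/tower plumbing replaced by a trivial echo handler (the `serve` function and the `watch`-based shutdown signal are illustrative only):

```rust
use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    net::TcpListener,
    sync::watch,
};

async fn serve(listener: TcpListener, mut shutdown: watch::Receiver<()>) {
    loop {
        // Either a new connection arrives or the shutdown signal fires.
        let (mut socket, _remote_addr) = tokio::select! {
            accepted = listener.accept() => match accepted {
                Ok(conn) => conn,
                Err(err) => {
                    log::debug!(target: "rpc", "failed to accept connection: {err:?}");
                    continue;
                },
            },
            _ = shutdown.changed() => break,
        };

        // One task per connection, mirroring the per-socket spawn above.
        tokio::spawn(async move {
            let mut buf = [0u8; 1024];
            while let Ok(n) = socket.read(&mut buf).await {
                if n == 0 || socket.write_all(&buf[..n]).await.is_err() {
                    break;
                }
            }
        });
    }
}
```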
+ svc.call(req).await.map_err(|e| BoxError::from(e)) } - })) - } - }); - - let server = hyper::Server::from_tcp(std_listener)?.serve(make_service); + }); - tokio::spawn(async move { - let graceful = server.with_graceful_shutdown(async move { stop_handle.shutdown().await }); - let _ = graceful.await; + cfg.tokio_handle.spawn(serve_with_graceful_shutdown( + sock, + svc, + cfg.stop_handle.clone().shutdown(), + )); + } }); log::info!( diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index 88ed8b2f43358..0a14be4dacf59 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -32,9 +32,11 @@ use jsonrpsee::{ }; mod metrics; +mod node_health; mod rate_limit; pub use metrics::*; +pub use node_health::*; pub use rate_limit::*; const MAX_JITTER: Duration = Duration::from_millis(50); diff --git a/substrate/client/rpc-servers/src/middleware/node_health.rs b/substrate/client/rpc-servers/src/middleware/node_health.rs new file mode 100644 index 0000000000000..69c9e0829ac98 --- /dev/null +++ b/substrate/client/rpc-servers/src/middleware/node_health.rs @@ -0,0 +1,203 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Middleware for handling `/health` and `/health/readiness` endpoints. + +use std::{ + error::Error, + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures::future::FutureExt; +use http::{HeaderValue, Method, StatusCode, Uri}; +use jsonrpsee::{ + server::{HttpBody, HttpRequest, HttpResponse}, + types::{Response as RpcResponse, ResponseSuccess as RpcResponseSuccess}, +}; +use tower::Service; + +const RPC_SYSTEM_HEALTH_CALL: &str = r#"{"jsonrpc":"2.0","method":"system_health","id":0}"#; +const HEADER_VALUE_JSON: HeaderValue = HeaderValue::from_static("application/json; charset=utf-8"); + +/// Layer that applies [`NodeHealthProxy`] which +/// proxies `/health` and `/health/readiness` endpoints. +#[derive(Debug, Clone, Default)] +pub struct NodeHealthProxyLayer; + +impl tower::Layer for NodeHealthProxyLayer { + type Service = NodeHealthProxy; + + fn layer(&self, service: S) -> Self::Service { + NodeHealthProxy::new(service) + } +} + +/// Middleware that proxies `/health` and `/health/readiness` endpoints. +pub struct NodeHealthProxy(S); + +impl NodeHealthProxy { + /// Creates a new [`NodeHealthProxy`]. 
+ pub fn new(service: S) -> Self { + Self(service) + } +} + +impl tower::Service> for NodeHealthProxy +where + S: Service, + S::Response: 'static, + S::Error: Into> + 'static, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = Box; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let mut req = req.map(|body| HttpBody::new(body)); + let maybe_intercept = InterceptRequest::from_http(&req); + + // Modify the request and proxy it to `system_health` + if let InterceptRequest::Health | InterceptRequest::Readiness = maybe_intercept { + // RPC methods are accessed with `POST`. + *req.method_mut() = Method::POST; + // Precautionary remove the URI. + *req.uri_mut() = Uri::from_static("/"); + + // Requests must have the following headers: + req.headers_mut().insert(http::header::CONTENT_TYPE, HEADER_VALUE_JSON); + req.headers_mut().insert(http::header::ACCEPT, HEADER_VALUE_JSON); + + // Adjust the body to reflect the method call. + req = req.map(|_| HttpBody::from(RPC_SYSTEM_HEALTH_CALL)); + } + + // Call the inner service and get a future that resolves to the response. + let fut = self.0.call(req); + + async move { + let res = fut.await.map_err(|err| err.into())?; + + Ok(match maybe_intercept { + InterceptRequest::Deny => + http_response(StatusCode::METHOD_NOT_ALLOWED, HttpBody::empty()), + InterceptRequest::No => res, + InterceptRequest::Health => { + let health = parse_rpc_response(res.into_body()).await?; + http_ok_response(serde_json::to_string(&health)?) + }, + InterceptRequest::Readiness => { + let health = parse_rpc_response(res.into_body()).await?; + if (!health.is_syncing && health.peers > 0) || !health.should_have_peers { + http_ok_response(HttpBody::empty()) + } else { + http_internal_error() + } + }, + }) + } + .boxed() + } +} + +// NOTE: This is duplicated here to avoid dependency to the `RPC API`. +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +struct Health { + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. + pub should_have_peers: bool, +} + +fn http_ok_response>(body: S) -> HttpResponse { + http_response(StatusCode::OK, body) +} + +fn http_response>(status_code: StatusCode, body: S) -> HttpResponse { + HttpResponse::builder() + .status(status_code) + .header(http::header::CONTENT_TYPE, HEADER_VALUE_JSON) + .body(body.into()) + .expect("Header is valid; qed") +} + +fn http_internal_error() -> HttpResponse { + http_response(hyper::StatusCode::INTERNAL_SERVER_ERROR, HttpBody::empty()) +} + +async fn parse_rpc_response( + body: HttpBody, +) -> Result> { + use http_body_util::BodyExt; + + let bytes = body.collect().await?.to_bytes(); + + let raw_rp = serde_json::from_slice::>(&bytes)?; + let rp = RpcResponseSuccess::::try_from(raw_rp)?; + + Ok(rp.result) +} + +/// Whether the request should be treated as ordinary RPC call or be modified. +enum InterceptRequest { + /// Proxy `/health` to `system_health`. + Health, + /// Checks if node has at least one peer and is not doing major syncing. + /// + /// Returns HTTP status code 200 on success otherwise HTTP status code 500 is returned. + Readiness, + /// Treat as a ordinary RPC call and don't modify the request or response. 
+ No, + /// Deny health or readiness calls that is not HTTP GET request. + /// + /// Returns HTTP status code 405. + Deny, +} + +impl InterceptRequest { + fn from_http(req: &HttpRequest) -> InterceptRequest { + match req.uri().path() { + "/health" => + if req.method() == http::Method::GET { + InterceptRequest::Health + } else { + InterceptRequest::Deny + }, + "/health/readiness" => + if req.method() == http::Method::GET { + InterceptRequest::Readiness + } else { + InterceptRequest::Deny + }, + // Forward all other requests to the RPC server. + _ => InterceptRequest::No, + } + } +} diff --git a/substrate/client/rpc-servers/src/utils.rs b/substrate/client/rpc-servers/src/utils.rs index d99b8e637d9df..d9d943c7c1fb3 100644 --- a/substrate/client/rpc-servers/src/utils.rs +++ b/substrate/client/rpc-servers/src/utils.rs @@ -25,10 +25,7 @@ use std::{ }; use forwarded_header_value::ForwardedHeaderValue; -use hyper::{ - header::{HeaderName, HeaderValue}, - Request, -}; +use http::header::{HeaderName, HeaderValue}; use jsonrpsee::{server::middleware::http::HostFilterLayer, RpcModule}; use tower_http::cors::{AllowOrigin, CorsLayer}; @@ -57,7 +54,7 @@ pub(crate) fn build_rpc_api(mut rpc_api: RpcModule) available_methods.sort(); rpc_api - .register_method("rpc_methods", move |_, _| { + .register_method("rpc_methods", move |_, _, _| { serde_json::json!({ "methods": available_methods, }) @@ -96,7 +93,7 @@ pub(crate) fn format_cors(maybe_cors: Option<&Vec>) -> String { /// 1. `Forwarded` header. /// 2. `X-Forwarded-For` header. /// 3. `X-Real-Ip`. -pub(crate) fn get_proxy_ip(req: &Request) -> Option { +pub(crate) fn get_proxy_ip(req: &http::Request) -> Option { if let Some(ip) = req .headers() .get(&FORWARDED) @@ -133,9 +130,10 @@ pub(crate) fn get_proxy_ip(req: &Request) -> Option { mod tests { use super::*; use hyper::header::HeaderValue; + use jsonrpsee::server::{HttpBody, HttpRequest}; - fn request() -> hyper::Request { - hyper::Request::builder().body(hyper::Body::empty()).unwrap() + fn request() -> http::Request { + HttpRequest::builder().body(HttpBody::empty()).unwrap() } #[test] diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 8977c842d0380..0fcf5fd34e82c 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } # Internal chain structures for "chain_spec". 
-sc-chain-spec = { path = "../chain-spec" } +sc-chain-spec = { workspace = true, default-features = true } # Pool for submitting extrinsics required by "transaction" -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -sp-api = { path = "../../primitives/api" } -sp-rpc = { path = "../../primitives/rpc" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-version = { path = "../../primitives/version" } -sc-client-api = { path = "../api" } -sc-utils = { path = "../utils" } -sc-rpc = { path = "../rpc" } -codec = { package = "parity-scale-codec", version = "3.6.12" } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } thiserror = { workspace = true } serde = { workspace = true, default-features = true } -hex = "0.4" -futures = "0.3.30" -parking_lot = "0.12.1" -tokio-stream = { version = "0.1.14", features = ["sync"] } -tokio = { version = "1.22.0", features = ["sync"] } -array-bytes = "6.2.2" +hex = { workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } +tokio-stream = { features = ["sync"], workspace = true } +tokio = { features = ["sync"], workspace = true, default-features = true } +array-bytes = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -futures-util = { version = "0.3.30", default-features = false } -rand = "0.8.5" -schnellru = "0.2.1" +futures-util = { workspace = true } +rand = { workspace = true, default-features = true } +schnellru = { workspace = true } [dev-dependencies] -jsonrpsee = { version = "0.22", features = ["server", "ws-client"] } +jsonrpsee = { features = ["server", "ws-client"], workspace = true } serde_json = { workspace = true, default-features = true } -tokio = { version = "1.22.0", features = ["macros"] } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-transaction-pool = { path = "../../test-utils/runtime/transaction-pool" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-externalities = { path = "../../primitives/externalities" } -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -sc-block-builder = { path = "../block-builder" } -sc-service = { path = "../service", features = ["test-helpers"] } -assert_matches = "1.3.0" -pretty_assertions = "1.2.1" -sc-transaction-pool = { path = "../transaction-pool" } +tokio = { features = ["macros"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } +sp-consensus = { workspace = true, default-features = true } +sp-externalities = { workspace = true, 
default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 23cb0bbf54585..128d803521f6b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -54,7 +54,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_body", raw_method)] + #[method(name = "chainHead_v1_body", with_extensions)] async fn chain_head_unstable_body( &self, follow_subscription: String, @@ -73,7 +73,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_header", raw_method)] + #[method(name = "chainHead_v1_header", with_extensions)] async fn chain_head_unstable_header( &self, follow_subscription: String, @@ -85,7 +85,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_storage", raw_method)] + #[method(name = "chainHead_v1_storage", with_extensions)] async fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -99,7 +99,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_call", raw_method)] + #[method(name = "chainHead_v1_call", with_extensions)] async fn chain_head_unstable_call( &self, follow_subscription: String, @@ -118,7 +118,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_unpin", raw_method)] + #[method(name = "chainHead_v1_unpin", with_extensions)] async fn chain_head_unstable_unpin( &self, follow_subscription: String, @@ -131,7 +131,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_v1_continue", raw_method)] + #[method(name = "chainHead_v1_continue", with_extensions)] async fn chain_head_unstable_continue( &self, follow_subscription: String, @@ -145,7 +145,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_v1_stopOperation", raw_method)] + #[method(name = "chainHead_v1_stopOperation", with_extensions)] async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 6779180a41466..a056b4d437c8d 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -36,7 +36,7 @@ use crate::{ use codec::Encode; use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ - core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionDetails, + core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionId, Extensions, MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, }; use log::debug; @@ -251,14 +251,16 @@ where async fn chain_head_unstable_body( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, ) -> ResponsePayload<'static, MethodResponse> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. return ResponsePayload::success(MethodResponse::LimitReached); @@ -335,14 +337,16 @@ where async fn chain_head_unstable_header( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, ) -> Result, ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(None); } @@ -371,16 +375,18 @@ where async fn chain_head_unstable_storage( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, items: Vec>, child_trie: Option, ) -> ResponsePayload<'static, MethodResponse> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. return ResponsePayload::success(MethodResponse::LimitReached); @@ -452,7 +458,7 @@ where async fn chain_head_unstable_call( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash: Block::Hash, function: String, @@ -463,10 +469,12 @@ where Err(err) => return ResponsePayload::error(err), }; - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { // The spec says to return `LimitReached` if the follow subscription is invalid or // stale. 
return ResponsePayload::success(MethodResponse::LimitReached); @@ -530,14 +538,16 @@ where async fn chain_head_unstable_unpin( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, hash_or_hashes: ListOrValue, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()); } @@ -566,14 +576,16 @@ where async fn chain_head_unstable_continue( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()) } @@ -592,14 +604,16 @@ where async fn chain_head_unstable_stop_operation( &self, - connection_details: ConnectionDetails, + ext: &Extensions, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { - if !self - .subscriptions - .contains_subscription(connection_details.id(), &follow_subscription) - { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + + if !self.subscriptions.contains_subscription(conn_id, &follow_subscription) { return Ok(()) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index a753896b24c23..6dc3df76bdd79 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -32,7 +32,7 @@ use futures::{ }; use futures_util::future::Either; use jsonrpsee::SubscriptionSink; -use log::{debug, error}; +use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, }; @@ -572,7 +572,7 @@ where // The information from `.info()` is updated from the DB as the last // step of the finalization and it should be up to date. // If the info is outdated, there is nothing the RPC can do for now. 
- error!( + debug!( target: LOG_TARGET, "[follow][id={:?}] Client does not contain different best block", self.sub_id, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index a6edc344bc63f..d4d616f54dc88 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -846,6 +846,7 @@ impl> SubscriptionsInner { #[cfg(test)] mod tests { use super::*; + use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; use sc_service::client::new_in_mem; use sp_consensus::BlockOrigin; @@ -1420,17 +1421,20 @@ mod tests { rpc_connections.clone(), ); - let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); - let mut reserved_sub_second = subscription_management.reserve_subscription(1).unwrap(); + let reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); + let mut reserved_sub_second = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Subscriptions reserved but not yet populated. assert_eq!(subs.read().subs.len(), 0); // Cannot reserve anymore. - assert!(subscription_management.reserve_subscription(1).is_none()); + assert!(subscription_management.reserve_subscription(ConnectionId(1)).is_none()); // Drop the first subscription. drop(reserved_sub_first); // Space is freed-up for the rpc connections. - let mut reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let mut reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Insert subscriptions. let _sub_data_first = @@ -1445,7 +1449,8 @@ mod tests { // Check that the subscription is removed. assert_eq!(subs.read().subs.len(), 1); // Space is freed-up for the rpc connections. - let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let reserved_sub_first = + subscription_management.reserve_subscription(ConnectionId(1)).unwrap(); // Drop all subscriptions. drop(reserved_sub_first); diff --git a/substrate/client/rpc-spec-v2/src/common/connections.rs b/substrate/client/rpc-spec-v2/src/common/connections.rs index c16a80bf49db9..f0c31d612ebdb 100644 --- a/substrate/client/rpc-spec-v2/src/common/connections.rs +++ b/substrate/client/rpc-spec-v2/src/common/connections.rs @@ -195,68 +195,71 @@ mod tests { #[test] fn reserve_space() { let rpc_connections = RpcConnections::new(2); - let reserved = rpc_connections.reserve_space(1); + let conn_id = ConnectionId(1); + let reserved = rpc_connections.reserve_space(conn_id); + assert!(reserved.is_some()); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); assert_eq!(rpc_connections.data.lock().len(), 1); let reserved = reserved.unwrap(); let registered = reserved.register("identifier1".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier1")); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); drop(registered); // Data is dropped. 
- assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().get(&conn_id).is_none()); assert!(rpc_connections.data.lock().is_empty()); // Checks can still happen. - assert!(!rpc_connections.contains_identifier(1, "identifier1")); + assert!(!rpc_connections.contains_identifier(conn_id, "identifier1")); } #[test] fn reserve_space_capacity_reached() { let rpc_connections = RpcConnections::new(2); + let conn_id = ConnectionId(1); // Reserve identifier for connection 1. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Add identifier for connection 1. let reserved = reserved.unwrap(); let registered = reserved.register("identifier1".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier1")); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Reserve identifier for connection 1 again. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Add identifier for connection 1 again. let reserved = reserved.unwrap(); let registered_second = reserved.register("identifier2".to_string()).unwrap(); - assert!(rpc_connections.contains_identifier(1, "identifier2")); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier2")); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Cannot reserve more identifiers. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_none()); // Drop the first identifier. drop(registered); - assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); - assert!(rpc_connections.contains_identifier(1, "identifier2")); - assert!(!rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(conn_id, "identifier2")); + assert!(!rpc_connections.contains_identifier(conn_id, "identifier1")); // Can reserve again after clearing the space. - let reserved = rpc_connections.reserve_space(1); + let reserved = rpc_connections.reserve_space(conn_id); assert!(reserved.is_some()); - assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(2, rpc_connections.data.lock().get(&conn_id).unwrap().num_identifiers); // Ensure data is cleared. 
drop(reserved); drop(registered_second); - assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().get(&conn_id).is_none()); } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index ed358922d53ed..6af6f1678440a 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -48,7 +48,7 @@ pub trait TransactionBroadcastApi { /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_v1_broadcast", raw_method)] + #[method(name = "transaction_v1_broadcast", with_extensions)] async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. @@ -56,6 +56,6 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_v1_stop", raw_method)] + #[method(name = "transaction_v1_stop", with_extensions)] async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 68c19010e31c5..2fd4ce2454565 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -27,7 +27,7 @@ use futures::{FutureExt, Stream, StreamExt}; use futures_util::stream::AbortHandle; use jsonrpsee::{ core::{async_trait, RpcResult}, - ConnectionDetails, + ConnectionId, Extensions, }; use parking_lot::RwLock; use rand::{distributions::Alphanumeric, Rng}; @@ -121,19 +121,18 @@ where ::Hash: Unpin, Client: HeaderBackend + BlockchainEvents + Send + Sync + 'static, { - async fn broadcast( - &self, - connection_details: ConnectionDetails, - bytes: Bytes, - ) -> RpcResult> { + async fn broadcast(&self, ext: &Extensions, bytes: Bytes) -> RpcResult> { let pool = self.pool.clone(); + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); // The unique ID of this operation. let id = self.generate_unique_id(); // Ensure that the connection has not reached the maximum number of active operations. - let Some(reserved_connection) = self.rpc_connections.reserve_space(connection_details.id()) - else { + let Some(reserved_connection) = self.rpc_connections.reserve_space(conn_id) else { return Ok(None) }; let Some(reserved_identifier) = reserved_connection.register(id.clone()) else { @@ -245,11 +244,16 @@ where async fn stop_broadcast( &self, - connection_details: ConnectionDetails, + ext: &Extensions, operation_id: String, ) -> Result<(), ErrorBroadcast> { + let conn_id = ext + .get::() + .copied() + .expect("ConnectionId is always set by jsonrpsee; qed"); + // The operation ID must correlate to the same connection ID. 
- if !self.rpc_connections.contains_identifier(connection_details.id(), &operation_id) { + if !self.rpc_connections.contains_identifier(conn_id, &operation_id) { return Err(ErrorBroadcast::InvalidOperationID) } diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index 7dd46b2ab4c31..545d02bb30438 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -16,46 +16,46 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["server"] } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-block-builder = { path = "../block-builder" } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sc-mixnet = { path = "../mixnet" } -sc-rpc-api = { path = "../rpc-api" } -sc-tracing = { path = "../tracing" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-offchain = { path = "../../primitives/offchain" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } -sp-session = { path = "../../primitives/session" } -sp-version = { path = "../../primitives/version" } -sp-statement-store = { path = "../../primitives/statement-store" } -tokio = "1.37" +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-mixnet = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-offchain = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } [dev-dependencies] -env_logger = "0.11" -assert_matches = "1.3.0" -sc-block-builder = { path = "../block-builder" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-transaction-pool = { path = "../transaction-pool" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -tokio = "1.37" -sp-io = { path = "../../primitives/io" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } 
-pretty_assertions = "1.2.1" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } +env_logger = { workspace = true } +assert_matches = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +pretty_assertions = { workspace = true } +tracing-subscriber = { features = ["env-filter"], workspace = true } [features] test-helpers = [] diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index 3b5372615e733..bc566ed37f230 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -130,7 +130,7 @@ async fn inner_pipe_from_stream( "Subscription buffer limit={} exceeded for subscription={} conn_id={}; dropping subscription", buf.max_cap, sink.method_name(), - sink.connection_id() + sink.connection_id().0 ); return } @@ -189,7 +189,7 @@ mod tests { async fn subscribe() -> Subscription { let mut module = RpcModule::new(()); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, _| async move { + .register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move { let stream = futures::stream::iter([0; 16]); pipe_from_stream(pending, stream).await; Ok(()) @@ -217,7 +217,7 @@ mod tests { let mut module = RpcModule::new(tx); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, ctx| async move { + .register_subscription("sub", "my_sub", "unsub", |_, pending, ctx, _| async move { let stream = futures::stream::iter([0; 32]); pipe_from_stream(pending, stream).await; _ = ctx.unbounded_send(()); @@ -239,16 +239,21 @@ mod tests { let mut module = RpcModule::new(notify_tx); module - .register_subscription("sub", "my_sub", "unsub", |_, pending, notify_tx| async move { - // emulate empty stream for simplicity: otherwise we need some mechanism - // to sync buffer and channel send operations - let stream = futures::stream::empty::<()>(); - // this should exit immediately - pipe_from_stream(pending, stream).await; - // notify that the `pipe_from_stream` has returned - notify_tx.notify_one(); - Ok(()) - }) + .register_subscription( + "sub", + "my_sub", + "unsub", + |_, pending, notify_tx, _| async move { + // emulate empty stream for simplicity: otherwise we need some mechanism + // to sync buffer and channel send operations + let stream = futures::stream::empty::<()>(); + // this should exit immediately + pipe_from_stream(pending, stream).await; + // notify that the `pipe_from_stream` has returned + notify_tx.notify_one(); + Ok(()) + }, + ) .unwrap(); module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap(); diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index dfdd485f15c00..724e2ddfe56a5 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -28,64 +28,64 @@ runtime-benchmarks = [ ] [dependencies] -jsonrpsee = { version = "0.22", features = ["server"] } +jsonrpsee = { features = ["server"], workspace = true } thiserror = { workspace = true } -futures = "0.3.30" -rand = "0.8.5" -parking_lot = 
"0.12.1" +futures = { workspace = true } +rand = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } log = { workspace = true, default-features = true } -futures-timer = "3.0.1" -exit-future = "0.2.0" -pin-project = "1.0.12" +futures-timer = { workspace = true } +exit-future = { workspace = true } +pin-project = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-keystore = { path = "../keystore" } -sp-runtime = { path = "../../primitives/runtime" } -sp-trie = { path = "../../primitives/trie" } -sp-externalities = { path = "../../primitives/externalities" } -sc-utils = { path = "../utils" } -sp-version = { path = "../../primitives/version" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } -sp-session = { path = "../../primitives/session" } -sp-state-machine = { path = "../../primitives/state-machine" } -sp-consensus = { path = "../../primitives/consensus/common" } -sc-consensus = { path = "../consensus/common" } -sp-storage = { path = "../../primitives/storage" } -sc-network = { path = "../network" } -sc-network-common = { path = "../network/common" } -sc-network-light = { path = "../network/light" } -sc-network-sync = { path = "../network/sync" } -sc-network-types = { path = "../network/types" } -sc-network-transactions = { path = "../network/transactions" } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sp-api = { path = "../../primitives/api" } -sc-client-db = { path = "../db", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sc-executor = { path = "../executor" } -sc-transaction-pool = { path = "../transaction-pool" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } -sc-transaction-pool-api = { path = "../transaction-pool/api" } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof" } -sc-rpc-server = { path = "../rpc-servers" } -sc-rpc = { path = "../rpc" } -sc-rpc-spec-v2 = { path = "../rpc-spec-v2" } -sc-informant = { path = "../informant" } -sc-telemetry = { path = "../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-tracing = { path = "../tracing" } -sc-sysinfo = { path = "../sysinfo" } -tracing = "0.1.29" -tracing-futures = { version = "0.2.4" } -async-trait = "0.1.79" -tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } -tempfile = "3.1.0" -directories = "5.0.1" -static_init = "1.0.3" -schnellru = "0.2.1" +sc-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-storage = { 
workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } +sc-network-light = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +codec = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-transaction-storage-proof = { workspace = true, default-features = true } +sc-rpc-server = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-informant = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +tracing-futures = { workspace = true } +async-trait = { workspace = true } +tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } +tempfile = { workspace = true } +directories = { workspace = true } +static_init = { workspace = true } +schnellru = { workspace = true } [dev-dependencies] -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime = { path = "../../test-utils/runtime" } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 9da4d21925769..1341aa0e72051 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -16,19 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use super::{code_provider::CodeProvider, ClientConfig}; use sc_client_api::{ backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend, }; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::ProofRecorder; -use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode}; +use sp_core::traits::{CallContext, CodeExecutor}; use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashingFor}, }; -use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof}; +use sp_state_machine::{backend::AsTrieBackend, OverlayedChanges, StateMachine, StorageProof}; use std::{cell::RefCell, sync::Arc}; /// Call executor that executes methods locally, querying all required @@ -36,8 +36,7 @@ use std::{cell::RefCell, sync::Arc}; pub struct LocalCallExecutor { backend: Arc, executor: E, - wasm_override: Arc>, - wasm_substitutes: WasmSubstitutes, + code_provider: CodeProvider, execution_extensions: Arc>, } @@ -53,81 +52,15 @@ where client_config: ClientConfig, execution_extensions: ExecutionExtensions, ) -> sp_blockchain::Result { - let wasm_override = client_config - .wasm_runtime_overrides - .as_ref() - .map(|p| WasmOverride::new(p.clone(), &executor)) - .transpose()?; - - let wasm_substitutes = WasmSubstitutes::new( - client_config.wasm_runtime_substitutes, - executor.clone(), - backend.clone(), - )?; + let code_provider = CodeProvider::new(&client_config, executor.clone(), backend.clone())?; Ok(LocalCallExecutor { backend, executor, - wasm_override: Arc::new(wasm_override), - wasm_substitutes, + code_provider, execution_extensions: Arc::new(execution_extensions), }) } - - /// Check if local runtime code overrides are enabled and one is available - /// for the given `BlockId`. If yes, return it; otherwise return the same - /// `RuntimeCode` instance that was passed. - fn check_override<'a>( - &'a self, - onchain_code: RuntimeCode<'a>, - state: &B::State, - hash: Block::Hash, - ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> - where - Block: BlockT, - B: backend::Backend, - { - let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; - let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { - o.get( - &on_chain_version.spec_version, - onchain_code.heap_pages, - &on_chain_version.spec_name, - ) - }) { - log::debug!(target: "wasm_overrides", "using WASM override for block {}", hash); - d - } else if let Some(s) = - self.wasm_substitutes - .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) - { - log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", hash); - s - } else { - log::debug!( - target: "wasm_overrides", - "Neither WASM override nor substitute available for block {hash}, using onchain code", - ); - (onchain_code, on_chain_version) - }; - - Ok(code_and_version) - } - - /// Returns the on chain runtime version. 
- fn on_chain_runtime_version( - &self, - code: &RuntimeCode, - state: &B::State, - ) -> sp_blockchain::Result { - let mut overlay = OverlayedChanges::default(); - - let mut ext = Ext::new(&mut overlay, state, None); - - self.executor - .runtime_version(&mut ext, code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) - } } impl Clone for LocalCallExecutor @@ -138,8 +71,7 @@ where LocalCallExecutor { backend: self.backend.clone(), executor: self.executor.clone(), - wasm_override: self.wasm_override.clone(), - wasm_substitutes: self.wasm_substitutes.clone(), + code_provider: self.code_provider.clone(), execution_extensions: self.execution_extensions.clone(), } } @@ -175,7 +107,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = self.execution_extensions.extensions(at_hash, at_number); @@ -215,7 +147,7 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; let mut extensions = extensions.borrow_mut(); match recorder { @@ -263,7 +195,9 @@ where let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - self.check_override(runtime_code, &state, at_hash).map(|(_, v)| v) + self.code_provider + .maybe_override_code(runtime_code, &state, at_hash) + .map(|(_, v)| v) } fn prove_execution( @@ -281,7 +215,7 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let runtime_code = self.code_provider.maybe_override_code(runtime_code, &state, at_hash)?.0; sp_state_machine::prove_execution_on_trie_backend( trie_backend, @@ -331,133 +265,3 @@ where self.executor.native_version() } } - -#[cfg(test)] -mod tests { - use super::*; - use backend::Backend; - use sc_client_api::in_mem; - use sc_executor::WasmExecutor; - use sp_core::{ - testing::TaskExecutor, - traits::{FetchRuntimeCode, WrappedRuntimeCode}, - }; - use std::collections::HashMap; - use substrate_test_runtime_client::{runtime, GenesisInit}; - - #[test] - fn should_get_override_if_exists() { - let executor = WasmExecutor::default(); - - let overrides = crate::client::wasm_override::dummy_overrides(); - let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); - let onchain_code = RuntimeCode { - code_fetcher: &onchain_code, - heap_pages: Some(128), - hash: vec![0, 0, 0, 0], - }; - - let backend = Arc::new(in_mem::Backend::::new()); - - // wasm_runtime_overrides is `None` here because we construct the - // LocalCallExecutor directly later on - let client_config = ClientConfig::default(); - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. 
- let _client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let call_executor = LocalCallExecutor { - backend: backend.clone(), - executor: executor.clone(), - wasm_override: Arc::new(Some(overrides)), - wasm_substitutes: WasmSubstitutes::new( - Default::default(), - executor.clone(), - backend.clone(), - ) - .unwrap(), - execution_extensions: Arc::new(ExecutionExtensions::new( - None, - Arc::new(executor.clone()), - )), - }; - - let check = call_executor - .check_override( - onchain_code, - &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), - backend.blockchain().info().genesis_hash, - ) - .expect("RuntimeCode override") - .0; - - assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); - } - - #[test] - fn returns_runtime_version_from_substitute() { - const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; - - let executor = WasmExecutor::default(); - - let backend = Arc::new(in_mem::Backend::::new()); - - // Let's only override the `spec_name` for our testing purposes. - let substitute = sp_version::embed::embed_runtime_version( - &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), - sp_version::RuntimeVersion { - spec_name: SUBSTITUTE_SPEC_NAME.into(), - ..substrate_test_runtime::VERSION - }, - ) - .unwrap(); - - let client_config = crate::client::ClientConfig { - wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), - ..Default::default() - }; - - let genesis_block_builder = crate::GenesisBlockBuilder::new( - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - !client_config.no_genesis, - backend.clone(), - executor.clone(), - ) - .expect("Creates genesis block builder"); - - // client is used for the convenience of creating and inserting the genesis block. - let client = - crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( - backend.clone(), - executor.clone(), - genesis_block_builder, - Box::new(TaskExecutor::new()), - None, - None, - client_config, - ) - .expect("Creates a client"); - - let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); - - assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); - } -} diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 3c25c233775be..a2c9212f7b9c9 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -18,7 +18,10 @@ //! 
Substrate Client -use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; +use super::{ + block_rules::{BlockRules, LookupResult as BlockLookupResult}, + CodeProvider, +}; use crate::client::notification_pinning::NotificationPinningWorker; use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; @@ -57,10 +60,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ - storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, - StorageKey, - }, + storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, StorageKey}, traits::{CallContext, SpawnNamed}, }; use sp_runtime::{ @@ -115,6 +115,7 @@ where config: ClientConfig, telemetry: Option, unpin_worker_sender: TracingUnboundedSender>, + code_provider: CodeProvider, _phantom: PhantomData, } @@ -410,6 +411,7 @@ where Block, BlockImportOperation = >::BlockImportOperation, >, + E: Clone, B: 'static, { let info = backend.blockchain().info(); @@ -438,6 +440,7 @@ where ); let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); + let code_provider = CodeProvider::new(&config, executor.clone(), backend.clone())?; Ok(Client { backend, @@ -453,6 +456,7 @@ where config, telemetry, unpin_worker_sender, + code_provider, _phantom: Default::default(), }) } @@ -475,13 +479,10 @@ where } /// Get the code at a given block. + /// + /// This takes any potential substitutes into account, but ignores overrides. pub fn code_at(&self, hash: Block::Hash) -> sp_blockchain::Result> { - Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? - .expect( - "None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed", - ) - .0) + self.code_provider.code_at_ignoring_overrides(hash) } /// Get the RuntimeVersion at a given block. @@ -1779,7 +1780,7 @@ where /// Check block preconditions. async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { let BlockCheckParams { @@ -1861,10 +1862,10 @@ where } async fn check_block( - &mut self, + &self, block: BlockCheckParams, ) -> Result { - (&*self).check_block(block).await + (&self).check_block(block).await } } diff --git a/substrate/client/service/src/client/code_provider.rs b/substrate/client/service/src/client/code_provider.rs new file mode 100644 index 0000000000000..8ba7766ea65b5 --- /dev/null +++ b/substrate/client/service/src/client/code_provider.rs @@ -0,0 +1,348 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use sc_client_api::backend; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_runtime::traits::Block as BlockT; +use sp_state_machine::{Ext, OverlayedChanges}; +use std::sync::Arc; + +/// Provider for fetching `:code` of a block. +/// +/// As a node can run with code overrides or substitutes, this will ensure that these are taken into +/// account before returning the actual `code` for a block. +pub struct CodeProvider { + backend: Arc, + executor: Arc, + wasm_override: Arc>, + wasm_substitutes: WasmSubstitutes, +} + +impl Clone for CodeProvider { + fn clone(&self) -> Self { + Self { + backend: self.backend.clone(), + executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), + } + } +} + +impl CodeProvider +where + Block: BlockT, + Backend: backend::Backend, + Executor: RuntimeVersionOf, +{ + /// Create a new instance. + pub fn new( + client_config: &ClientConfig, + executor: Executor, + backend: Arc, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), &executor)) + .transpose()?; + + let executor = Arc::new(executor); + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes.clone(), + executor.clone(), + backend.clone(), + )?; + + Ok(Self { backend, executor, wasm_override: Arc::new(wasm_override), wasm_substitutes }) + } + + /// Returns the `:code` for the given `block`. + /// + /// This takes into account potential overrides/substitutes. + pub fn code_at_ignoring_overrides(&self, block: Block::Hash) -> sp_blockchain::Result> { + let state = self.backend.state_at(block)?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + + self.maybe_override_code_internal(runtime_code, &state, block, true) + .and_then(|r| { + r.0.fetch_runtime_code().map(Into::into).ok_or_else(|| { + sp_blockchain::Error::Backend("Could not find `:code` in backend.".into()) + }) + }) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides/substitutes. + pub fn maybe_override_code<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + self.maybe_override_code_internal(onchain_code, state, hash, false) + } + + /// Maybe override the given `onchain_code`. + /// + /// This takes into account potential overrides(depending on `ignore_overrides`)/substitutes. 
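+ ///
+ /// Overrides take precedence over substitutes; when `ignore_overrides` is `true`, the
+ /// override map is not consulted at all and only a substitute or the on-chain code can
+ /// be returned.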
+ fn maybe_override_code_internal<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &Backend::State, + hash: Block::Hash, + ignore_overrides: bool, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> { + let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; + let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { + if ignore_overrides { + return None + } + + o.get( + &on_chain_version.spec_version, + onchain_code.heap_pages, + &on_chain_version.spec_name, + ) + }) { + tracing::debug!(target: "code-provider::overrides", block = ?hash, "using WASM override"); + d + } else if let Some(s) = + self.wasm_substitutes + .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) + { + tracing::debug!(target: "code-provider::substitutes", block = ?hash, "Using WASM substitute"); + s + } else { + tracing::debug!( + target: "code-provider", + block = ?hash, + "Neither WASM override nor substitute available, using onchain code", + ); + (onchain_code, on_chain_version) + }; + + Ok(code_and_version) + } + + /// Returns the on chain runtime version. + fn on_chain_runtime_version( + &self, + code: &RuntimeCode, + state: &Backend::State, + ) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + + let mut ext = Ext::new(&mut overlay, state, None); + + self.executor + .runtime_version(&mut ext, code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use backend::Backend; + use sc_client_api::{in_mem, HeaderBackend}; + use sc_executor::WasmExecutor; + use sp_core::{ + testing::TaskExecutor, + traits::{FetchRuntimeCode, WrappedRuntimeCode}, + }; + use std::collections::HashMap; + use substrate_test_runtime_client::{runtime, GenesisInit}; + + #[test] + fn no_override_no_substitutes_work() { + let executor = WasmExecutor::default(); + + let code_fetcher = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &code_fetcher, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(None), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(code_fetcher.fetch_runtime_code(), check.fetch_runtime_code()); + } + + #[test] + fn should_get_override_if_exists() { + let executor = WasmExecutor::default(); + + let overrides = crate::client::wasm_override::dummy_overrides(); + let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &onchain_code, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. + let _client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config.clone(), + ) + .expect("Creates a client"); + + let executor = Arc::new(executor); + + let code_provider = CodeProvider { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(Some(overrides)), + wasm_substitutes: WasmSubstitutes::new(Default::default(), executor, backend.clone()) + .unwrap(), + }; + + let check = code_provider + .maybe_override_code( + onchain_code, + &backend.state_at(backend.blockchain().info().genesis_hash).unwrap(), + backend.blockchain().info().genesis_hash, + ) + .expect("RuntimeCode override") + .0; + + assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); + } + + #[test] + fn returns_runtime_version_from_substitute() { + const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; + + let executor = WasmExecutor::default(); + + let backend = Arc::new(in_mem::Backend::::new()); + + // Let's only override the `spec_name` for our testing purposes. 
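+ // (`embed_runtime_version` should replace the `runtime_version` custom section of the
+ // wasm blob in place, which is why the substitute below can report the patched
+ // `spec_name` without the runtime having to be executed for that query.)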
+ let substitute = sp_version::embed::embed_runtime_version( + &substrate_test_runtime::WASM_BINARY_BLOATY.unwrap(), + sp_version::RuntimeVersion { + spec_name: SUBSTITUTE_SPEC_NAME.into(), + ..substrate_test_runtime::VERSION + }, + ) + .unwrap(); + + let client_config = crate::client::ClientConfig { + wasm_runtime_substitutes: vec![(0, substitute)].into_iter().collect::>(), + ..Default::default() + }; + + let genesis_block_builder = crate::GenesisBlockBuilder::new( + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + !client_config.no_genesis, + backend.clone(), + executor.clone(), + ) + .expect("Creates genesis block builder"); + + // client is used for the convenience of creating and inserting the genesis block. + let client = + crate::client::new_with_backend::<_, _, runtime::Block, _, runtime::RuntimeApi>( + backend.clone(), + executor.clone(), + genesis_block_builder, + Box::new(TaskExecutor::new()), + None, + None, + client_config, + ) + .expect("Creates a client"); + + let version = client.runtime_version_at(client.chain_info().genesis_hash).unwrap(); + + assert_eq!(SUBSTITUTE_SPEC_NAME, &*version.spec_name); + } +} diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index 0703cc2b47d14..ec77a92f162f0 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,14 +47,14 @@ mod block_rules; mod call_executor; mod client; +mod code_provider; mod notification_pinning; mod wasm_override; mod wasm_substitutes; -pub use self::{ - call_executor::LocalCallExecutor, - client::{Client, ClientConfig}, -}; +pub use call_executor::LocalCallExecutor; +pub use client::{Client, ClientConfig}; +pub(crate) use code_provider::CodeProvider; #[cfg(feature = "test-helpers")] pub use self::client::{new_in_mem, new_with_backend}; diff --git a/substrate/client/service/src/client/wasm_substitutes.rs b/substrate/client/service/src/client/wasm_substitutes.rs index 70db0ef20f5a8..07ca6c9606283 100644 --- a/substrate/client/service/src/client/wasm_substitutes.rs +++ b/substrate/client/service/src/client/wasm_substitutes.rs @@ -94,7 +94,7 @@ impl From for sp_blockchain::Error { pub struct WasmSubstitutes { /// spec_version -> WasmSubstitute substitutes: Arc>>, - executor: Executor, + executor: Arc, backend: Arc, } @@ -110,14 +110,14 @@ impl Clone for WasmSubstitutes WasmSubstitutes where - Executor: RuntimeVersionOf + Clone + 'static, + Executor: RuntimeVersionOf, Backend: backend::Backend, Block: BlockT, { /// Create a new instance. pub fn new( substitutes: HashMap, Vec>, - executor: Executor, + executor: Arc, backend: Arc, ) -> Result { let substitutes = substitutes diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 187e18aa3cace..e4788f1f3376c 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -280,7 +280,7 @@ impl Default for RpcMethods { static mut BASE_PATH_TEMP: Option = None; /// The base path that is used for everything that needs to be written on disk to run a node. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct BasePath { path: PathBuf, } diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index a51bb4012d5d8..63be296d1b216 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -137,7 +137,7 @@ pub struct PartialComponents, /// The chain task manager. 
pub task_manager: TaskManager, - /// A keystore container instance.. + /// A keystore container instance. pub keystore_container: KeystoreContainer, /// A chain selection algorithm instance. pub select_chain: SelectChain, diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 3c75423139527..ade7b3b4e6a0b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -15,33 +15,33 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-channel = "1.8.0" -array-bytes = "6.2.2" -fdlimit = "0.3.0" -futures = "0.3.30" +async-channel = { workspace = true } +array-bytes = { workspace = true, default-features = true } +fdlimit = { workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } -parking_lot = "0.12.1" -tempfile = "3.1.0" -tokio = { version = "1.22.0", features = ["time"] } -sc-block-builder = { path = "../../block-builder" } -sc-client-api = { path = "../../api" } -sc-client-db = { path = "../../db", default-features = false } -sc-consensus = { path = "../../consensus/common" } -sc-executor = { path = "../../executor" } -sc-network = { path = "../../network" } -sc-network-sync = { path = "../../network/sync" } -sc-service = { path = "..", features = ["test-helpers"] } -sc-transaction-pool-api = { path = "../../transaction-pool/api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-storage = { path = "../../../primitives/storage" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-trie = { path = "../../../primitives/trie" } -sp-io = { path = "../../../primitives/io" } -substrate-test-runtime = { path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +codec = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +tempfile = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-io = { workspace = true, 
default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml index e203eb5a3282f..be4f1ef973c98 100644 --- a/substrate/client/state-db/Cargo.toml +++ b/substrate/client/state-db/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -sp-core = { path = "../../primitives/core" } +parking_lot = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 8ca6d11dbe0dc..1cb682f054d72 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -parity-db = "0.4.12" -tokio = { version = "1.22.0", features = ["time"] } -sp-statement-store = { path = "../../primitives/statement-store" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } -sc-client-api = { path = "../api" } -sc-keystore = { path = "../keystore" } +parking_lot = { workspace = true, default-features = true } +parity-db = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } [dev-dependencies] -tempfile = "3.1.0" -env_logger = "0.11" +tempfile = { workspace = true } +env_logger = { workspace = true } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 5248ebdf9a650..8c490284dccc6 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -12,9 +12,9 @@ homepage = "https://substrate.io" workspace = true [dependencies] -clap = { version = "4.5.3", features = ["derive", "string"] } +clap = { features = ["derive", "string"], workspace = true } log = { workspace = true, default-features = true } -fs4 = "0.7.0" -sp-core = { path = "../../primitives/core" } -tokio = { version = "1.22.0", features = ["time"] } +fs4 = { workspace = true } +sp-core = { workspace = true, default-features = true } +tokio = { features = ["time"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index d5bdc920f7c9b..1cbaadb70fff9 100644 --- 
a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -15,15 +15,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-chain-spec = { path = "../chain-spec" } -sc-client-api = { path = "../api" } -sc-consensus-babe = { path = "../consensus/babe" } -sc-consensus-epochs = { path = "../consensus/epochs" } -sc-consensus-grandpa = { path = "../consensus/grandpa" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-runtime = { path = "../../primitives/runtime" } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-epochs = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index 32b7755c64b50..f79345d672429 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,20 +17,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -libc = "0.2" +futures = { workspace = true } +libc = { workspace = true } log = { workspace = true, default-features = true } -rand = "0.8.5" -rand_pcg = "0.3.1" -derive_more = "0.99" -regex = "1" +rand = { workspace = true, default-features = true } +rand_pcg = { workspace = true } +derive_more = { workspace = true, default-features = true } +regex = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-telemetry = { path = "../telemetry" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +sc-telemetry = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [dev-dependencies] -sp-runtime = { path = "../../primitives/runtime" } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/sysinfo/src/sysinfo.rs b/substrate/client/sysinfo/src/sysinfo.rs index 3fa583cf7aca1..37b35fcb91032 100644 --- a/substrate/client/sysinfo/src/sysinfo.rs +++ b/substrate/client/sysinfo/src/sysinfo.rs @@ -21,13 +21,13 @@ use crate::{ExecutionLimit, HwBench}; use sc_telemetry::SysInfo; use sp_core::{sr25519, Pair}; use sp_io::crypto::sr25519_verify; -use sp_std::{fmt, fmt::Formatter, prelude::*}; use derive_more::From; use rand::{seq::SliceRandom, Rng, RngCore}; use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use std::{ - 
fmt::Display, + fmt, + fmt::{Display, Formatter}, fs::File, io::{Seek, SeekFrom, Write}, ops::{Deref, DerefMut}, diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 0cce2acf6409c..a789ebc5f1afd 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -chrono = "0.4.31" -futures = "0.3.30" -libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } +chrono = { workspace = true } +futures = { workspace = true } +libp2p = { features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"], workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -pin-project = "1.0.12" -sc-utils = { path = "../utils" } -sc-network = { path = "../network" } -rand = "0.8.5" +parking_lot = { workspace = true, default-features = true } +pin-project = { workspace = true } +sc-utils = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -wasm-timer = "0.2.5" +wasm-timer = { workspace = true } diff --git a/substrate/client/telemetry/src/transport.rs b/substrate/client/telemetry/src/transport.rs index a82626caac2d3..ca6ceecbed63b 100644 --- a/substrate/client/telemetry/src/transport.rs +++ b/substrate/client/telemetry/src/transport.rs @@ -31,7 +31,7 @@ const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); pub(crate) fn initialize_transport() -> Result { let transport = { let tcp_transport = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::new()); - let inner = libp2p::dns::TokioDnsConfig::system(tcp_transport)?; + let inner = libp2p::dns::tokio::Transport::system(tcp_transport)?; libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { let connec = connec .with(|item| { diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index df674d24c6dd7..cacb1351416a9 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -16,32 +16,32 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ansi_term = "0.12.1" -is-terminal = "0.4.9" -chrono = "0.4.31" -codec = { package = "parity-scale-codec", version = "3.6.12" } -lazy_static = "1.4.0" -libc = "0.2.152" +ansi_term = { workspace = true } +is-terminal = { workspace = true } +chrono = { workspace = true } +codec = { workspace = true, default-features = true } +lazy_static = { workspace = true } +libc = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -regex = "1.6.0" -rustc-hash = "1.1.0" +parking_lot = { workspace = true, default-features = true } +regex = { workspace = true } +rustc-hash = { workspace = true } serde = { workspace = true, default-features = true } thiserror = { workspace = true } -tracing = "0.1.29" -tracing-log = "0.2.0" +tracing = { workspace = true, default-features = true } +tracing-log = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "parking_lot"] } -sc-client-api = { path = "../api" } -sc-tracing-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { 
path = "../../primitives/core" } -sp-rpc = { path = "../../primitives/rpc" } -sp-runtime = { path = "../../primitives/runtime" } -sp-tracing = { path = "../../primitives/tracing" } +sc-client-api = { workspace = true, default-features = true } +sc-tracing-proc-macro = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [dev-dependencies] -criterion = "0.5.1" +criterion = { workspace = true, default-features = true } tracing-subscriber = { workspace = true, features = ["chrono", "parking_lot"] } [[bench]] diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index fec34aa0bca93..9162bdc1ad8ab 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { features = ["proc-macro"], workspace = true } syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 351650297ffc5..95b391faf169e 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -16,36 +16,36 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -futures-timer = "3.0.2" -linked-hash-map = "0.5.4" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-timer = { workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" +parking_lot = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } -sc-client-api = { path = "../api" } -sc-transaction-pool-api = { path = "api" } -sc-utils = { path = "../utils" } -sp-api = { path = "../../primitives/api" } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-runtime = { path = "../../primitives/runtime" } -sp-tracing = { path = "../../primitives/tracing" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-runtime = { 
workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1.3.0" -criterion = "0.5.1" -sc-block-builder = { path = "../block-builder" } -sp-consensus = { path = "../../primitives/consensus/common" } -substrate-test-runtime = { path = "../../test-utils/runtime" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -substrate-test-runtime-transaction-pool = { path = "../../test-utils/runtime/transaction-pool" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +criterion = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } [[bench]] name = "basics" diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index be80a7706b3ef..6fec613c1114d 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -12,15 +12,15 @@ description = "Transaction pool client facing API." workspace = true [dependencies] -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index a101f4b3f3ad0..4053d77a13ebf 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -13,18 +13,18 @@ readme = "README.md" workspace = true [dependencies] -async-channel = "1.8.0" -futures = "0.3.30" -futures-timer = "3.0.2" -lazy_static = "1.4.0" +async-channel = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +lazy_static = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -prometheus = { version = "0.13.0", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +parking_lot = { workspace = true, default-features = true } +prometheus = { workspace = true } +sp-arithmetic = { workspace = true } [features] default = ["metered"] metered = [] [dev-dependencies] -tokio-test = "0.4.2" +tokio-test = { workspace = true } diff --git a/substrate/deprecated/hashing/Cargo.toml b/substrate/deprecated/hashing/Cargo.toml index 8695ccc8fca22..9db6cb64050d6 100644 --- a/substrate/deprecated/hashing/Cargo.toml +++ b/substrate/deprecated/hashing/Cargo.toml @@ -16,7 +16,7 @@ maintenance = { status = 
"deprecated" } targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/deprecated/hashing/proc-macro/Cargo.toml b/substrate/deprecated/hashing/proc-macro/Cargo.toml index aa78809241f03..ec5188bc53bd9 100644 --- a/substrate/deprecated/hashing/proc-macro/Cargo.toml +++ b/substrate/deprecated/hashing/proc-macro/Cargo.toml @@ -16,4 +16,4 @@ maintenance = { status = "deprecated" } targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-crypto-hashing-proc-macro = { path = "../../../primitives/crypto/hashing/proc-macro" } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index a3b3d1900e6e6..41ece6c9a27fe 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -18,51 +18,50 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # primitive deps, used for developing FRAME pallets. -sp-runtime = { default-features = false, path = "../primitives/runtime" } -sp-std = { default-features = false, path = "../primitives/std" } -sp-io = { default-features = false, path = "../primitives/io" } -sp-core = { default-features = false, path = "../primitives/core" } -sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } # frame deps, for developing FRAME pallets. -frame-support = { default-features = false, path = "support" } -frame-system = { default-features = false, path = "system" } +frame-support = { workspace = true } +frame-system = { workspace = true } # primitive types used for developing FRAME runtimes. 
-sp-version = { default-features = false, path = "../primitives/version", optional = true } -sp-api = { default-features = false, path = "../primitives/api", optional = true } -sp-block-builder = { default-features = false, path = "../primitives/block-builder", optional = true } -sp-transaction-pool = { default-features = false, path = "../primitives/transaction-pool", optional = true } -sp-offchain = { default-features = false, path = "../primitives/offchain", optional = true } -sp-session = { default-features = false, path = "../primitives/session", optional = true } -sp-consensus-aura = { default-features = false, path = "../primitives/consensus/aura", optional = true } -sp-consensus-grandpa = { default-features = false, path = "../primitives/consensus/grandpa", optional = true } -sp-inherents = { default-features = false, path = "../primitives/inherents", optional = true } -sp-storage = { default-features = false, path = "../primitives/storage", optional = true } +sp-version = { optional = true, workspace = true } +sp-api = { optional = true, workspace = true } +sp-block-builder = { optional = true, workspace = true } +sp-transaction-pool = { optional = true, workspace = true } +sp-offchain = { optional = true, workspace = true } +sp-session = { optional = true, workspace = true } +sp-consensus-aura = { optional = true, workspace = true } +sp-consensus-grandpa = { optional = true, workspace = true } +sp-inherents = { optional = true, workspace = true } +sp-storage = { optional = true, workspace = true } -frame-executive = { default-features = false, path = "../frame/executive", optional = true } -frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } +frame-executive = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { optional = true, workspace = true } # Used for runtime benchmarking -frame-benchmarking = { default-features = false, path = "../frame/benchmarking", optional = true } -frame-system-benchmarking = { default-features = false, path = "../frame/system/benchmarking", optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } # Used for try-runtime -frame-try-runtime = { default-features = false, path = "../frame/try-runtime", optional = true } +frame-try-runtime = { optional = true, workspace = true } -docify = "0.2.8" +docify = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-examples = { path = "./examples" } +pallet-examples = { workspace = true } [features] default = ["runtime", "std"] @@ -104,7 +103,6 @@ std = [ "sp-offchain?/std", "sp-runtime/std", "sp-session?/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool?/std", "sp-version?/std", diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index 10e2feba62376..451b86b35ddef 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -16,30 +16,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = 
["derive"], workspace = true } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-core = { workspace = true } +sp-crypto-hashing = { optional = true, workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -pallet-identity = { path = "../identity", default-features = false } -pallet-collective = { path = "../collective", default-features = false, optional = true } +pallet-identity = { workspace = true } +pallet-collective = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } -pallet-balances = { path = "../balances" } -pallet-collective = { path = "../collective" } +array-bytes = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } [features] default = ["std"] @@ -57,7 +56,6 @@ std = [ "sp-crypto-hashing?/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "array-bytes", diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index ed771c7226ea9..be65f49e6e4ea 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -94,6 +94,9 @@ pub mod migration; mod types; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -101,7 +104,6 @@ use sp_runtime::{ traits::{Dispatchable, Saturating, StaticLookup, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::prelude::*; use frame_support::{ dispatch::{DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index a9cfd6d0fde0e..1a0a899bcccb4 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -52,24 +52,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index edb515b8115a5..ec31ebf6a47ae 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -17,8 +17,9 @@ //! Tests for the alliance pallet. -use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use frame_support::{assert_noop, assert_ok}; use frame_system::{EventRecord, Phase}; +use sp_runtime::traits::BadOrigin; use super::*; use crate::{self as alliance, mock::*}; diff --git a/substrate/frame/alliance/src/types.rs b/substrate/frame/alliance/src/types.rs index 149030b52c674..75b949c19b325 100644 --- a/substrate/frame/alliance/src/types.rs +++ b/substrate/frame/alliance/src/types.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{traits::ConstU32, BoundedVec}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; /// A Multihash instance that only supports the basic functionality and no hashing. #[derive( diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index bfcda2299d5a1..4e867ece1bd27 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -16,23 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -log = { version = "0.4.20", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../primitives/api", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +codec = { workspace = true } +log = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-assets = { path = "../assets" } -primitive-types 
= { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml index c5efbf9f6f442..97a676fde10d5 100644 --- a/substrate/frame/asset-conversion/ops/Cargo.toml +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -15,23 +15,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -log = { version = "0.4.20", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -pallet-asset-conversion = { path = "..", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } +codec = { workspace = true } +log = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-asset-conversion = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../balances" } -pallet-assets = { path = "../../assets" } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } +pallet-balances = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] default = ["std"] @@ -50,7 +49,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/asset-conversion/ops/src/benchmarking.rs b/substrate/frame/asset-conversion/ops/src/benchmarking.rs index a7370f38bc4b0..384e17c9f0aac 100644 --- a/substrate/frame/asset-conversion/ops/src/benchmarking.rs +++ b/substrate/frame/asset-conversion/ops/src/benchmarking.rs @@ -28,7 +28,6 @@ use frame_system::RawOrigin as SystemOrigin; use pallet_asset_conversion::{BenchmarkHelper, Pallet as AssetConversion}; use sp_core::Get; use sp_runtime::traits::One; -use sp_std::prelude::*; /// Provides a pair of amounts expected to serve as sufficient initial liquidity for a pool. 
fn valid_liquidity_amount(ed1: T::Balance, ed2: T::Balance) -> (T::Balance, T::Balance) diff --git a/substrate/frame/asset-conversion/ops/src/lib.rs b/substrate/frame/asset-conversion/ops/src/lib.rs index a655a9cb44525..58c15b47a3eb3 100644 --- a/substrate/frame/asset-conversion/ops/src/lib.rs +++ b/substrate/frame/asset-conversion/ops/src/lib.rs @@ -42,6 +42,9 @@ pub mod weights; pub use pallet::*; pub use weights::WeightInfo; +extern crate alloc; + +use alloc::boxed::Box; use frame_support::traits::{ fungible::{Inspect as FungibleInspect, Mutate as FungibleMutate}, fungibles::{roles::ResetTeam, Inspect, Mutate, Refund}, @@ -50,7 +53,6 @@ use frame_support::traits::{ }; use pallet_asset_conversion::{PoolLocator, Pools}; use sp_runtime::traits::{TryConvert, Zero}; -use sp_std::boxed::Box; #[frame_support::pallet] pub mod pallet { diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs index 9454b3a9ad448..5c05faa6aa88d 100644 --- a/substrate/frame/asset-conversion/ops/src/mock.rs +++ b/substrate/frame/asset-conversion/ops/src/mock.rs @@ -17,7 +17,6 @@ //! Test environment for Asset Conversion Ops pallet. -use super::*; use crate as pallet_asset_conversion_ops; use core::default::Default; use frame_support::{ @@ -52,7 +51,7 @@ construct_runtime!( } ); -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -60,7 +59,6 @@ impl frame_system::Config for Test { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/asset-conversion/src/benchmarking.rs b/substrate/frame/asset-conversion/src/benchmarking.rs index c5f68476b1d0a..b003fe482881d 100644 --- a/substrate/frame/asset-conversion/src/benchmarking.rs +++ b/substrate/frame/asset-conversion/src/benchmarking.rs @@ -19,6 +19,8 @@ use super::*; use crate::Pallet as AssetConversion; +use alloc::vec; +use core::marker::PhantomData; use frame_benchmarking::{v2::*, whitelisted_caller}; use frame_support::{ assert_ok, @@ -29,7 +31,6 @@ use frame_support::{ }; use frame_system::RawOrigin as SystemOrigin; use sp_core::Get; -use sp_std::{marker::PhantomData, prelude::*}; /// Benchmark Helper pub trait BenchmarkHelper { diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index 62acb693efb1c..a9dc30375e5a8 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -70,6 +70,9 @@ pub use swap::*; pub use types::*; pub use weights::WeightInfo; +extern crate alloc; + +use alloc::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; use codec::Codec; use frame_support::{ storage::{with_storage_layer, with_transaction}, @@ -93,7 +96,6 @@ use sp_runtime::{ }, DispatchError, Saturating, TokenError, TransactionOutcome, }; -use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; #[frame_support::pallet] pub mod pallet { diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 477866e0051bc..d8832d70488af 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -61,20 +61,11 @@ impl frame_system::Config for Test { type 
AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<100>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 4662469e46ce4..4aeb6a39e8241 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -15,21 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-io = { path = "../../primitives/io" } -sp-core = { path = "../../primitives/core", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] @@ -43,7 +42,6 @@ std = [ "sp-core?/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs index 69f8267a4f25f..cfb013a73f5e8 100644 --- a/substrate/frame/asset-rate/src/lib.rs +++ b/substrate/frame/asset-rate/src/lib.rs @@ -59,6 +59,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::boxed::Box; use frame_support::traits::{ fungible::Inspect, tokens::{ConversionFromAssetBalance, ConversionToAssetBalance}, @@ -67,7 +70,6 @@ use sp_runtime::{ traits::{CheckedDiv, Zero}, FixedPointNumber, FixedU128, }; -use sp_std::boxed::Box; pub use pallet::*; pub use weights::WeightInfo; diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index d01996dab193d..c829d78afa886 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -18,7 +18,7 @@ //! The crate's mock. 
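+// As in the other mocks touched by this patch, `derive_impl` fills the remaining items of
+// `pallet_balances::Config` from the `TestDefaultConfig` prelude, so the impl below only
+// has to spell out what this test actually overrides (its `AccountStore`).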
use crate as pallet_asset_rate; -use frame_support::{derive_impl, traits::ConstU64}; +use frame_support::derive_impl; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -38,20 +38,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_asset_rate::Config for Test { diff --git a/substrate/frame/assets-freezer/Cargo.toml b/substrate/frame/assets-freezer/Cargo.toml new file mode 100644 index 0000000000000..97eadd8465a70 --- /dev/null +++ b/substrate/frame/assets-freezer/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-assets-freezer" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "Provides freezing features to `pallet-assets`" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +sp-runtime = { workspace = true } + +[dev-dependencies] +sp-io = { workspace = true } +sp-core = { workspace = true } +pallet-balances = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-assets/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/assets-freezer/src/impls.rs b/substrate/frame/assets-freezer/src/impls.rs new file mode 100644 index 0000000000000..cd383f1c3cd1e --- /dev/null +++ b/substrate/frame/assets-freezer/src/impls.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
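+// Sketch of the expected runtime wiring (illustrative only; `Runtime` and the
+// `pallet_assets::Config::Freezer` item name are assumptions, not defined in this file):
+//
+//     impl pallet_assets::Config for Runtime {
+//         // ...
+//         type Freezer = AssetsFreezer; // this pallet, via its `FrozenBalance` impl below
+//     }
+//
+// Other pallets can then place or release freezes through the `fungibles::MutateFreeze`
+// implementation further down, e.g. `set_freeze(asset, &reason, &who, amount)`.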
+ +use super::*; + +use frame_support::traits::{ + fungibles::{Inspect, InspectFreeze, MutateFreeze}, + tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence}, +}; +use pallet_assets::FrozenBalance; +use sp_runtime::traits::Zero; + +// Implements [`FrozenBalance`] from [`pallet-assets`], so it can understand how much of an +// account balance is frozen, and is able to signal to this pallet when to clear the state of an +// account. +impl, I: 'static> FrozenBalance + for Pallet +{ + fn frozen_balance(asset: T::AssetId, who: &T::AccountId) -> Option { + FrozenBalances::::get(asset, who) + } + + fn died(asset: T::AssetId, who: &T::AccountId) { + FrozenBalances::::remove(asset.clone(), who); + Freezes::::remove(asset, who); + } +} + +// Implement [`fungibles::Inspect`](frame_support::traits::fungibles::Inspect) as it is bound by +// [`fungibles::InspectFreeze`](frame_support::traits::fungibles::InspectFreeze) and +// [`fungibles::MutateFreeze`](frame_support::traits::fungibles::MutateFreeze). To do so, we'll +// re-export all of `pallet-assets` implementation of the same trait. +impl, I: 'static> Inspect for Pallet { + type AssetId = T::AssetId; + type Balance = T::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + pallet_assets::Pallet::::total_issuance(asset) + } + + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + pallet_assets::Pallet::::minimum_balance(asset) + } + + fn total_balance(asset: Self::AssetId, who: &T::AccountId) -> Self::Balance { + pallet_assets::Pallet::::total_balance(asset, who) + } + + fn balance(asset: Self::AssetId, who: &T::AccountId) -> Self::Balance { + pallet_assets::Pallet::::balance(asset, who) + } + + fn reducible_balance( + asset: Self::AssetId, + who: &T::AccountId, + preservation: Preservation, + force: Fortitude, + ) -> Self::Balance { + pallet_assets::Pallet::::reducible_balance(asset, who, preservation, force) + } + + fn can_deposit( + asset: Self::AssetId, + who: &T::AccountId, + amount: Self::Balance, + provenance: Provenance, + ) -> DepositConsequence { + pallet_assets::Pallet::::can_deposit(asset, who, amount, provenance) + } + + fn can_withdraw( + asset: Self::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + pallet_assets::Pallet::::can_withdraw(asset, who, amount) + } + + fn asset_exists(asset: Self::AssetId) -> bool { + pallet_assets::Pallet::::asset_exists(asset) + } +} + +impl, I: 'static> InspectFreeze for Pallet { + type Id = T::RuntimeFreezeReason; + + fn balance_frozen(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> Self::Balance { + let freezes = Freezes::::get(asset, who); + freezes.into_iter().find(|l| &l.id == id).map_or(Zero::zero(), |l| l.amount) + } + + fn can_freeze(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> bool { + let freezes = Freezes::::get(asset, who); + !freezes.is_full() || freezes.into_iter().any(|i| i.id == *id) + } +} + +impl, I: 'static> MutateFreeze for Pallet { + fn set_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &T::AccountId, + amount: Self::Balance, + ) -> sp_runtime::DispatchResult { + if amount.is_zero() { + return Self::thaw(asset, id, who); + } + let mut freezes = Freezes::::get(asset.clone(), who); + if let Some(i) = freezes.iter_mut().find(|i| &i.id == id) { + i.amount = amount; + } else { + freezes + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| Error::::TooManyFreezes)?; + } + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } + + fn 
extend_freeze( + asset: Self::AssetId, + id: &Self::Id, + who: &T::AccountId, + amount: Self::Balance, + ) -> sp_runtime::DispatchResult { + if amount.is_zero() { + return Ok(()); + } + let mut freezes = Freezes::::get(asset.clone(), who); + if let Some(i) = freezes.iter_mut().find(|x| &x.id == id) { + i.amount = i.amount.max(amount); + } else { + freezes + .try_push(IdAmount { id: *id, amount }) + .map_err(|_| Error::::TooManyFreezes)?; + } + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } + + fn thaw(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> sp_runtime::DispatchResult { + let mut freezes = Freezes::::get(asset.clone(), who); + freezes.retain(|f| &f.id != id); + Self::update_freezes(asset, who, freezes.as_bounded_slice()) + } +} diff --git a/substrate/frame/assets-freezer/src/lib.rs b/substrate/frame/assets-freezer/src/lib.rs new file mode 100644 index 0000000000000..b42d41ac1d925 --- /dev/null +++ b/substrate/frame/assets-freezer/src/lib.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Assets Freezer Pallet +//! +//! A pallet capable of freezing fungibles from `pallet-assets`. This is an extension of +//! `pallet-assets`, wrapping [`fungibles::Inspect`](`frame_support::traits::fungibles::Inspect`). +//! It implements both +//! [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and +//! [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate). The complexity +//! of the operations is `O(n)`. where `n` is the variant count of `RuntimeFreezeReason`. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! ## Overview +//! +//! This pallet provides the following functionality: +//! +//! - Pallet hooks allowing [`pallet-assets`] to know the frozen balance for an account on a given +//! asset (see [`pallet_assets::FrozenBalance`]). +//! - An implementation of +//! [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and +//! [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate), allowing +//! other pallets to manage freezes for the `pallet-assets` assets. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{ + pallet_prelude::*, + traits::{tokens::IdAmount, VariantCount, VariantCountOf}, + BoundedVec, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use sp_runtime::{ + traits::{Saturating, Zero}, + BoundedSlice, +}; + +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +mod impls; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config + pallet_assets::Config { + /// The overarching freeze reason. 
+ #[pallet::no_default_bounds] + type RuntimeFreezeReason: Parameter + Member + MaxEncodedLen + Copy + VariantCount; + + /// The overarching event type. + #[pallet::no_default_bounds] + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::error] + pub enum Error { + /// Number of freezes on an account would exceed `MaxFreezes`. + TooManyFreezes, + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + // `who`s frozen balance was increased by `amount`. + Frozen { who: T::AccountId, asset_id: T::AssetId, amount: T::Balance }, + // `who`s frozen balance was decreased by `amount`. + Thawed { who: T::AccountId, asset_id: T::AssetId, amount: T::Balance }, + } + + /// A map that stores freezes applied on an account for a given AssetId. + #[pallet::storage] + pub(super) type Freezes, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + BoundedVec< + IdAmount, + VariantCountOf, + >, + ValueQuery, + >; + + /// A map that stores the current total frozen balance for every account on a given AssetId. + #[pallet::storage] + pub(super) type FrozenBalances, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + T::Balance, + >; + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } +} + +impl, I: 'static> Pallet { + fn update_freezes( + asset: T::AssetId, + who: &T::AccountId, + freezes: BoundedSlice< + IdAmount, + VariantCountOf, + >, + ) -> DispatchResult { + let prev_frozen = FrozenBalances::::get(asset.clone(), who).unwrap_or_default(); + let after_frozen = freezes.into_iter().map(|f| f.amount).max().unwrap_or_else(Zero::zero); + FrozenBalances::::set(asset.clone(), who, Some(after_frozen)); + if freezes.is_empty() { + Freezes::::remove(asset.clone(), who); + FrozenBalances::::remove(asset.clone(), who); + } else { + Freezes::::insert(asset.clone(), who, freezes); + } + if prev_frozen > after_frozen { + let amount = prev_frozen.saturating_sub(after_frozen); + Self::deposit_event(Event::Thawed { asset_id: asset, who: who.clone(), amount }); + } else if after_frozen > prev_frozen { + let amount = after_frozen.saturating_sub(prev_frozen); + Self::deposit_event(Event::Frozen { asset_id: asset, who: who.clone(), amount }); + } + Ok(()) + } + + #[cfg(any(test, feature = "try-runtime"))] + fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + for (asset, who, _) in FrozenBalances::::iter() { + let max_frozen_amount = + Freezes::::get(asset.clone(), who.clone()).iter().map(|l| l.amount).max(); + + frame_support::ensure!( + FrozenBalances::::get(asset, who) == max_frozen_amount, + "The `FrozenAmount` is not equal to the maximum amount in `Freezes` for (`asset`, `who`)" + ); + } + + Ok(()) + } +} diff --git a/substrate/frame/assets-freezer/src/mock.rs b/substrate/frame/assets-freezer/src/mock.rs new file mode 100644 index 0000000000000..5e04dfe8e2b9c --- /dev/null +++ b/substrate/frame/assets-freezer/src/mock.rs @@ -0,0 +1,155 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests mock for `pallet-assets-freezer`. + +use crate as pallet_assets_freezer; +pub use crate::*; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; +use frame_support::{ + derive_impl, + traits::{AsEnsureOriginWithArg, ConstU64}, +}; +use scale_info::TypeInfo; +use sp_core::{ConstU32, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +pub type AccountId = u64; +pub type Balance = u64; +pub type AssetId = u32; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Assets: pallet_assets, + AssetsFreezer: pallet_assets_freezer, + Balances: pallet_balances, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type RuntimeFreezeReason = (); +} + +impl pallet_assets::Config for Test { + type AssetId = AssetId; + type AssetIdParameter = Compact; + type AssetDeposit = ConstU64<1>; + type Balance = Balance; + type AssetAccountDeposit = ConstU64<1>; + type MetadataDepositBase = (); + type MetadataDepositPerByte = (); + type ApprovalDeposit = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type StringLimit = ConstU32<32>; + type Extra = (); + type RemoveItemsLimit = ConstU32<10>; + type CallbackHandle = (); + type Currency = Balances; + type Freezer = AssetsFreezer; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +#[derive( + Decode, Encode, MaxEncodedLen, PartialEq, Eq, Ord, PartialOrd, TypeInfo, Debug, Clone, Copy, +)] +pub enum DummyFreezeReason { + Governance, + Staking, + Other, +} + +impl VariantCount for DummyFreezeReason { + // Intentionally set below the actual count of variants, to allow testing for `can_freeze` + const VARIANT_COUNT: u32 = 2; +} + +impl Config for Test { + type RuntimeFreezeReason = DummyFreezeReason; + type RuntimeEvent = 
RuntimeEvent; +} + +pub fn new_test_ext(execute: impl FnOnce()) -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + assets: pallet_assets::GenesisConfig { + assets: vec![(1, 0, true, 1)], + metadata: vec![], + accounts: vec![(1, 1, 100)], + next_asset_id: None, + }, + system: Default::default(), + balances: Default::default(), + } + .build_storage() + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + System::set_block_number(1); + execute(); + frame_support::assert_ok!(AssetsFreezer::do_try_state()); + }); + + ext +} diff --git a/substrate/frame/assets-freezer/src/tests.rs b/substrate/frame/assets-freezer/src/tests.rs new file mode 100644 index 0000000000000..4f2dea79c705a --- /dev/null +++ b/substrate/frame/assets-freezer/src/tests.rs @@ -0,0 +1,304 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-assets-freezer. + +use crate::mock::*; + +use codec::Compact; +use frame_support::{ + assert_ok, assert_storage_noop, + traits::{ + fungibles::{Inspect, InspectFreeze, MutateFreeze}, + tokens::{Fortitude, Preservation}, + }, +}; +use pallet_assets::FrozenBalance; + +const WHO: AccountId = 1; +const ASSET_ID: AssetId = 1; + +fn test_set_freeze(id: DummyFreezeReason, amount: Balance) { + let mut freezes = Freezes::::get(ASSET_ID, WHO); + + if let Some(i) = freezes.iter_mut().find(|l| l.id == id) { + i.amount = amount; + } else { + freezes + .try_push(IdAmount { id, amount }) + .expect("freeze is added without exceeding bounds; qed"); + } + + assert_ok!(AssetsFreezer::update_freezes(ASSET_ID, &WHO, freezes.as_bounded_slice())); +} + +fn test_thaw(id: DummyFreezeReason) { + let mut freezes = Freezes::::get(ASSET_ID, WHO); + freezes.retain(|l| l.id != id); + + assert_ok!(AssetsFreezer::update_freezes(ASSET_ID, &WHO, freezes.as_bounded_slice())); +} + +mod impl_frozen_balance { + use super::*; + + #[test] + fn frozen_balance_works() { + new_test_ext(|| { + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), None); + test_set_freeze(DummyFreezeReason::Governance, 1); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(1u64)); + test_set_freeze(DummyFreezeReason::Staking, 3); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + test_set_freeze(DummyFreezeReason::Governance, 2); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + // also test thawing works to reduce a balance, and finally thawing everything resets to + // None + test_thaw(DummyFreezeReason::Governance); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), Some(3u64)); + test_thaw(DummyFreezeReason::Staking); + assert_eq!(AssetsFreezer::frozen_balance(ASSET_ID, &WHO), None); + }); + } + + #[test] + fn died_works() { + new_test_ext(|| { + test_set_freeze(DummyFreezeReason::Governance, 1); + AssetsFreezer::died(ASSET_ID, &WHO); + 
assert!(FrozenBalances::::get(ASSET_ID, WHO).is_none()); + assert!(Freezes::::get(ASSET_ID, WHO).is_empty()); + }); + } +} + +mod impl_inspect_freeze { + use super::*; + + #[test] + fn balance_frozen_works() { + new_test_ext(|| { + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 0u64 + ); + test_set_freeze(DummyFreezeReason::Governance, 1); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 1u64 + ); + test_set_freeze(DummyFreezeReason::Staking, 3); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 3u64 + ); + test_set_freeze(DummyFreezeReason::Staking, 2); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 2u64 + ); + // also test thawing works to reduce a balance, and finally thawing everything resets to + // 0 + test_thaw(DummyFreezeReason::Governance); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Governance, &WHO), + 0u64 + ); + test_thaw(DummyFreezeReason::Staking); + assert_eq!( + AssetsFreezer::balance_frozen(ASSET_ID, &DummyFreezeReason::Staking, &WHO), + 0u64 + ); + }); + } + + /// This tests it's not possible to freeze once the freezes [`BoundedVec`] is full. This is, + /// the lenght of the vec is equal to [`Config::MaxFreezes`]. + /// This test assumes a mock configuration where this parameter is set to `2`. + #[test] + fn can_freeze_works() { + new_test_ext(|| { + test_set_freeze(DummyFreezeReason::Governance, 1); + assert!(AssetsFreezer::can_freeze(ASSET_ID, &DummyFreezeReason::Staking, &WHO)); + test_set_freeze(DummyFreezeReason::Staking, 1); + assert!(!AssetsFreezer::can_freeze(ASSET_ID, &DummyFreezeReason::Other, &WHO)); + }); + } +} + +mod impl_mutate_freeze { + use super::*; + + #[test] + fn set_freeze_works() { + new_test_ext(|| { + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 99 + ); + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 8 + )); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 91 + ); + System::assert_last_event( + Event::::Thawed { asset_id: ASSET_ID, who: WHO, amount: 2 }.into(), + ); + }); + } + + #[test] + fn extend_freeze_works() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + assert_storage_noop!(assert_ok!(AssetsFreezer::extend_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 8 + ))); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + assert_ok!(AssetsFreezer::extend_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 11 + )); + System::assert_last_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 1 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 88 + ); 
+ }); + } + + #[test] + fn thaw_works() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 10 + )); + System::assert_has_event( + Event::::Frozen { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 89 + ); + assert_ok!(AssetsFreezer::thaw(ASSET_ID, &DummyFreezeReason::Governance, &WHO)); + System::assert_has_event( + Event::::Thawed { asset_id: ASSET_ID, who: WHO, amount: 10 }.into(), + ); + assert_eq!( + Assets::reducible_balance( + ASSET_ID, + &WHO, + Preservation::Preserve, + Fortitude::Polite, + ), + 99 + ); + }); + } +} + +mod with_pallet_assets { + use frame_support::assert_noop; + + use super::*; + + #[test] + fn frozen_balance_affects_balance_transferring() { + new_test_ext(|| { + assert_ok!(AssetsFreezer::set_freeze( + ASSET_ID, + &DummyFreezeReason::Governance, + &WHO, + 20 + )); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(WHO), Compact(ASSET_ID), 2, 80), + pallet_assets::Error::::BalanceLow, + ); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(WHO), Compact(ASSET_ID), 2, 79)); + }); + } +} diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 9647ae4db6baa..802b8b9f39d80 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -16,23 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } +impl-trait-for-tuples = "0.2.2" log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-runtime = { workspace = true } # Needed for type-safe access to storage DB. -frame-support = { path = "../support", default-features = false } +frame-support = { workspace = true } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
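The numbers in the freezer tests above follow a simple relation: with `Preservation::Preserve`, the account has to keep the frozen amount plus the asset's minimum balance, and only the remainder is reducible (total 100, minimum balance 1, freeze 10 leaves 89; a 20-unit freeze leaves 79 transferable). A toy arithmetic sketch of just that relation, inferred from the test expectations rather than copied from the pallet's implementation:

```rust
/// Illustrative only: the spendable part of an asset balance while a freeze is
/// in place and the account must stay alive.
fn reducible_preserving(total: u64, min_balance: u64, frozen: u64) -> u64 {
	total.saturating_sub(frozen.saturating_add(min_balance))
}

fn main() {
	assert_eq!(reducible_preserving(100, 1, 0), 99);
	assert_eq!(reducible_preserving(100, 1, 10), 89);
	// A 20-unit freeze leaves exactly 79 transferable, as in
	// `frozen_balance_affects_balance_transferring`.
	assert_eq!(reducible_preserving(100, 1, 20), 79);
}
```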
-frame-system = { path = "../system", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +sp-core = { workspace = true } [dev-dependencies] -sp-std = { path = "../../primitives/std" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } +sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/assets/src/benchmarking.rs b/substrate/frame/assets/src/benchmarking.rs index 1b65bb953d77c..97cc04174a0c6 100644 --- a/substrate/frame/assets/src/benchmarking.rs +++ b/substrate/frame/assets/src/benchmarking.rs @@ -20,13 +20,13 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use alloc::vec; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, BenchmarkError, }; use frame_support::traits::{EnsureOrigin, Get, UnfilteredDispatchable}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::Bounded; -use sp_std::prelude::*; use crate::Pallet as Assets; @@ -76,7 +76,7 @@ fn swap_is_sufficient, I: 'static>(s: &mut bool) { let asset_id = default_asset_id::(); Asset::::mutate(&asset_id.into(), |maybe_a| { if let Some(ref mut a) = maybe_a { - sp_std::mem::swap(s, &mut a.is_sufficient) + core::mem::swap(s, &mut a.is_sufficient) } }); } diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index 9309d01011757..c218c4ddc952c 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -18,6 +18,7 @@ //! Functions for the Assets pallet. use super::*; +use alloc::vec; use frame_support::{defensive, traits::Get, BoundedVec}; #[must_use] @@ -35,20 +36,20 @@ impl, I: 'static> Pallet { /// Return the extra "sid-car" data for `id`/`who`, or `None` if the account doesn't exist. pub fn adjust_extra( id: T::AssetId, - who: impl sp_std::borrow::Borrow, + who: impl core::borrow::Borrow, ) -> Option> { ExtraMutator::maybe_new(id, who) } /// Get the asset `id` balance of `who`, or zero if the asset-account doesn't exist. - pub fn balance(id: T::AssetId, who: impl sp_std::borrow::Borrow) -> T::Balance { + pub fn balance(id: T::AssetId, who: impl core::borrow::Borrow) -> T::Balance { Self::maybe_balance(id, who).unwrap_or_default() } /// Get the asset `id` balance of `who` if the asset-account exists. 
pub fn maybe_balance( id: T::AssetId, - who: impl sp_std::borrow::Borrow, + who: impl core::borrow::Borrow, ) -> Option { Account::::get(id, who.borrow()).map(|a| a.balance) } @@ -132,6 +133,9 @@ impl, I: 'static> Pallet { Some(details) => details, None => return DepositConsequence::UnknownAsset, }; + if details.status == AssetStatus::Destroying { + return DepositConsequence::UnknownAsset + } if increase_supply && details.supply.checked_add(&amount).is_none() { return DepositConsequence::Overflow } @@ -175,6 +179,9 @@ impl, I: 'static> Pallet { if details.status == AssetStatus::Frozen { return Frozen } + if details.status == AssetStatus::Destroying { + return UnknownAsset + } if amount.is_zero() { return Success } @@ -709,6 +716,9 @@ impl, I: 'static> Pallet { ) -> DispatchResult { ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + if let Some(next_id) = NextAssetId::::get() { + ensure!(id == next_id, Error::::BadAssetId); + } Asset::::insert( &id, diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs index 30122f6d788ff..578fa08c4e63e 100644 --- a/substrate/frame/assets/src/impl_fungibles.rs +++ b/substrate/frame/assets/src/impl_fungibles.rs @@ -17,6 +17,7 @@ //! Implementations for fungibles trait. +use alloc::vec::Vec; use frame_support::{ defensive, traits::tokens::{ diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index d521492255589..b9b5b2388dfbe 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -167,13 +167,16 @@ mod impl_stored_map; mod types; pub use types::*; +extern crate alloc; + use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero}, ArithmeticError, DispatchError, TokenError, }; -use sp_std::prelude::*; +use alloc::vec::Vec; +use core::marker::PhantomData; use frame_support::{ dispatch::DispatchResult, ensure, @@ -182,7 +185,7 @@ use frame_support::{ traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, - Currency, EnsureOriginWithArg, ReservableCurrency, StoredMap, + Currency, EnsureOriginWithArg, Incrementable, ReservableCurrency, StoredMap, }, }; use frame_system::Config as SystemConfig; @@ -206,8 +209,37 @@ pub trait AssetsCallback { } } -/// Empty implementation in case no callbacks are required. -impl AssetsCallback for () {} +#[impl_trait_for_tuples::impl_for_tuples(10)] +impl AssetsCallback for Tuple { + fn created(id: &AssetId, owner: &AccountId) -> Result<(), ()> { + for_tuples!( #( Tuple::created(id, owner)?; )* ); + Ok(()) + } + + fn destroyed(id: &AssetId) -> Result<(), ()> { + for_tuples!( #( Tuple::destroyed(id)?; )* ); + Ok(()) + } +} + +/// Auto-increment the [`NextAssetId`] when an asset is created. +/// +/// This has not effect if the [`NextAssetId`] value is not present. +pub struct AutoIncAssetId(PhantomData<(T, I)>); +impl, I> AssetsCallback for AutoIncAssetId +where + T::AssetId: Incrementable, +{ + fn created(_: &T::AssetId, _: &T::AccountId) -> Result<(), ()> { + let Some(next_id) = NextAssetId::::get() else { + // Auto increment for the asset id is not enabled. 
+ return Ok(()); + }; + let next_id = next_id.increment().ok_or(())?; + NextAssetId::::put(next_id); + Ok(()) + } +} #[frame_support::pallet] pub mod pallet { @@ -361,6 +393,11 @@ pub mod pallet { type Extra: Member + Parameter + Default + MaxEncodedLen; /// Callback methods for asset state change (e.g. asset created or destroyed) + /// + /// Types implementing the [`AssetsCallback`] can be chained when listed together as a + /// tuple. + /// The [`AutoIncAssetId`] callback, in conjunction with the [`NextAssetId`], can be + /// used to set up auto-incrementing asset IDs for this collection. type CallbackHandle: AssetsCallback; /// Weight information for extrinsics in this pallet. @@ -415,6 +452,18 @@ pub mod pallet { ValueQuery, >; + /// The asset ID enforced for the next asset creation, if any present. Otherwise, this storage + /// item has no effect. + /// + /// This can be useful for setting up constraints for IDs of the new assets. For example, by + /// providing an initial [`NextAssetId`] and using the [`crate::AutoIncAssetId`] callback, an + /// auto-increment model can be applied to all new asset IDs. + /// + /// The initial next asset ID can be set using the [`GenesisConfig`] or the + /// [SetNextAssetId](`migration::next_asset_id::SetNextAssetId`) migration. + #[pallet::storage] + pub type NextAssetId, I: 'static = ()> = StorageValue<_, T::AssetId, OptionQuery>; + #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { @@ -424,6 +473,13 @@ pub mod pallet { pub metadata: Vec<(T::AssetId, Vec, Vec, u8)>, /// Genesis accounts: id, account_id, balance pub accounts: Vec<(T::AssetId, T::AccountId, T::Balance)>, + /// Genesis [`NextAssetId`]. + /// + /// Refer to the [`NextAssetId`] item for more information. + /// + /// This does not enforce the asset ID for the [assets](`GenesisConfig::assets`) within the + /// genesis config. It sets the [`NextAssetId`] after they have been created. + pub next_asset_id: Option, } #[pallet::genesis_build] @@ -485,6 +541,10 @@ pub mod pallet { ); assert!(result.is_ok()); } + + if let Some(next_asset_id) = &self.next_asset_id { + NextAssetId::::put(next_asset_id); + } } } @@ -622,6 +682,8 @@ pub mod pallet { NotFrozen, /// Callback action resulted in error CallbackFailed, + /// The asset ID must be equal to the [`NextAssetId`]. + BadAssetId, } #[pallet::call(weight(>::WeightInfo))] @@ -636,7 +698,7 @@ pub mod pallet { /// /// Parameters: /// - `id`: The identifier of the new asset. This must not be currently in use to identify - /// an existing asset. + /// an existing asset. If [`NextAssetId`] is set, then this must be equal to it. /// - `admin`: The admin of this class of assets. The admin is the initial address of each /// member of the asset class's admin team. /// - `min_balance`: The minimum balance of this new asset that any single account must @@ -659,6 +721,10 @@ pub mod pallet { ensure!(!Asset::::contains_key(&id), Error::::InUse); ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + if let Some(next_id) = NextAssetId::::get() { + ensure!(id == next_id, Error::::BadAssetId); + } + let deposit = T::AssetDeposit::get(); T::Currency::reserve(&owner, deposit)?; @@ -698,7 +764,7 @@ pub mod pallet { /// Unlike `create`, no funds are reserved. /// /// - `id`: The identifier of the new asset. This must not be currently in use to identify - /// an existing asset. + /// an existing asset. If [`NextAssetId`] is set, then this must be equal to it. 
/// - `owner`: The owner of this class of assets. The owner has full superuser permissions /// over this asset, but may later change and configure the permissions using /// `transfer_ownership` and `set_team`. diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index dd7c12293e80f..9096f25fb791f 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -22,6 +22,30 @@ use log; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; +pub mod next_asset_id { + use super::*; + use sp_core::Get; + + /// Set [`NextAssetId`] to the value of `ID` if [`NextAssetId`] does not exist yet. + pub struct SetNextAssetId, I: 'static = ()>( + core::marker::PhantomData<(ID, T, I)>, + ); + impl, I: 'static> OnRuntimeUpgrade for SetNextAssetId + where + T::AssetId: Incrementable, + ID: Get, + { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if !NextAssetId::::exists() { + NextAssetId::::put(ID::get()); + T::DbWeight::get().reads_writes(1, 1) + } else { + T::DbWeight::get().reads(1) + } + } + } +} + pub mod v1 { use frame_support::{pallet_prelude::*, weights::Weight}; diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index f6173a451fffa..2c160840e1478 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -23,7 +23,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, + traits::{AsEnsureOriginWithArg, ConstU32}, }; use sp_io::storage; use sp_runtime::BuildStorage; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<3>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } pub struct AssetsCallbackHandle; @@ -114,7 +103,7 @@ impl Config for Test { type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; type Freezer = TestFreezer; - type CallbackHandle = AssetsCallbackHandle; + type CallbackHandle = (AssetsCallbackHandle, AutoIncAssetId); } use std::collections::HashMap; @@ -178,6 +167,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities { // id, account_id, balance (999, 1, 100), ], + next_asset_id: None, }; config.assimilate_storage(&mut storage).unwrap(); diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index c7021bcad5310..c751fbdcaf1bb 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -22,7 +22,11 @@ use crate::{mock::*, Error}; use frame_support::{ assert_noop, assert_ok, dispatch::GetDispatchInfo, - traits::{fungibles::InspectEnumerable, tokens::Preservation::Protect, Currency}, + traits::{ + fungibles::InspectEnumerable, + tokens::{Preservation::Protect, Provenance}, + Currency, + }, }; use pallet_balances::Error as BalancesError; use sp_io::storage; @@ -1777,3 +1781,100 @@ fn asset_destroy_refund_existence_deposit() { assert_eq!(Balances::reserved_balance(&admin), 0); }); } + +#[test] +fn 
increasing_or_decreasing_destroying_asset_should_not_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::fungibles::Inspect; + + let admin = 1; + let admin_origin = RuntimeOrigin::signed(admin); + + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, admin, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + assert_eq!(Assets::can_deposit(0, &1, 10, Provenance::Extant), DepositConsequence::Success); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::Success); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::Success); + assert_eq!(Assets::can_decrease(0, &1, 10, false), WithdrawConsequence::<_>::Success); + + assert_ok!(Assets::start_destroy(admin_origin, 0)); + + assert_eq!( + Assets::can_deposit(0, &1, 10, Provenance::Extant), + DepositConsequence::UnknownAsset + ); + assert_eq!(Assets::can_withdraw(0, &1, 10), WithdrawConsequence::<_>::UnknownAsset); + assert_eq!(Assets::can_increase(0, &1, 10, false), DepositConsequence::UnknownAsset); + assert_eq!(Assets::can_decrease(0, &1, 10, false), WithdrawConsequence::<_>::UnknownAsset); + }); +} + +#[test] +fn asset_id_cannot_be_reused() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + // Asset id can be reused till auto increment is not enabled. + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 0)); + + assert!(!Asset::::contains_key(0)); + + // Asset id `0` is reused. + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 0)); + + assert!(!Asset::::contains_key(0)); + + // Enable auto increment. Next asset id must be 5. + pallet::NextAssetId::::put(5); + + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1), Error::::BadAssetId); + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 1, 1, 1), Error::::BadAssetId); + assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1), + Error::::BadAssetId + ); + assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 1, 1, true, 1), + Error::::BadAssetId + ); + + // Asset with id `5` is created. + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 5, 1, 1)); + assert!(Asset::::contains_key(5)); + + // Destroy asset with id `6`. + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 5)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 5)); + + assert!(!Asset::::contains_key(0)); + + // Asset id `5` cannot be reused. + assert_noop!(Assets::create(RuntimeOrigin::signed(1), 5, 1, 1), Error::::BadAssetId); + + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 6, 1, 1)); + assert!(Asset::::contains_key(6)); + + // Destroy asset with id `6`. + assert_ok!(Assets::start_destroy(RuntimeOrigin::signed(1), 6)); + assert_ok!(Assets::finish_destroy(RuntimeOrigin::signed(1), 6)); + + assert!(!Asset::::contains_key(6)); + + // Asset id `6` cannot be reused with force. 
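The `asset_id_cannot_be_reused` test around this point exercises the new `NextAssetId` rule end to end: while the counter is unset any free id works, but once it exists `create`/`force_create` must use exactly that id (anything else fails with `BadAssetId`), and each successful creation bumps the counter through the `AutoIncAssetId` callback, so destroyed ids never come back. A toy model of that check, separate from the pallet code and using only local names:

```rust
/// Toy stand-in for the `NextAssetId` check plus the `AutoIncAssetId` bump.
/// `None` means auto-increment is disabled and any unused id is acceptable.
fn check_and_bump(next_asset_id: &mut Option<u32>, requested: u32) -> Result<(), &'static str> {
	match next_asset_id.as_mut() {
		Some(next) if *next != requested => Err("BadAssetId"),
		Some(next) => {
			*next = next.checked_add(1).ok_or("Overflow")?;
			Ok(())
		},
		None => Ok(()),
	}
}

fn main() {
	let mut next = Some(5u32);
	assert_eq!(check_and_bump(&mut next, 0), Err("BadAssetId"));
	assert_eq!(check_and_bump(&mut next, 5), Ok(()));
	assert_eq!(next, Some(6));
	// With no counter set, ids can be reused freely, as in the first half of the test.
	assert_eq!(check_and_bump(&mut None, 0), Ok(()));
}
```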
+ assert_noop!( + Assets::force_create(RuntimeOrigin::root(), 6, 1, false, 1), + Error::::BadAssetId + ); + + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 7, 1, false, 1)); + assert!(Asset::::contains_key(7)); + }); +} diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 8083c12d4b39f..0331006301296 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -16,17 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -39,7 +38,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs index dc0300dc1a5c9..c3010f5c9c03b 100644 --- a/substrate/frame/atomic-swap/src/lib.rs +++ b/substrate/frame/atomic-swap/src/lib.rs @@ -42,6 +42,9 @@ mod tests; +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; use core::{ marker::PhantomData, @@ -58,7 +61,6 @@ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; /// Pending atomic swap operation. 
#[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 9f51f04208aac..47ebe6a8f0acf 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -20,10 +20,7 @@ use super::*; use crate as pallet_atomic_swap; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU32}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -43,20 +40,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 9264d2f4a643c..f9d7459276387 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -16,20 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-timestamp = { path = "../timestamp", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index 3ca1444aaae9b..f829578fb2851 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -38,6 +38,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ traits::{DisabledValidators, FindAuthor, Get, OnTimestampSet, OneSessionHandler}, @@ -50,7 +53,6 @@ use sp_runtime::{ traits::{IsMember, 
Member, SaturatedConversion, Saturating, Zero}, RuntimeAppPublic, }; -use sp_std::prelude::*; pub mod migrations; mod mock; @@ -66,7 +68,7 @@ const LOG_TARGET: &str = "runtime::aura"; /// /// This was the default behavior of the Aura pallet and may be used for /// backwards compatibility. -pub struct MinimumPeriodTimesTwo(sp_std::marker::PhantomData); +pub struct MinimumPeriodTimesTwo(core::marker::PhantomData); impl Get for MinimumPeriodTimesTwo { fn get() -> T::Moment { @@ -119,7 +121,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(sp_std::marker::PhantomData); + pub struct Pallet(core::marker::PhantomData); #[pallet::hooks] impl Hooks> for Pallet { @@ -362,7 +364,7 @@ impl FindAuthor for Pallet { /// We can not implement `FindAuthor` twice, because the compiler does not know if /// `u32 == T::AuthorityId` and thus, prevents us to implement the trait twice. #[doc(hidden)] -pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); +pub struct FindAccountFromAuthorIndex(core::marker::PhantomData<(T, Inner)>); impl> FindAuthor for FindAccountFromAuthorIndex diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index c21f9b5c90455..9d55a7fd5dcb8 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -16,23 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-session = { path = "../session", default-features = false, features = [ +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { features = [ "historical", -] } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +sp-application-crypto = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index 16f71960d693b..220b39292b575 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -23,12 +23,14 @@ // Ensure we're `no_std` when compiling for Wasm. 
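A refactor that repeats through these hunks (asset-rate, assets, atomic-swap, aura, and the `authority-discovery` file continuing just below) is the removal of the `sp-std` dependency: each crate declares `extern crate alloc` once and then takes `Vec`, `Box`, `PhantomData`, `mem::swap` and friends straight from `alloc`/`core`. A minimal sketch of a migrated `no_std` crate root, with illustrative items that are not taken from the patch:

```rust
#![cfg_attr(not(feature = "std"), no_std)]

// Previously these came from `sp_std::{prelude::*, boxed::Box, marker::PhantomData}`.
extern crate alloc;

use alloc::{boxed::Box, vec::Vec};
use core::marker::PhantomData;

pub struct Example<T>(PhantomData<T>);

pub fn boxed_ids(ids: &[u32]) -> Vec<Box<u32>> {
	ids.iter().copied().map(Box::new).collect()
}
```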
#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use frame_support::{ traits::{Get, OneSessionHandler}, WeakBoundedVec, }; use sp_authority_discovery::AuthorityId; -use sp_std::prelude::*; pub use pallet::*; @@ -62,7 +64,7 @@ pub mod pallet { pub struct GenesisConfig { pub keys: Vec, #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -168,6 +170,7 @@ impl OneSessionHandler for Pallet { mod tests { use super::*; use crate as pallet_authority_discovery; + use alloc::vec; use frame_support::{derive_impl, parameter_types, traits::ConstU32}; use sp_application_crypto::Pair; use sp_authority_discovery::AuthorityPair; diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index dd78e3404ef0b..8ecacca93e0ab 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -40,7 +39,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs index d8f1baab23c8b..a0cca806e7863 100644 --- a/substrate/frame/authorship/src/lib.rs +++ b/substrate/frame/authorship/src/lib.rs @@ -22,7 +22,6 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::FindAuthor; -use sp_std::prelude::*; pub use pallet::*; diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index d06b7f7454648..3dfda3b618be3 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -16,31 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -pallet-timestamp = { path = 
"../timestamp", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } [dev-dependencies] -frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core" } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -65,7 +64,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index ed1df640583b2..4be07bdae1f01 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -33,6 +33,7 @@ //! that the `ValidateUnsigned` for the BABE pallet is used in the runtime //! definition. +use alloc::{boxed::Box, vec, vec::Vec}; use frame_support::traits::{Get, KeyOwnerProofSystem}; use frame_system::pallet_prelude::HeaderFor; use log::{error, info}; @@ -50,7 +51,6 @@ use sp_staking::{ offence::{Kind, Offence, OffenceReportSystem, ReportOffence}, SessionIndex, }; -use sp_std::prelude::*; use crate::{Call, Config, Error, Pallet, LOG_TARGET}; @@ -104,7 +104,7 @@ impl Offence for EquivocationOffence { /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. 
-pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, R, P, L)>); +pub struct EquivocationReportSystem(core::marker::PhantomData<(T, R, P, L)>); impl OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 686ba6ec2d634..3e33d5627c47b 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -21,6 +21,9 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_must_use)] +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, @@ -44,7 +47,6 @@ use sp_runtime::{ }; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::{offence::OffenceReportSystem, SessionIndex}; -use sp_std::prelude::*; pub use sp_consensus_babe::AuthorityId; @@ -96,11 +98,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: BlockNumberFor) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); + if Pallet::::should_epoch_change(now) { + let authorities = Authorities::::get(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities, None); + Pallet::::enact_epoch_change(authorities, next_authorities, None); } } } @@ -185,12 +187,10 @@ pub mod pallet { /// Current epoch index. #[pallet::storage] - #[pallet::getter(fn epoch_index)] pub type EpochIndex = StorageValue<_, u64, ValueQuery>; /// Current epoch authorities. #[pallet::storage] - #[pallet::getter(fn authorities)] pub type Authorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, @@ -200,12 +200,10 @@ pub mod pallet { /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. #[pallet::storage] - #[pallet::getter(fn genesis_slot)] pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; /// Current slot number. #[pallet::storage] - #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; /// The epoch randomness for the *current* epoch. @@ -222,20 +220,19 @@ pub mod pallet { // array size because the metadata API currently doesn't resolve the // variable to its underlying value. #[pallet::storage] - #[pallet::getter(fn randomness)] pub type Randomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Pending epoch configuration change that will be applied when the next epoch is enacted. #[pallet::storage] - pub(super) type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; + pub type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; /// Next epoch randomness. #[pallet::storage] - pub(super) type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, BabeRandomness, ValueQuery>; /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = StorageValue< + pub type NextAuthorities = StorageValue< _, WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, ValueQuery, @@ -251,11 +248,11 @@ pub mod pallet { /// We reset all segments and return to `0` at the beginning of every /// epoch. #[pallet::storage] - pub(super) type SegmentIndex = StorageValue<_, u32, ValueQuery>; + pub type SegmentIndex = StorageValue<_, u32, ValueQuery>; /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. 
#[pallet::storage] - pub(super) type UnderConstruction = StorageMap< + pub type UnderConstruction = StorageMap< _, Twox64Concat, u32, @@ -266,16 +263,14 @@ pub mod pallet { /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. #[pallet::storage] - #[pallet::getter(fn initialized)] - pub(super) type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, Option>; /// This field should always be populated during block processing unless /// secondary plain slots are enabled (which don't contain a VRF output). /// /// It is set in `on_finalize`, before it will contain the value from the last block. #[pallet::storage] - #[pallet::getter(fn author_vrf_randomness)] - pub(super) type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; + pub type AuthorVrfRandomness = StorageValue<_, Option, ValueQuery>; /// The block numbers when the last and current epoch have started, respectively `N-1` and /// `N`. @@ -292,19 +287,17 @@ pub mod pallet { /// on block finalization. Querying this storage entry outside of block /// execution context should always yield zero. #[pallet::storage] - #[pallet::getter(fn lateness)] - pub(super) type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type Lateness = StorageValue<_, BlockNumberFor, ValueQuery>; /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. #[pallet::storage] - #[pallet::getter(fn epoch_config)] - pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type EpochConfig = StorageValue<_, BabeEpochConfiguration>; /// The configuration for the next epoch, `None` if the config will not change /// (you can fallback to `EpochConfig` instead in that case). #[pallet::storage] - pub(super) type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; + pub type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; /// A list of the last 100 skipped epochs and the corresponding session index /// when the epoch was skipped. @@ -315,8 +308,7 @@ pub mod pallet { /// a validator was the owner of a given key on a given session, and what the /// active epoch index was during that session. #[pallet::storage] - #[pallet::getter(fn skipped_epochs)] - pub(super) type SkippedEpochs = + pub type SkippedEpochs = StorageValue<_, BoundedVec<(u64, SessionIndex), ConstU32<100>>, ValueQuery>; #[derive(frame_support::DefaultNoBound)] @@ -325,7 +317,7 @@ pub mod pallet { pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, pub epoch_config: BabeEpochConfiguration, #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -368,7 +360,7 @@ pub mod pallet { .and_then(|(authority, _)| { let public = authority.as_inner_ref(); let transcript = sp_consensus_babe::make_vrf_transcript( - &Self::randomness(), + &Randomness::::get(), CurrentSlot::::get(), EpochIndex::::get(), ); @@ -510,7 +502,7 @@ impl FindAuthor for Pallet { impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities().iter().any(|id| &id.0 == authority_id) + Authorities::::get().iter().any(|id| &id.0 == authority_id) } } @@ -526,6 +518,47 @@ impl pallet_session::ShouldEndSession> for Pallet Pallet { + /// Public function to access epoch_index storage. + pub fn epoch_index() -> u64 { + EpochIndex::::get() + } + /// Public function to access authorities storage. 
+ pub fn authorities() -> WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities> { + Authorities::::get() + } + /// Public function to access genesis_slot storage. + pub fn genesis_slot() -> Slot { + GenesisSlot::::get() + } + /// Public function to access current_slot storage. + pub fn current_slot() -> Slot { + CurrentSlot::::get() + } + /// Public function to access randomness storage. + pub fn randomness() -> BabeRandomness { + Randomness::::get() + } + /// Public function to access initialized storage. + pub fn initialized() -> Option> { + Initialized::::get() + } + /// Public function to access author_vrf_randomness storage. + pub fn author_vrf_randomness() -> Option { + AuthorVrfRandomness::::get() + } + /// Public function to access lateness storage. + pub fn lateness() -> BlockNumberFor { + Lateness::::get() + } + /// Public function to access epoch_config storage. + pub fn epoch_config() -> Option { + EpochConfig::::get() + } + /// Public function to access skipped_epochs storage. + pub fn skipped_epochs() -> BoundedVec<(u64, SessionIndex), ConstU32<100>> { + SkippedEpochs::::get() + } + /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within @@ -588,7 +621,7 @@ impl Pallet { ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. - debug_assert!(Self::initialized().is_some()); + debug_assert!(Initialized::::get().is_some()); if authorities.is_empty() { log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); @@ -655,8 +688,8 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update the start blocks of the previous and new current epoch. - >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { - *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); + EpochStart::::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { + *previous_epoch_start_block = core::mem::take(current_epoch_start_block); *current_epoch_start_block = >::block_number(); }); @@ -701,8 +734,8 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), } @@ -779,8 +812,8 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), }; Self::deposit_consensus(ConsensusLog::NextEpochData(next)); @@ -789,7 +822,7 @@ impl Pallet { fn initialize(now: BlockNumberFor) { // since `initialize` can be called twice (e.g. if session module is present) // let's ensure that we only do the initialization once per block - let initialized = Self::initialized().is_some(); + let initialized = Initialized::::get().is_some(); if initialized { return } @@ -837,7 +870,7 @@ impl Pallet { /// randomness. Returns the new randomness. 
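The storage hunks above drop the `#[pallet::getter(..)]` attributes: storage types become `pub`, call sites read them directly, and thin wrapper functions are kept only where the old accessor API is still wanted. A minimal sketch of that shape, using a simplified stand-in pallet rather than the real one:

```rust
// Simplified stand-in pallet; the name `CurrentSlot` and the `u64` value type
// are illustrative only.
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // Before: `#[pallet::getter(fn current_slot)]` generated `Pallet::<T>::current_slot()`.
    // After: the storage type is public and callers use `CurrentSlot::<T>::get()` directly.
    #[pallet::storage]
    pub type CurrentSlot<T> = StorageValue<_, u64, ValueQuery>;
}

pub use pallet::*;

impl<T: Config> Pallet<T> {
    /// Thin wrapper kept only where the removed getter's API is still useful.
    pub fn current_slot() -> u64 {
        CurrentSlot::<T>::get()
    }
}
```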
fn randomness_change_epoch(next_epoch_index: u64) -> BabeRandomness { let this_randomness = NextRandomness::::get(); - let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); + let segment_idx: u32 = SegmentIndex::::mutate(|s| core::mem::replace(s, 0)); // overestimate to the segment being full. let rho_size = (segment_idx.saturating_add(1) * UNDER_CONSTRUCTION_SEGMENT_LENGTH) as usize; @@ -940,7 +973,7 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness> for Pallet { fn lateness(&self) -> BlockNumberFor { - Self::lateness() + Lateness::::get() } } @@ -1024,7 +1057,7 @@ pub mod migrations { fn pallet_prefix() -> &'static str; } - struct __OldNextEpochConfig(sp_std::marker::PhantomData); + struct __OldNextEpochConfig(core::marker::PhantomData); impl frame_support::traits::StorageInstance for __OldNextEpochConfig { fn pallet_prefix() -> &'static str { T::pallet_prefix() diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 395a86e652880..e193a2e3b6454 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -28,7 +28,6 @@ use frame_support::{ traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; -use pallet_staking::FixedNominationsQuota; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{KeyTypeId, Pair, VrfSecret}, @@ -112,20 +111,11 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { @@ -142,7 +132,6 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; - pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -157,35 +146,20 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { @@ -239,7 +213,7 @@ pub fn go_to_block(n: u64, s: u64) { /// Slots will grow accordingly to blocks pub fn progress_to_block(n: u64) { - let mut slot = u64::from(Babe::current_slot()) + 1; + let mut slot = u64::from(CurrentSlot::::get()) + 1; for i in System::block_number() + 1..=n { go_to_block(i, slot); slot += 1; @@ -298,7 +272,8 @@ pub fn make_vrf_signature_and_randomness( slot: Slot, pair: &sp_consensus_babe::AuthorityPair, ) -> (VrfSignature, Randomness) { - let transcript = sp_consensus_babe::make_vrf_transcript(&Babe::randomness(), slot, 0); + let transcript = + sp_consensus_babe::make_vrf_transcript(&pallet_babe::Randomness::::get(), slot, 0); let randomness = pair.as_ref().make_bytes(sp_consensus_babe::RANDOMNESS_VRF_CONTEXT, &transcript); diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index e65f1844f88f9..b9a214ca105c8 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -43,7 +43,7 @@ fn empty_randomness_is_correct() { #[test] fn initial_values() { - new_test_ext(4).execute_with(|| assert_eq!(Babe::authorities().len(), 4)) + new_test_ext(4).execute_with(|| assert_eq!(Authorities::::get().len(), 4)) } #[test] @@ -68,25 +68,25 @@ fn first_block_epoch_zero_start() { let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_signature); - assert_eq!(Babe::genesis_slot(), Slot::from(0)); + assert_eq!(GenesisSlot::::get(), Slot::from(0)); System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); // see implementation of the function for details why: we issue an // epoch-change digest but don't do it via the normal session mechanism. 
assert!(!Babe::should_end_session(1)); - assert_eq!(Babe::genesis_slot(), genesis_slot); - assert_eq!(Babe::current_slot(), genesis_slot); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(GenesisSlot::::get(), genesis_slot); + assert_eq!(CurrentSlot::::get(), genesis_slot); + assert_eq!(EpochIndex::::get(), 0); Babe::on_finalize(1); let header = System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); assert_eq!(SegmentIndex::::get(), 0); assert_eq!(UnderConstruction::::get(0), vec![vrf_randomness]); - assert_eq!(Babe::randomness(), [0; 32]); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(Randomness::::get(), [0; 32]); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(header.digest.logs.len(), 2); @@ -95,8 +95,8 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities().into_inner(), - randomness: Babe::randomness(), + authorities: Authorities::::get().into_inner(), + randomness: Randomness::::get(), }, ); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); @@ -118,19 +118,19 @@ fn current_slot_is_processed_on_initialization() { System::reset_events(); System::initialize(&1, &Default::default(), &pre_digest); - assert_eq!(Babe::current_slot(), Slot::from(0)); - assert!(Babe::initialized().is_none()); + assert_eq!(CurrentSlot::::get(), Slot::from(0)); + assert!(Initialized::::get().is_none()); // current slot is updated on initialization Babe::initialize(1); - assert_eq!(Babe::current_slot(), genesis_slot); - assert!(Babe::initialized().is_some()); + assert_eq!(CurrentSlot::::get(), genesis_slot); + assert!(Initialized::::get().is_some()); // but author vrf randomness isn't - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); // instead it is updated on block finalization Babe::on_finalize(1); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); }) } @@ -151,16 +151,16 @@ where // author vrf randomness is not updated on initialization Babe::initialize(1); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); // instead it is updated on block finalization to account for any // epoch changes that might happen during the block Babe::on_finalize(1); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); // and it is kept after finalizing the block System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(AuthorVrfRandomness::::get(), Some(vrf_randomness)); }) } @@ -182,14 +182,14 @@ fn no_author_vrf_output_for_secondary_plain() { System::reset_events(); System::initialize(&1, &Default::default(), &secondary_plain_pre_digest); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); Babe::initialize(1); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); Babe::on_finalize(1); System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(AuthorVrfRandomness::::get(), None); }) } @@ -210,14 +210,14 @@ fn 
can_predict_next_epoch_change() { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(*Babe::genesis_slot(), 6); - assert_eq!(*Babe::current_slot(), 6); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(*GenesisSlot::::get(), 6); + assert_eq!(*CurrentSlot::::get(), 6); + assert_eq!(EpochIndex::::get(), 0); progress_to_block(5); - assert_eq!(Babe::epoch_index(), 5 / 3); - assert_eq!(*Babe::current_slot(), 10); + assert_eq!(EpochIndex::::get(), 5 / 3); + assert_eq!(*CurrentSlot::::get(), 10); // next epoch change will be at assert_eq!(*Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now @@ -266,9 +266,9 @@ fn can_enact_next_config() { assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(*Babe::genesis_slot(), 6); - assert_eq!(*Babe::current_slot(), 6); - assert_eq!(Babe::epoch_index(), 0); + assert_eq!(*GenesisSlot::::get(), 6); + assert_eq!(*CurrentSlot::::get(), 6); + assert_eq!(EpochIndex::::get(), 0); go_to_block(2, 7); let current_config = BabeEpochConfiguration { @@ -431,7 +431,7 @@ fn report_equivocation_current_session_works() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); let validators = Session::validators(); // make sure that all authorities have the same balance @@ -508,7 +508,7 @@ fn report_equivocation_old_session_works() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 1; @@ -566,7 +566,7 @@ fn report_equivocation_invalid_key_owner_proof() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 0; @@ -629,7 +629,7 @@ fn report_equivocation_invalid_equivocation_proof() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // we will use the validator at index 0 as the offending authority let offending_validator_index = 0; @@ -734,7 +734,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ext.execute_with(|| { start_era(1); - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); // generate and report an equivocation for the validator at index 0 let offending_validator_index = 0; @@ -848,7 +848,7 @@ fn report_equivocation_after_skipped_epochs_works() { assert_eq!(SkippedEpochs::::get(), vec![(10, 1)]); // generate an equivocation proof for validator at index 1 - let authorities = Babe::authorities(); + let authorities = Authorities::::get(); let offending_validator_index = 1; let offending_authority_pair = pairs .into_iter() diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 3429d2f28a6cc..549c4334b55db 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -16,41 +16,40 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # primitives 
-sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { workspace = true } # FRAME -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-election-provider-support = { workspace = true } # third party log = { workspace = true } -docify = "0.2.8" -aquamarine = { version = "0.5.0" } +docify = { workspace = true } +aquamarine = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../balances", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false, optional = true } -sp-tracing = { path = "../../primitives/tracing", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-balances = { optional = true, workspace = true } +sp-core = { optional = true, workspace = true } +sp-io = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-balances = { path = "../balances" } -frame-election-provider-support = { path = "../election-provider-support" } -frame-benchmarking = { path = "../benchmarking" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] @@ -66,7 +65,6 @@ std = [ "sp-core?/std", "sp-io?/std", "sp-runtime/std", - "sp-std/std", "sp-tracing?/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index 20760141b2361..f8631be159cd4 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -13,10 +13,10 @@ publish = false workspace = true [dependencies] -honggfuzz = "0.5" -rand = { version = "0.8", features = ["small_rng", "std"] } -frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"] } -pallet-bags-list = { path = "..", features = ["fuzz"] } +honggfuzz = { workspace = true } +rand = { features = ["small_rng", "std"], workspace = true, default-features = true } +frame-election-provider-support = { features = ["fuzz"], workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index 266355f5cabe1..45f2498aa88b4 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -17,21 +17,21 @@ targets = 
["x86_64-unknown-linux-gnu"] [dependencies] # frame -pallet-staking = { path = "../../staking" } -pallet-bags-list = { path = "..", features = ["fuzz"] } -frame-election-provider-support = { path = "../../election-provider-support" } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } +pallet-staking = { workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } # core -sp-storage = { path = "../../../primitives/storage" } -sp-core = { path = "../../../primitives/core" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-std = { path = "../../../primitives/std" } +sp-storage = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } # utils -remote-externalities = { package = "frame-remote-externalities", path = "../../../utils/frame/remote-externalities" } +remote-externalities = { workspace = true, default-features = true } # others log = { workspace = true, default-features = true } diff --git a/substrate/frame/bags-list/remote-tests/src/lib.rs b/substrate/frame/bags-list/remote-tests/src/lib.rs index 9f7c22d99dad1..1f0584fa07e5b 100644 --- a/substrate/frame/bags-list/remote-tests/src/lib.rs +++ b/substrate/frame/bags-list/remote-tests/src/lib.rs @@ -19,7 +19,6 @@ use frame_election_provider_support::ScoreProvider; use pallet_bags_list::Instance1; -use sp_std::prelude::*; /// A common log target to use. 
pub const LOG_TARGET: &str = "runtime::bags-list::remote-tests"; diff --git a/substrate/frame/bags-list/src/benchmarks.rs b/substrate/frame/bags-list/src/benchmarks.rs index 0c3955c0d7b79..55f4c24835ea6 100644 --- a/substrate/frame/bags-list/src/benchmarks.rs +++ b/substrate/frame/bags-list/src/benchmarks.rs @@ -19,6 +19,7 @@ use super::*; use crate::list::List; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index 8e3d4cc1f012d..f6af1da5e7b78 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -122,11 +122,13 @@ #[doc = docify::embed!("src/tests.rs", examples_work)] pub mod example {} +extern crate alloc; + +use alloc::boxed::Box; use codec::FullCodec; use frame_election_provider_support::{ScoreProvider, SortedListProvider}; use frame_system::ensure_signed; use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded, StaticLookup}; -use sp_std::prelude::*; #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] use sp_runtime::TryRuntimeError; @@ -238,7 +240,7 @@ pub mod pallet { + Eq + Ord + PartialOrd - + sp_std::fmt::Debug + + core::fmt::Debug + Copy + AtLeast32BitUnsigned + Bounded @@ -465,7 +467,7 @@ impl, I: 'static> SortedListProvider for Pallet let node = list::Node::::get(who).unwrap(); let current_bag_idx = thresholds .iter() - .chain(sp_std::iter::once(&T::Score::max_value())) + .chain(core::iter::once(&T::Score::max_value())) .position(|w| w == &node.bag_upper) .unwrap(); diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs index e90530341a155..696b64d40e9b9 100644 --- a/substrate/frame/bags-list/src/list/mod.rs +++ b/substrate/frame/bags-list/src/list/mod.rs @@ -25,7 +25,12 @@ //! interface. use crate::Config; +use alloc::{ + boxed::Box, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, +}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{iter, marker::PhantomData}; use frame_election_provider_support::ScoreProvider; use frame_support::{ defensive, ensure, @@ -34,14 +39,15 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::traits::{Bounded, Zero}; -use sp_std::{ - boxed::Box, - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - iter, - marker::PhantomData, - prelude::*, -}; +#[cfg(any( + test, + feature = "try-runtime", + feature = "fuzz", + feature = "std", + feature = "runtime-benchmarks" +))] +use alloc::vec::Vec; #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] use sp_runtime::TryRuntimeError; @@ -274,7 +280,7 @@ impl, I: 'static> List { let start_node = Node::::get(start).ok_or(ListError::NodeNotFound)?; let start_node_upper = start_node.bag_upper; - let start_bag = sp_std::iter::successors(start_node.next(), |prev| prev.next()); + let start_bag = core::iter::successors(start_node.next(), |prev| prev.next()); let thresholds = T::BagThresholds::get(); let idx = thresholds.partition_point(|&threshold| start_node_upper > threshold); @@ -341,7 +347,7 @@ impl, I: 'static> List { if !Self::contains(id) { return Err(ListError::NodeNotFound) } - let _ = Self::remove_many(sp_std::iter::once(id)); + let _ = Self::remove_many(core::iter::once(id)); Ok(()) } @@ -591,7 +597,7 @@ impl, I: 'static> List { Box::new(iter) } else { // otherwise, insert it here. 
- Box::new(iter.chain(sp_std::iter::once(T::Score::max_value()))) + Box::new(iter.chain(core::iter::once(T::Score::max_value()))) }; iter.filter_map(|t| { @@ -673,7 +679,7 @@ impl, I: 'static> Bag { /// Iterate over the nodes in this bag. pub(crate) fn iter(&self) -> impl Iterator> { - sp_std::iter::successors(self.head(), |prev| prev.next()) + core::iter::successors(self.head(), |prev| prev.next()) } /// Insert a new id into this bag. @@ -804,7 +810,7 @@ impl, I: 'static> Bag { #[cfg(feature = "std")] #[allow(dead_code)] pub fn std_iter(&self) -> impl Iterator> { - sp_std::iter::successors(self.head(), |prev| prev.next()) + core::iter::successors(self.head(), |prev| prev.next()) } } diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index cd39b08317267..e5fff76d75c70 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -777,7 +777,8 @@ mod bags { assert_eq!(bag_1000.tail, Some(4)); assert_eq!(bag_1000.iter().count(), 3); bag_1000.insert_node_unchecked(node(4, None, None, bag_1000.bag_upper)); // panics in debug - assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the request. + assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the + // request. }); } diff --git a/substrate/frame/bags-list/src/migrations.rs b/substrate/frame/bags-list/src/migrations.rs index 7df63a6a44c54..bfc70fef370a5 100644 --- a/substrate/frame/bags-list/src/migrations.rs +++ b/substrate/frame/bags-list/src/migrations.rs @@ -28,10 +28,10 @@ use frame_support::ensure; use sp_runtime::TryRuntimeError; #[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; +use alloc::vec::Vec; /// A struct that does not migration, but only checks that the counter prefix exists and is correct. -pub struct CheckCounterPrefix, I: 'static>(sp_std::marker::PhantomData<(T, I)>); +pub struct CheckCounterPrefix, I: 'static>(core::marker::PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix { fn on_runtime_upgrade() -> frame_support::weights::Weight { frame_support::weights::Weight::zero() @@ -88,7 +88,7 @@ mod old { } /// A struct that migrates all bags lists to contain a score value. 
-pub struct AddScore, I: 'static = ()>(sp_std::marker::PhantomData<(T, I)>); +pub struct AddScore, I: 'static = ()>(core::marker::PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for AddScore { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 4da14aea12809..85734dce9e9b2 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.8" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +docify = { workspace = true } [dev-dependencies] -pallet-transaction-payment = { path = "../transaction-payment" } -frame-support = { path = "../support", features = ["experimental"] } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -paste = "1.0.12" +pallet-transaction-payment = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } [features] default = ["std"] @@ -46,7 +45,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] # Enable support for setting the existential deposit to zero. insecure_zero_ed = [] diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index d5fe9934e239e..454aead1773f2 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -39,8 +39,8 @@ pub use imbalances::{NegativeImbalance, PositiveImbalance}; // of the inner member. mod imbalances { use super::{result, Config, Imbalance, RuntimeDebug, Saturating, TryDrop, Zero}; + use core::mem; use frame_support::traits::SameOrOther; - use sp_std::mem; /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. 
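The imbalance doc comment above describes an opaque, move-only accounting token. A toy model of that idea, independent of the pallet's real `PositiveImbalance`/`NegativeImbalance` types:

```rust
// Toy move-only token; amounts and drop behaviour are illustrative only.
use core::mem;

#[must_use]
pub struct Surplus(u64);

impl Surplus {
    pub fn new(amount: u64) -> Self {
        Self(amount)
    }

    /// Fold `other` into `self` without double counting: `other` is consumed
    /// and its `Drop` is skipped via `mem::forget`.
    pub fn merge(mut self, other: Self) -> Self {
        self.0 = self.0.saturating_add(other.0);
        mem::forget(other);
        self
    }
}

impl Drop for Surplus {
    fn drop(&mut self) {
        // In the real pallet, dropping an imbalance settles it against total
        // issuance; here it is merely observed.
        let _settled = self.0;
    }
}
```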
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 56eb81b49e2db..0aaf618b303f4 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -150,7 +150,11 @@ mod tests; mod types; pub mod weights; +extern crate alloc; + +use alloc::vec::Vec; use codec::{Codec, MaxEncodedLen}; +use core::{cmp, fmt::Debug, mem, result}; use frame_support::{ ensure, pallet_prelude::DispatchResult, @@ -158,6 +162,7 @@ use frame_support::{ tokens::{ fungible, BalanceStatus as Status, DepositConsequence, Fortitude::{self, Force, Polite}, + IdAmount, Preservation::{Expendable, Preserve, Protect}, WithdrawConsequence, }, @@ -175,10 +180,8 @@ use sp_runtime::{ }, ArithmeticError, DispatchError, FixedPointOperand, Perbill, RuntimeDebug, TokenError, }; -use sp_std::{cmp, fmt::Debug, mem, prelude::*, result}; pub use types::{ - AccountData, AdjustmentDirection, BalanceLock, DustCleaner, ExtraFlags, IdAmount, Reasons, - ReserveData, + AccountData, AdjustmentDirection, BalanceLock, DustCleaner, ExtraFlags, Reasons, ReserveData, }; pub use weights::WeightInfo; @@ -222,13 +225,13 @@ pub mod pallet { type ExistentialDeposit = ConstU64<1>; type ReserveIdentifier = (); - type FreezeIdentifier = (); + type FreezeIdentifier = Self::RuntimeFreezeReason; type DustRemoval = (); type MaxLocks = ConstU32<100>; type MaxReserves = ConstU32<100>; - type MaxFreezes = ConstU32<100>; + type MaxFreezes = VariantCountOf; type WeightInfo = (); } @@ -525,7 +528,7 @@ pub mod pallet { .iter() .map(|(x, _)| x) .cloned() - .collect::>(); + .collect::>(); assert!( endowed_accounts.len() == self.balances.len(), @@ -856,13 +859,13 @@ pub mod pallet { } /// Get the free balance of an account. - pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + pub fn free_balance(who: impl core::borrow::Borrow) -> T::Balance { Self::account(who.borrow()).free } /// Get the balance of an account that can be used for transfers, reservations, or any other /// non-locking, non-transaction-fee activity. Will be at most `free_balance`. - pub fn usable_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + pub fn usable_balance(who: impl core::borrow::Borrow) -> T::Balance { >::reducible_balance(who.borrow(), Expendable, Polite) } @@ -870,14 +873,12 @@ pub mod pallet { /// or any other kind of fees, though). Will be at most `free_balance`. /// /// This requires that the account stays alive. - pub fn usable_balance_for_fees( - who: impl sp_std::borrow::Borrow, - ) -> T::Balance { + pub fn usable_balance_for_fees(who: impl core::borrow::Borrow) -> T::Balance { >::reducible_balance(who.borrow(), Protect, Polite) } /// Get the reserved balance of an account. 
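The balance accessors above now take `impl core::borrow::Borrow<T::AccountId>`, so callers can pass either an owned account id or a reference. A small standalone illustration of the same signature style, using plain `u64` ids instead of the pallet types:

```rust
use core::borrow::Borrow;

// Accepts `u64` or `&u64` alike, mirroring the `impl Borrow<_>` signatures above.
fn free_balance_of(who: impl Borrow<u64>, balances: &[(u64, u128)]) -> u128 {
    let who = who.borrow();
    balances
        .iter()
        .find(|(id, _)| id == who)
        .map(|(_, balance)| *balance)
        .unwrap_or(0)
}

fn main() {
    let balances = [(1u64, 100u128), (2, 250)];
    assert_eq!(free_balance_of(2u64, &balances), 250); // owned id
    assert_eq!(free_balance_of(&1u64, &balances), 100); // borrowed id
}
```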
- pub fn reserved_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + pub fn reserved_balance(who: impl core::borrow::Borrow) -> T::Balance { Self::account(who.borrow()).reserved } diff --git a/substrate/frame/balances/src/tests/dispatchable_tests.rs b/substrate/frame/balances/src/tests/dispatchable_tests.rs index 4bc96f6b43d97..ebc9f1b1a3695 100644 --- a/substrate/frame/balances/src/tests/dispatchable_tests.rs +++ b/substrate/frame/balances/src/tests/dispatchable_tests.rs @@ -281,7 +281,7 @@ fn force_adjust_total_issuance_saturates() { ExtBuilder::default().build_and_execute_with(|| { assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), 1337, 64)); let ti = Balances::total_issuance(); - let max = Balance::max_value(); + let max = ::Balance::max_value(); assert_eq!(ti, 64); // Increment saturates: diff --git a/substrate/frame/balances/src/tests/fungible_tests.rs b/substrate/frame/balances/src/tests/fungible_tests.rs index 52fbe10bedec0..1a09303a6590d 100644 --- a/substrate/frame/balances/src/tests/fungible_tests.rs +++ b/substrate/frame/balances/src/tests/fungible_tests.rs @@ -18,13 +18,20 @@ //! Tests regarding the functionality of the `fungible` trait set implementations. use super::*; -use frame_support::traits::tokens::{ - Fortitude::{Force, Polite}, - Precision::{BestEffort, Exact}, - Preservation::{Expendable, Preserve, Protect}, - Restriction::Free, +use frame_support::traits::{ + tokens::{ + Fortitude::{Force, Polite}, + Precision::{BestEffort, Exact}, + Preservation::{Expendable, Preserve, Protect}, + Restriction::Free, + }, + Consideration, Footprint, LinearStoragePrice, }; -use fungible::{Inspect, InspectFreeze, InspectHold, Mutate, MutateFreeze, MutateHold, Unbalanced}; +use fungible::{ + FreezeConsideration, HoldConsideration, Inspect, InspectFreeze, InspectHold, + LoneFreezeConsideration, LoneHoldConsideration, Mutate, MutateFreeze, MutateHold, Unbalanced, +}; +use sp_core::ConstU64; #[test] fn inspect_trait_reducible_balance_basic_works() { @@ -493,3 +500,161 @@ fn withdraw_precision_exact_works() { ); }); } + +#[test] +fn freeze_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = FreezeConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + // freeze amount taken somewhere outside of our (Consideration) scope. 
+ let extend_freeze = 15; + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); + + assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, extend_freeze)); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4 + extend_freeze); + + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 8 + extend_freeze); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0 + extend_freeze); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10 + extend_freeze); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0 + extend_freeze); + }); +} + +#[test] +fn hold_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = HoldConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + // hold amount taken somewhere outside of our (Consideration) scope. + let extend_hold = 15; + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); + + assert_ok!(Balances::hold(&TestId::Foo, &who, extend_hold)); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4 + extend_hold); + + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 8 + extend_hold); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0 + extend_hold); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10 + extend_hold); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0 + extend_hold); + }); +} + +#[test] +fn lone_freeze_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = LoneFreezeConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, 5)); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 15); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 
0)).unwrap(), None); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); + }); +} + +#[test] +fn lone_hold_consideration_works() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + type Consideration = LoneHoldConsideration< + u64, + Balances, + FooReason, + LinearStoragePrice, ConstU64<1>, u64>, + Footprint, + >; + + let who = 4; + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + assert_ok!(Balances::hold(&TestId::Foo, &who, 5)); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 15); + + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); + + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); + + let _ = ticket.drop(&who).unwrap(); + assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 0abf2251290fe..ba0cdabdabbbd 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -27,7 +27,7 @@ use frame_support::{ parameter_types, traits::{ fungible, ConstU32, ConstU8, Imbalance as ImbalanceT, OnUnbalanced, StorageMapShim, - StoredMap, VariantCount, WhitelistedStorageKeys, + StoredMap, VariantCount, VariantCountOf, WhitelistedStorageKeys, }, weights::{IdentityFee, Weight}, }; @@ -107,22 +107,21 @@ impl pallet_transaction_payment::Config for Test { type FeeMultiplierUpdate = (); } -pub(crate) type Balance = u64; +parameter_types! { + pub FooReason: TestId = TestId::Foo; +} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl Config for Test { - type Balance = Balance; type DustRemoval = DustTrap; - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = TestAccountStore; - type MaxLocks = ConstU32<50>; type MaxReserves = ConstU32<2>; type ReserveIdentifier = TestId; - type WeightInfo = (); type RuntimeHoldReason = TestId; - type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeFreezeReason = TestId; type FreezeIdentifier = TestId; - type MaxFreezes = ConstU32<2>; + type MaxFreezes = VariantCountOf; } #[derive(Clone)] diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs index 3e36a83575c89..917b7507d7c94 100644 --- a/substrate/frame/balances/src/types.rs +++ b/substrate/frame/balances/src/types.rs @@ -78,15 +78,6 @@ pub struct ReserveData { pub amount: Balance, } -/// An identifier and balance. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct IdAmount { - /// An identifier for this item. - pub id: Id, - /// Some amount for this item. - pub amount: Balance, -} - /// All balance information for an account. 
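The new `fungible_tests` above exercise the `Consideration` ticket flow: `new` locks a deposit sized by a storage `Footprint`, `update` re-prices it, and dropping the ticket releases it. Below is a deliberately simplified, hypothetical model of that lifecycle, not `frame_support`'s actual `Consideration` API:

```rust
// Hypothetical model of a footprint-priced deposit ticket; all types and
// signatures here are invented for illustration.
#[derive(Clone, Copy)]
pub struct Footprint {
    pub count: u64,
    pub size: u64,
}

impl Footprint {
    pub fn from_parts(count: u64, size: u64) -> Self {
        Self { count, size }
    }
    fn price(&self) -> u64 {
        self.count.saturating_mul(self.size)
    }
}

/// A ticket representing funds currently locked for some stored footprint.
pub struct Ticket {
    held: u64,
}

impl Ticket {
    /// Lock a deposit for `fp`; `None` means nothing needed locking.
    pub fn new(fp: Footprint) -> Option<Self> {
        let amount = fp.price();
        (amount > 0).then_some(Self { held: amount })
    }

    /// Re-price against a new footprint; returning `None` releases everything,
    /// mirroring the `Footprint::from_parts(0, 0)` updates in the tests above.
    pub fn update(self, fp: Footprint) -> Option<Self> {
        Self::new(fp)
    }

    /// Release whatever is still held (the `ticket.drop(&who)` step above).
    pub fn release(self) -> u64 {
        self.held
    }
}
```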
#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub struct AccountData { diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 51abc306265d6..11a7b281e87d9 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -12,28 +12,27 @@ homepage = "https://substrate.io" workspace = true [dependencies] -array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +array-bytes = { optional = true, workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-beefy = { path = "../beefy", default-features = false } -pallet-mmr = { path = "../merkle-mountain-range", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +binary-merkle-tree = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-beefy = { workspace = true } +pallet-mmr = { workspace = true } +pallet-session = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-state-machine = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-staking = { path = "../../primitives/staking" } +array-bytes = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } [features] default = ["std"] @@ -56,7 +55,6 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-state-machine/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index e423f1b342f2f..ec341cad2084a 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -33,20 +33,24 @@ //! //! and thanks to versioning can be easily updated in the future. 
-use sp_runtime::traits::{Convert, Member}; -use sp_std::prelude::*; +extern crate alloc; +use sp_runtime::traits::{Convert, Header, Member}; + +use alloc::vec::Vec; use codec::Decode; -use pallet_mmr::{LeafDataProvider, ParentNumberAndHash}; +use pallet_mmr::{primitives::AncestryProof, LeafDataProvider, ParentNumberAndHash}; use sp_consensus_beefy::{ + known_payloads, mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, - ValidatorSet as BeefyValidatorSet, + AncestryHelper, Commitment, ConsensusLog, ValidatorSet as BeefyValidatorSet, }; use frame_support::{crypto::ecdsa::ECDSAExt, traits::Get}; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; pub use pallet::*; +use sp_runtime::generic::OpaqueDigestItemId; #[cfg(test)] mod mock; @@ -54,7 +58,7 @@ mod mock; mod tests; /// A BEEFY consensus digest item with MMR root hash. -pub struct DepositBeefyDigest(sp_std::marker::PhantomData); +pub struct DepositBeefyDigest(core::marker::PhantomData); impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest where @@ -172,6 +176,75 @@ where } } +impl AncestryHelper> for Pallet +where + T: pallet_mmr::Config, +{ + type Proof = AncestryProof>; + type ValidationContext = MerkleRootOf; + + fn extract_validation_context(header: HeaderFor) -> Option { + // Check if the provided header is canonical. + let expected_hash = frame_system::Pallet::::block_hash(header.number()); + if expected_hash != header.hash() { + return None; + } + + // Extract the MMR root from the header digest + header.digest().convert_first(|l| { + l.try_to(OpaqueDigestItemId::Consensus(&sp_consensus_beefy::BEEFY_ENGINE_ID)) + .and_then(|log: ConsensusLog<::BeefyId>| match log { + ConsensusLog::MmrRoot(mmr_root) => Some(mmr_root), + _ => None, + }) + }) + } + + fn is_non_canonical( + commitment: &Commitment>, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool { + let commitment_leaf_count = + match pallet_mmr::Pallet::::block_num_to_leaf_count(commitment.block_number) { + Ok(commitment_leaf_count) => commitment_leaf_count, + Err(_) => { + // We can't prove that the commitment is non-canonical if the + // `commitment.block_number` is invalid. + return false + }, + }; + if commitment_leaf_count != proof.prev_leaf_count { + // Can't prove that the commitment is non-canonical if the `commitment.block_number` + // doesn't match the ancestry proof. + return false; + } + + let canonical_mmr_root = context; + let canonical_prev_root = + match pallet_mmr::Pallet::::verify_ancestry_proof(canonical_mmr_root, proof) { + Ok(canonical_prev_root) => canonical_prev_root, + Err(_) => { + // Can't prove that the commitment is non-canonical if the proof + // is invalid. + return false + }, + }; + + let commitment_root = + match commitment.payload.get_decoded::>(&known_payloads::MMR_ROOT_ID) { + Some(commitment_root) => commitment_root, + None => { + // If the commitment doesn't contain any MMR root, while the proof is valid, + // the commitment is invalid + return true + }, + }; + + canonical_prev_root != commitment_root + } +} + impl Pallet { /// Return the currently active BEEFY authority set proof. 
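The `AncestryHelper` implementation above decides whether a BEEFY commitment targets a non-canonical fork: the canonical MMR root is read from a canonical header's digest, the ancestry proof is verified against it, and only then is the commitment's own MMR root payload compared. A compact restatement of that decision table with stand-in types (not the pallet's real signatures):

```rust
// Stand-in types; mirrors the control flow of `is_non_canonical` above.
pub struct AncestryProof {
    pub prev_leaf_count: u64,
}

pub fn is_non_canonical(
    commitment_leaf_count: Option<u64>,    // from `block_num_to_leaf_count(commitment.block_number)`
    commitment_mmr_root: Option<[u8; 32]>, // MMR root payload carried by the commitment, if any
    proof: &AncestryProof,
    verified_prev_root: Option<[u8; 32]>,  // root recovered by `verify_ancestry_proof`, if it verified
) -> bool {
    // An invalid block number, a mismatched proof, or an unverifiable proof
    // means non-canonicality cannot be proven, so answer `false`.
    let Some(leaf_count) = commitment_leaf_count else { return false };
    if leaf_count != proof.prev_leaf_count {
        return false;
    }
    let Some(prev_root) = verified_prev_root else { return false };

    // A valid proof plus a commitment with no MMR root payload is itself
    // invalid, hence reported as non-canonical.
    let Some(commitment_root) = commitment_mmr_root else { return true };

    // Otherwise the commitment is non-canonical exactly when the roots differ.
    prev_root != commitment_root
}
```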
pub fn authority_set_proof() -> BeefyAuthoritySet> { diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs index d59c219d3e71e..0521bdabbe495 100644 --- a/substrate/frame/beefy-mmr/src/mock.rs +++ b/substrate/frame/beefy-mmr/src/mock.rs @@ -101,6 +101,7 @@ impl pallet_beefy::Config for Test { type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = ConstU64<100>; type OnNewValidatorSet = BeefyMmr; + type AncestryHelper = BeefyMmr; type WeightInfo = (); type KeyOwnerProof = sp_core::Void; type EquivocationReportSystem = (); diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index fac799bf64e43..f99835a1dc0a5 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -19,11 +19,15 @@ use std::vec; use codec::{Decode, Encode}; use sp_consensus_beefy::{ + known_payloads, mmr::{BeefyNextAuthoritySet, MmrLeafVersion}, - ValidatorSet, + AncestryHelper, Commitment, Payload, ValidatorSet, }; -use sp_core::H256; +use sp_core::{ + offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, + H256, +}; use sp_io::TestExternalities; use sp_runtime::{traits::Keccak256, DigestItem}; @@ -31,8 +35,9 @@ use frame_support::traits::OnInitialize; use crate::mock::*; -fn init_block(block: u64) { - System::set_block_number(block); +fn init_block(block: u64, maybe_parent_hash: Option) { + let parent_hash = maybe_parent_hash.unwrap_or(H256::repeat_byte(block as u8)); + System::initialize(&block, &parent_hash, &Default::default()); Session::on_initialize(block); Mmr::on_initialize(block); Beefy::on_initialize(block); @@ -61,38 +66,32 @@ fn read_mmr_leaf(ext: &mut TestExternalities, key: Vec) -> MmrLeaf { fn should_contain_mmr_digest() { let mut ext = new_test_ext(vec![1, 2, 3, 4]); ext.execute_with(|| { - init_block(1); - + init_block(1, None); assert_eq!( System::digest().logs, vec![ beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))) + beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&[ + 117, 0, 56, 25, 185, 195, 71, 232, 67, 213, 27, 178, 64, 168, 137, 220, 64, + 184, 64, 240, 83, 245, 18, 93, 185, 202, 125, 205, 17, 254, 18, 143 + ]))) ] ); // unique every time - init_block(2); - + init_block(2, None); assert_eq!( System::digest().logs, vec![ - beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() - )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" - ))), beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(3), mock_beefy_id(4)], 2).unwrap() )), - beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( - "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" - ))), + beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&[ + 193, 246, 48, 7, 89, 204, 186, 109, 167, 226, 188, 211, 8, 243, 203, 154, 234, + 235, 136, 210, 245, 7, 209, 27, 241, 90, 156, 113, 137, 65, 191, 139 + ]))), ] ); }); @@ -106,7 +105,7 @@ fn should_contain_valid_leaf_data() { let mut ext = new_test_ext(vec![1, 2, 3, 4]); let parent_hash = ext.execute_with(|| { - init_block(1); + init_block(1, None); frame_system::Pallet::::parent_hash() }); @@ -115,7 +114,7 @@ fn should_contain_valid_leaf_data() { 
mmr_leaf, MmrLeaf { version: MmrLeafVersion::new(1, 5), - parent_number_and_hash: (0_u64, H256::repeat_byte(0x45)), + parent_number_and_hash: (0_u64, H256::repeat_byte(1)), beefy_next_authority_set: BeefyNextAuthoritySet { id: 2, len: 2, @@ -131,7 +130,7 @@ fn should_contain_valid_leaf_data() { // build second block on top let parent_hash = ext.execute_with(|| { - init_block(2); + init_block(2, None); frame_system::Pallet::::parent_hash() }); @@ -140,7 +139,7 @@ fn should_contain_valid_leaf_data() { mmr_leaf, MmrLeaf { version: MmrLeafVersion::new(1, 5), - parent_number_and_hash: (1_u64, H256::repeat_byte(0x45)), + parent_number_and_hash: (1_u64, H256::repeat_byte(2)), beefy_next_authority_set: BeefyNextAuthoritySet { id: 3, len: 2, @@ -175,7 +174,7 @@ fn should_update_authorities() { assert_eq!(auth_set.keyset_commitment, next_auth_set.keyset_commitment); let announced_set = next_auth_set; - init_block(1); + init_block(1, None); let auth_set = BeefyMmr::authority_set_proof(); let next_auth_set = BeefyMmr::next_authority_set_proof(); @@ -191,7 +190,7 @@ fn should_update_authorities() { assert_eq!(want, next_auth_set.keyset_commitment); let announced_set = next_auth_set; - init_block(2); + init_block(2, None); let auth_set = BeefyMmr::authority_set_proof(); let next_auth_set = BeefyMmr::next_authority_set_proof(); @@ -207,3 +206,176 @@ fn should_update_authorities() { assert_eq!(want, next_auth_set.keyset_commitment); }); } + +#[test] +fn extract_validation_context_should_work_correctly() { + let mut ext = new_test_ext(vec![1, 2]); + + // Register offchain ext. + let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + + ext.execute_with(|| { + init_block(1, None); + let h1 = System::finalize(); + init_block(2, Some(h1.hash())); + let h2 = System::finalize(); + + // Check the MMR root log + let expected_mmr_root: [u8; 32] = array_bytes::hex_n_into_unchecked( + "b2106eff9894288bc212b3a9389caa54efd37962c3a7b71b3b0b06a0911b88a5", + ); + assert_eq!( + System::digest().logs, + vec![beefy_log(ConsensusLog::MmrRoot(H256::from_slice(&expected_mmr_root)))] + ); + + // Make sure that all the info about h2 was stored on-chain + init_block(3, Some(h2.hash())); + + // `extract_validation_context` should return the MMR root when the provided header + // is part of the chain, + assert_eq!( + BeefyMmr::extract_validation_context(h2.clone()), + Some(H256::from_slice(&expected_mmr_root)) + ); + + // `extract_validation_context` should return `None` when the provided header + // is not part of the chain. + let mut fork_h2 = h2; + fork_h2.state_root = H256::repeat_byte(0); + assert_eq!(BeefyMmr::extract_validation_context(fork_h2), None); + }); +} + +#[test] +fn is_non_canonical_should_work_correctly() { + let mut ext = new_test_ext(vec![1, 2]); + + let mut prev_roots = vec![]; + ext.execute_with(|| { + for block_num in 1..=500 { + init_block(block_num, None); + prev_roots.push(Mmr::mmr_root()) + } + }); + ext.persist_offchain_overlay(); + + // Register offchain ext. 
+ let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + + ext.execute_with(|| { + let valid_proof = Mmr::generate_ancestry_proof(250, None).unwrap(); + let mut invalid_proof = valid_proof.clone(); + invalid_proof.items.push((300, Default::default())); + + // The commitment is invalid if it has no MMR root payload and the proof is valid. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry([0, 0], vec![]), + block_number: 250, + validator_set_id: 0 + }, + valid_proof.clone(), + Mmr::mmr_root(), + ), + true + ); + + // If the `commitment.payload` contains an MMR root that doesn't match the ancestry proof, + // it's non-canonical. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: 250, + validator_set_id: 0, + }, + valid_proof.clone(), + Mmr::mmr_root(), + ), + true + ); + + // Should return false if the proof is invalid, no matter the payload. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: 250, + validator_set_id: 0 + }, + invalid_proof, + Mmr::mmr_root(), + ), + false + ); + + // Can't prove that the commitment is non-canonical if the `commitment.block_number` + // doesn't match the ancestry proof. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + prev_roots[250 - 1].encode(), + ), + block_number: 300, + validator_set_id: 0, + }, + valid_proof, + Mmr::mmr_root(), + ), + false + ); + + // For each previous block, the check: + // - should return false, if the commitment is targeting the canonical chain + // - should return true if the commitment is NOT targeting the canonical chain + for prev_block_number in 1usize..=500 { + let proof = Mmr::generate_ancestry_proof(prev_block_number as u64, None).unwrap(); + + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + prev_roots[prev_block_number - 1].encode(), + ), + block_number: prev_block_number as u64, + validator_set_id: 0, + }, + proof.clone(), + Mmr::mmr_root(), + ), + false + ); + + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + H256::repeat_byte(0).encode(), + ), + block_number: prev_block_number as u64, + validator_set_id: 0, + }, + proof, + Mmr::mmr_root(), + ), + true + ) + } + }); +} diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 890ac1399b9df..089b817e226f3 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -12,31 +12,30 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", 
default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false, features = ["serde"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +sp-consensus-beefy = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } [dev-dependencies] -frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -pallet-timestamp = { path = "../timestamp" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-staking = { path = "../../primitives/staking" } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-state-machine = { workspace = true } [features] default = ["std"] @@ -61,7 +60,6 @@ std = [ "sp-session/std", "sp-staking/std", "sp-state-machine/std", - "sp-std/std", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/substrate/frame/beefy/src/default_weights.rs b/substrate/frame/beefy/src/default_weights.rs index 8042f0c932eb6..70dd3bb02bf1e 100644 --- a/substrate/frame/beefy/src/default_weights.rs +++ b/substrate/frame/beefy/src/default_weights.rs @@ -24,7 +24,11 @@ use frame_support::weights::{ }; impl crate::WeightInfo for () { - fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight { + fn report_voting_equivocation( + votes_count: u32, + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. 
let validator_count = validator_count.max(100) as u64; @@ -37,7 +41,10 @@ impl crate::WeightInfo for () { ) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(Weight::from_parts(95u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) + .saturating_add(Weight::from_parts( + (50u64 * WEIGHT_REF_TIME_PER_MICROS).saturating_mul(votes_count as u64), + 0, + )) // report offence .saturating_add(Weight::from_parts(110u64 * WEIGHT_REF_TIME_PER_MICROS, 0)) .saturating_add(Weight::from_parts( @@ -50,6 +57,11 @@ impl crate::WeightInfo for () { .saturating_add(DbWeight::get().reads(2)) } + // TODO: Calculate + fn report_fork_voting(_validator_count: u32, _max_nominators_per_validator: u32) -> Weight { + Weight::MAX + } + fn set_new_genesis() -> Weight { DbWeight::get().writes(1) } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index aecc9e721d5c4..15345e6ae1997 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -34,11 +34,15 @@ //! that the `ValidateUnsigned` for the BEEFY pallet is used in the runtime //! definition. +use alloc::{vec, vec::Vec}; use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; use log::{error, info}; -use sp_consensus_beefy::{DoubleVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; +use sp_consensus_beefy::{ + check_commitment_signature, AncestryHelper, DoubleVotingProof, ForkVotingProof, + FutureBlockVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE, +}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -51,7 +55,6 @@ use sp_staking::{ offence::{Kind, Offence, OffenceReportSystem, ReportOffence}, SessionIndex, }; -use sp_std::prelude::*; use super::{Call, Config, Error, Pallet, LOG_TARGET}; @@ -118,18 +121,143 @@ where /// `offchain::SendTransactionTypes`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. -/// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. -pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, R, P, L)>); +/// - Offence reporter for unsigned transactions is fetched via the authorship pallet. +pub struct EquivocationReportSystem(core::marker::PhantomData<(T, R, P, L)>); /// Equivocation evidence convenience alias. -pub type EquivocationEvidenceFor = ( - DoubleVotingProof< - BlockNumberFor, - ::BeefyId, - <::BeefyId as RuntimeAppPublic>::Signature, - >, - ::KeyOwnerProof, -); +pub enum EquivocationEvidenceFor { + DoubleVotingProof( + DoubleVotingProof< + BlockNumberFor, + T::BeefyId, + ::Signature, + >, + T::KeyOwnerProof, + ), + ForkVotingProof( + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + T::KeyOwnerProof, + ), + FutureBlockVotingProof(FutureBlockVotingProof, T::BeefyId>, T::KeyOwnerProof), +} + +impl EquivocationEvidenceFor { + /// Returns the authority id of the equivocator. 
+ fn offender_id(&self) -> &T::BeefyId { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.offender_id(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.id, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.id, + } + } + + /// Returns the round number at which the equivocation occurred. + fn round_number(&self) -> &BlockNumberFor<T> { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.round_number(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.commitment.block_number, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + &equivocation_proof.vote.commitment.block_number, + } + } + + /// Returns the set id at which the equivocation occurred. + fn set_id(&self) -> ValidatorSetId { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => + equivocation_proof.set_id(), + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => + equivocation_proof.vote.commitment.validator_set_id, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => + equivocation_proof.vote.commitment.validator_set_id, + } + } + + /// Returns the key ownership proof attached to the equivocation evidence. + fn key_owner_proof(&self) -> &T::KeyOwnerProof { + match self { + EquivocationEvidenceFor::DoubleVotingProof(_, key_owner_proof) => key_owner_proof, + EquivocationEvidenceFor::ForkVotingProof(_, key_owner_proof) => key_owner_proof, + EquivocationEvidenceFor::FutureBlockVotingProof(_, key_owner_proof) => key_owner_proof, + } + } + + fn checked_offender
<P>
(&self) -> Option + where + P: KeyOwnerProofSystem<(KeyTypeId, T::BeefyId), Proof = T::KeyOwnerProof>, + { + let key = (BEEFY_KEY_TYPE, self.offender_id().clone()); + P::check_proof(key, self.key_owner_proof().clone()) + } + + fn check_equivocation_proof(self) -> Result<(), Error> { + match self { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, _) => { + // Validate equivocation proof (check votes are different and signatures are valid). + if !sp_consensus_beefy::check_double_voting_proof(&equivocation_proof) { + return Err(Error::::InvalidDoubleVotingProof); + } + + return Ok(()) + }, + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => { + let ForkVotingProof { vote, ancestry_proof, header } = equivocation_proof; + + let maybe_validation_context = , + >>::extract_validation_context(header); + let validation_context = match maybe_validation_context { + Some(validation_context) => validation_context, + None => { + return Err(Error::::InvalidForkVotingProof); + }, + }; + + let is_non_canonical = + >>::is_non_canonical( + &vote.commitment, + ancestry_proof, + validation_context, + ); + if !is_non_canonical { + return Err(Error::::InvalidForkVotingProof); + } + + let is_signature_valid = + check_commitment_signature(&vote.commitment, &vote.id, &vote.signature); + if !is_signature_valid { + return Err(Error::::InvalidForkVotingProof); + } + + Ok(()) + }, + EquivocationEvidenceFor::FutureBlockVotingProof(equivocation_proof, _) => { + let FutureBlockVotingProof { vote } = equivocation_proof; + // Check if the commitment actually targets a future block + if vote.commitment.block_number < frame_system::Pallet::::block_number() { + return Err(Error::::InvalidFutureBlockVotingProof); + } + + let is_signature_valid = + check_commitment_signature(&vote.commitment, &vote.id, &vote.signature); + if !is_signature_valid { + return Err(Error::::InvalidForkVotingProof); + } + + Ok(()) + }, + } + } +} impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem @@ -148,13 +276,8 @@ where fn publish_evidence(evidence: EquivocationEvidenceFor) -> Result<(), ()> { use frame_system::offchain::SubmitTransaction; - let (equivocation_proof, key_owner_proof) = evidence; - - let call = Call::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof), - key_owner_proof, - }; + let call: Call = evidence.into(); let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), @@ -166,18 +289,10 @@ where fn check_evidence( evidence: EquivocationEvidenceFor, ) -> Result<(), TransactionValidityError> { - let (equivocation_proof, key_owner_proof) = evidence; - - // Check the membership proof to extract the offender's id - let key = (BEEFY_KEY_TYPE, equivocation_proof.offender_id().clone()); - let offender = P::check_proof(key, key_owner_proof).ok_or(InvalidTransaction::BadProof)?; + let offender = evidence.checked_offender::
<P>
().ok_or(InvalidTransaction::BadProof)?; // Check if the offence has already been reported, and if so then we can discard the report. - let time_slot = TimeSlot { - set_id: equivocation_proof.set_id(), - round: *equivocation_proof.round_number(), - }; - + let time_slot = TimeSlot { set_id: evidence.set_id(), round: *evidence.round_number() }; if R::is_known_offence(&[offender], &time_slot) { Err(InvalidTransaction::Stale.into()) } else { @@ -189,47 +304,37 @@ where reporter: Option, evidence: EquivocationEvidenceFor, ) -> Result<(), DispatchError> { - let (equivocation_proof, key_owner_proof) = evidence; let reporter = reporter.or_else(|| pallet_authorship::Pallet::::author()); - let offender = equivocation_proof.offender_id().clone(); - - // We check the equivocation within the context of its set id (and - // associated session) and round. We also need to know the validator - // set count at the time of the offence since it is required to calculate - // the slash amount. - let set_id = equivocation_proof.set_id(); - let round = *equivocation_proof.round_number(); - let session_index = key_owner_proof.session(); - let validator_set_count = key_owner_proof.validator_count(); - // Validate the key ownership proof extracting the id of the offender. - let offender = P::check_proof((BEEFY_KEY_TYPE, offender), key_owner_proof) - .ok_or(Error::::InvalidKeyOwnershipProof)?; + // We check the equivocation within the context of its set id (and associated session). + let set_id = evidence.set_id(); + let round = *evidence.round_number(); + let set_id_session_index = crate::SetIdSession::::get(set_id) + .ok_or(Error::::InvalidEquivocationProofSession)?; - // Validate equivocation proof (check votes are different and signatures are valid). - if !sp_consensus_beefy::check_equivocation_proof(&equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()) - } - - // Check that the session id for the membership proof is within the - // bounds of the set id reported in the equivocation. - let set_id_session_index = - crate::SetIdSession::::get(set_id).ok_or(Error::::InvalidEquivocationProof)?; + // Check that the session id for the membership proof is within the bounds + // of the set id reported in the equivocation. + let key_owner_proof = evidence.key_owner_proof(); + let validator_count = key_owner_proof.validator_count(); + let session_index = key_owner_proof.session(); if session_index != set_id_session_index { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProofSession.into()) } + // Validate the key ownership proof extracting the id of the offender. + let offender = + evidence.checked_offender::
<P>
().ok_or(Error::::InvalidKeyOwnershipProof)?; + + evidence.check_equivocation_proof()?; + let offence = EquivocationOffence { time_slot: TimeSlot { set_id, round }, session_index, - validator_set_count, + validator_set_count: validator_count, offender, }; - R::report_offence(reporter.into_iter().collect(), offence) - .map_err(|_| Error::::DuplicateOffenceReport)?; - - Ok(()) + .map_err(|_| Error::::DuplicateOffenceReport.into()) } } @@ -239,49 +344,37 @@ where /// unsigned equivocation reports. impl Pallet { pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { - // discard equivocation report not coming from the local node - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, - _ => { - log::warn!( - target: LOG_TARGET, - "rejecting unsigned report equivocation transaction because it is not local/in-block." - ); - return InvalidTransaction::Call.into() - }, - } - - let evidence = (*equivocation_proof.clone(), key_owner_proof.clone()); - T::EquivocationReportSystem::check_evidence(evidence)?; - - let longevity = - >::Longevity::get(); - - ValidTransaction::with_tag_prefix("BeefyEquivocation") - // We assign the maximum priority for any equivocation report. - .priority(TransactionPriority::MAX) - // Only one equivocation report for the same offender at the same slot. - .and_provides(( - equivocation_proof.offender_id().clone(), - equivocation_proof.set_id(), - *equivocation_proof.round_number(), - )) - .longevity(longevity) - // We don't propagate this. This can never be included on a remote node. - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() + // discard equivocation report not coming from the local node + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, + _ => { + log::warn!( + target: LOG_TARGET, + "rejecting unsigned report equivocation transaction because it is not local/in-block." + ); + return InvalidTransaction::Call.into() + }, } + + let evidence = call.to_equivocation_evidence_for().ok_or(InvalidTransaction::Call)?; + let tag = (evidence.offender_id().clone(), evidence.set_id(), *evidence.round_number()); + T::EquivocationReportSystem::check_evidence(evidence)?; + + let longevity = + >::Longevity::get(); + ValidTransaction::with_tag_prefix("BeefyEquivocation") + // We assign the maximum priority for any equivocation report. + .priority(TransactionPriority::MAX) + // Only one equivocation report for the same offender at the same slot. + .and_provides(tag) + .longevity(longevity) + // We don't propagate this. This can never be included on a remote node. 
+ .propagate(false) + .build() } pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { - if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { - let evidence = (*equivocation_proof.clone(), key_owner_proof.clone()); - T::EquivocationReportSystem::check_evidence(evidence) - } else { - Err(InvalidTransaction::Call.into()) - } + let evidence = call.to_equivocation_evidence_for().ok_or(InvalidTransaction::Call)?; + T::EquivocationReportSystem::check_evidence(evidence) } } diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 63f3e9bb309c6..fd9a0027c6fc6 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -17,6 +17,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Encode, MaxEncodedLen}; use frame_support::{ @@ -28,7 +31,7 @@ use frame_support::{ }; use frame_system::{ ensure_none, ensure_signed, - pallet_prelude::{BlockNumberFor, OriginFor}, + pallet_prelude::{BlockNumberFor, HeaderFor, OriginFor}, }; use log; use sp_runtime::{ @@ -38,11 +41,11 @@ use sp_runtime::{ }; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::{offence::OffenceReportSystem, SessionIndex}; -use sp_std::prelude::*; use sp_consensus_beefy::{ - AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, OnNewValidatorSet, - ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, + AncestryHelper, AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, + ForkVotingProof, FutureBlockVotingProof, OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, + GENESIS_AUTHORITY_SET_ID, }; mod default_weights; @@ -98,6 +101,9 @@ pub mod pallet { /// weight MMR root over validators and make it available for Light Clients. type OnNewValidatorSet: OnNewValidatorSet<::BeefyId>; + /// Hook for checking commitment canonicity. + type AncestryHelper: AncestryHelper>; + /// Weights for this pallet. type WeightInfo: WeightInfo; @@ -188,8 +194,14 @@ pub mod pallet { pub enum Error { /// A key ownership proof provided as part of an equivocation report is invalid. InvalidKeyOwnershipProof, - /// An equivocation proof provided as part of an equivocation report is invalid. - InvalidEquivocationProof, + /// A double voting proof provided as part of an equivocation report is invalid. + InvalidDoubleVotingProof, + /// A fork voting proof provided as part of an equivocation report is invalid. + InvalidForkVotingProof, + /// A future block voting proof provided as part of an equivocation report is invalid. + InvalidFutureBlockVotingProof, + /// The session of the equivocation proof is invalid + InvalidEquivocationProofSession, /// A given equivocation report is valid but already previously reported. DuplicateOffenceReport, /// Submitted configuration is invalid. @@ -203,11 +215,11 @@ pub mod pallet { /// against the extracted offender. If both are valid, the offence /// will be reported. 
#[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_equivocation( + #[pallet::weight(T::WeightInfo::report_double_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] - pub fn report_equivocation( + pub fn report_double_voting( origin: OriginFor, equivocation_proof: Box< DoubleVotingProof< @@ -222,7 +234,7 @@ pub mod pallet { T::EquivocationReportSystem::process_evidence( Some(reporter), - (*equivocation_proof, key_owner_proof), + EquivocationEvidenceFor::DoubleVotingProof(*equivocation_proof, key_owner_proof), )?; // Waive the fee since the report is valid and beneficial Ok(Pays::No.into()) @@ -238,11 +250,11 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::report_equivocation( + #[pallet::weight(T::WeightInfo::report_double_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] - pub fn report_equivocation_unsigned( + pub fn report_double_voting_unsigned( origin: OriginFor, equivocation_proof: Box< DoubleVotingProof< @@ -257,7 +269,7 @@ pub mod pallet { T::EquivocationReportSystem::process_evidence( None, - (*equivocation_proof, key_owner_proof), + EquivocationEvidenceFor::DoubleVotingProof(*equivocation_proof, key_owner_proof), )?; Ok(Pays::No.into()) } @@ -278,6 +290,126 @@ pub mod pallet { GenesisBlock::::put(Some(genesis_block)); Ok(()) } + + /// Report fork voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_fork_voting( + origin: OriginFor, + equivocation_proof: Box< + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + >, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + let reporter = ensure_signed(origin)?; + + T::EquivocationReportSystem::process_evidence( + Some(reporter), + EquivocationEvidenceFor::ForkVotingProof(*equivocation_proof, key_owner_proof), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report fork voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only + /// block authors will call it (validated in `ValidateUnsigned`), as such + /// if the block author is defined it will be defined as the equivocation + /// reporter. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_fork_voting_unsigned( + origin: OriginFor, + equivocation_proof: Box< + ForkVotingProof< + HeaderFor, + T::BeefyId, + >>::Proof, + >, + >, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + T::EquivocationReportSystem::process_evidence( + None, + EquivocationEvidenceFor::ForkVotingProof(*equivocation_proof, key_owner_proof), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report future block voting equivocation. 
This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_future_block_voting( + origin: OriginFor, + equivocation_proof: Box, T::BeefyId>>, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + let reporter = ensure_signed(origin)?; + + T::EquivocationReportSystem::process_evidence( + Some(reporter), + EquivocationEvidenceFor::FutureBlockVotingProof( + *equivocation_proof, + key_owner_proof, + ), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } + + /// Report future block voting equivocation. This method will verify the equivocation proof + /// and validate the given key ownership proof against the extracted offender. + /// If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only + /// block authors will call it (validated in `ValidateUnsigned`), as such + /// if the block author is defined it will be defined as the equivocation + /// reporter. + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::report_fork_voting( + key_owner_proof.validator_count(), + T::MaxNominators::get(), + ))] + pub fn report_future_block_voting_unsigned( + origin: OriginFor, + equivocation_proof: Box, T::BeefyId>>, + key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + T::EquivocationReportSystem::process_evidence( + None, + EquivocationEvidenceFor::FutureBlockVotingProof( + *equivocation_proof, + key_owner_proof, + ), + )?; + // Waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) + } } #[pallet::hooks] @@ -300,6 +432,48 @@ pub mod pallet { Self::validate_unsigned(source, call) } } + + impl Call { + pub fn to_equivocation_evidence_for(&self) -> Option> { + match self { + Call::report_double_voting_unsigned { equivocation_proof, key_owner_proof } => + Some(EquivocationEvidenceFor::::DoubleVotingProof( + *equivocation_proof.clone(), + key_owner_proof.clone(), + )), + Call::report_fork_voting_unsigned { equivocation_proof, key_owner_proof } => + Some(EquivocationEvidenceFor::::ForkVotingProof( + *equivocation_proof.clone(), + key_owner_proof.clone(), + )), + _ => None, + } + } + } + + impl From> for Call { + fn from(evidence: EquivocationEvidenceFor) -> Self { + match evidence { + EquivocationEvidenceFor::DoubleVotingProof(equivocation_proof, key_owner_proof) => + Call::report_double_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, key_owner_proof) => + Call::report_fork_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + EquivocationEvidenceFor::FutureBlockVotingProof( + equivocation_proof, + key_owner_proof, + ) => Call::report_future_block_voting_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }, + } + } + } } #[cfg(any(feature = "try-runtime", test))] @@ -367,7 +541,7 @@ impl Pallet { /// Submits an extrinsic to report an equivocation. This method will create /// an unsigned extrinsic with a call to `report_equivocation_unsigned` and /// will push the transaction to the pool. Only useful in an offchain context. 
- pub fn submit_unsigned_equivocation_report( + pub fn submit_unsigned_double_voting_report( equivocation_proof: DoubleVotingProof< BlockNumberFor, T::BeefyId, @@ -375,7 +549,11 @@ impl Pallet { >, key_owner_proof: T::KeyOwnerProof, ) -> Option<()> { - T::EquivocationReportSystem::publish_evidence((equivocation_proof, key_owner_proof)).ok() + T::EquivocationReportSystem::publish_evidence(EquivocationEvidenceFor::DoubleVotingProof( + equivocation_proof, + key_owner_proof, + )) + .ok() } fn change_authorities( @@ -526,6 +704,20 @@ impl IsMember for Pallet { } pub trait WeightInfo { - fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; + fn report_voting_equivocation( + votes_count: u32, + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight; + fn report_double_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight { + Self::report_voting_equivocation(2, validator_count, max_nominators_per_validator) + } + fn report_fork_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight; + fn report_future_block_voting( + validator_count: u32, + max_nominators_per_validator: u32, + ) -> Weight { + Self::report_voting_equivocation(1, validator_count, max_nominators_per_validator) + } fn set_new_genesis() -> Weight; } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 0b87de6bf5d79..03efccff76430 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use std::vec; use frame_election_provider_support::{ @@ -28,8 +30,12 @@ use frame_support::{ use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_runtime::{ - app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, - traits::OpaqueKeys, BuildStorage, Perbill, + app_crypto::ecdsa::Public, + curve::PiecewiseLinear, + impl_opaque_keys, + testing::TestXt, + traits::{Header as HeaderT, OpaqueKeys}, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -37,6 +43,7 @@ use sp_state_machine::BasicExternalities; use crate as pallet_beefy; pub use sp_consensus_beefy::{ecdsa_crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; +use sp_consensus_beefy::{AncestryHelper, Commitment}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -75,11 +82,46 @@ where type Extrinsic = TestXt; } +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct MockAncestryProofContext { + pub is_valid: bool, +} + +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct MockAncestryProof { + pub is_non_canonical: bool, +} + parameter_types! { pub const Period: u64 = 1; pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); pub const MaxSetIdSessionEntries: u32 = BondingDuration::get() * SessionsPerEra::get(); + + pub storage AncestryProofContext: Option = Some( + MockAncestryProofContext { + is_valid: true, + } + ); +} + +pub struct MockAncestryHelper; + +impl AncestryHelper
<Header>
for MockAncestryHelper { + type Proof = MockAncestryProof; + type ValidationContext = MockAncestryProofContext; + + fn extract_validation_context(_header: Header) -> Option { + AncestryProofContext::get() + } + + fn is_non_canonical( + _commitment: &Commitment, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool { + context.is_valid && proof.is_non_canonical + } } impl pallet_beefy::Config for Test { @@ -88,6 +130,7 @@ impl pallet_beefy::Config for Test { type MaxNominators = ConstU32<1000>; type MaxSetIdSessionEntries = MaxSetIdSessionEntries; type OnNewValidatorSet = (); + type AncestryHelper = MockAncestryHelper; type WeightInfo = (); type KeyOwnerProof = >::Proof; type EquivocationReportSystem = @@ -120,20 +163,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_timestamp::Config for Test { @@ -171,35 +205,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 6a6aa245ce1f9..a63b3532b6983 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -20,18 +20,22 @@ use std::vec; use frame_support::{ assert_err, assert_ok, - dispatch::{GetDispatchInfo, Pays}, + dispatch::{DispatchResultWithPostInfo, Pays}, traits::{Currency, KeyOwnerProofSystem, OnInitialize}, }; use sp_consensus_beefy::{ - check_equivocation_proof, + check_double_voting_proof, ecdsa_crypto, known_payloads::MMR_ROOT_ID, - test_utils::{generate_equivocation_proof, Keyring as BeefyKeyring}, - Payload, ValidatorSet, KEY_TYPE as BEEFY_KEY_TYPE, + test_utils::{ + generate_double_voting_proof, generate_fork_voting_proof, + generate_future_block_voting_proof, 
Keyring as BeefyKeyring, + }, + Payload, ValidatorSet, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE, }; use sp_runtime::DigestItem; +use sp_session::MembershipProof; -use crate::{self as beefy, mock::*, Call, Config, Error, Weight, WeightInfo}; +use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfo}; fn init_block(block: u64) { System::set_block_number(block); @@ -222,51 +226,90 @@ fn should_sign_and_verify() { // generate an equivocation proof, with two votes in the same round for // same payload signed by the same key - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (1, payload1.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in different rounds for // different payloads signed by the same key - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (2, payload2.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes by different authorities - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Alice), (1, payload2.clone(), set_id, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in different set ids - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1.clone(), set_id, &BeefyKeyring::Bob), (1, payload2.clone(), set_id + 1, &BeefyKeyring::Bob), ); // expect invalid equivocation proof - assert!(!check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(!check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); // generate an equivocation proof, with two votes in the same round for // different payloads signed by the same key let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (1, payload1, set_id, &BeefyKeyring::Bob), (1, payload2, set_id, &BeefyKeyring::Bob), ); // expect valid equivocation proof - assert!(check_equivocation_proof::<_, _, Keccak256>(&equivocation_proof)); + assert!(check_double_voting_proof::<_, _, Keccak256>(&equivocation_proof)); } -#[test] -fn report_equivocation_current_set_works() { +trait ReportEquivocationFn: + FnMut( + u64, + ValidatorSetId, + &BeefyKeyring, + MembershipProof, +) -> DispatchResultWithPostInfo +{ +} + +impl ReportEquivocationFn for F where + F: FnMut( + u64, + ValidatorSetId, + &BeefyKeyring, + MembershipProof, + ) -> DispatchResultWithPostInfo +{ +} + +fn report_double_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: &BeefyKeyring, + key_owner_proof: MembershipProof, +) -> 
DispatchResultWithPostInfo { + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + let equivocation_proof = generate_double_voting_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); + + Beefy::report_double_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +fn report_equivocation_current_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -297,24 +340,11 @@ fn report_equivocation_current_set_works() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof, with two votes in the same round for - // different payloads signed by the same key - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - // create the key ownership proof let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); + assert_ok!(f(block_num, set_id, &equivocation_keyring, key_owner_proof)); start_era(2); @@ -345,8 +375,7 @@ fn report_equivocation_current_set_works() { }); } -#[test] -fn report_equivocation_old_set_works() { +fn report_equivocation_old_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -384,20 +413,8 @@ fn report_equivocation_old_set_works() { let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the old set, - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, old_set_id, &equivocation_keyring), - (block_num, payload2, old_set_id, &equivocation_keyring), - ); - // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); + assert_ok!(f(block_num, old_set_id, &equivocation_keyring, key_owner_proof)); start_era(3); @@ -428,8 +445,7 @@ fn report_equivocation_old_set_works() { }); } -#[test] -fn report_equivocation_invalid_set_id() { +fn report_equivocation_invalid_set_id(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -446,28 +462,15 @@ fn report_equivocation_invalid_set_id() { let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation for a future set - let equivocation_proof = generate_equivocation_proof( - (block_num, 
payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); - // the call for reporting the equivocation should error assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, + f(block_num, set_id + 1, &equivocation_keyring, key_owner_proof), + Error::::InvalidEquivocationProofSession, ); }); } -#[test] -fn report_equivocation_invalid_session() { +fn report_equivocation_invalid_session(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -488,29 +491,16 @@ fn report_equivocation_invalid_session() { let set_id = Beefy::validator_set().unwrap().id(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof at following era set id = 2 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - // report an equivocation for the current set using an key ownership // proof from the previous set, the session should be invalid. assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, + f(block_num, set_id + 1, &equivocation_keyring, key_owner_proof), + Error::::InvalidEquivocationProofSession, ); }); } -#[test] -fn report_equivocation_invalid_key_owner_proof() { +fn report_equivocation_invalid_key_owner_proof(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -532,14 +522,6 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the authority at index 0 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); - // we need to start a new era otherwise the key ownership proof won't be // checked since the authorities are part of the current session start_era(2); @@ -547,18 +529,81 @@ fn report_equivocation_invalid_key_owner_proof() { // report an equivocation for the current set using a key ownership // proof for a different key than the one in the equivocation proof. 
assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - invalid_key_owner_proof, - ), + f(block_num, set_id, &equivocation_keyring, invalid_key_owner_proof), Error::::InvalidKeyOwnershipProof, ); }); } +fn valid_equivocation_reports_dont_pay_fees(mut f: impl ReportEquivocationFn) { + let authorities = test_authorities(); + + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // create the key ownership proof. + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + // report the equivocation. + let post_info = + f(block_num, set_id, &equivocation_keyring, key_owner_proof.clone()).unwrap(); + + // the original weight should be kept, but given that the report + // is valid the fee is waived. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::No); + + // report the equivocation again which is invalid now since it is + // duplicate. + let post_info = f(block_num, set_id, &equivocation_keyring, key_owner_proof) + .err() + .unwrap() + .post_info; + + // the fee is not waived and the original weight is kept. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::Yes); + }) +} + +// Test double voting reporting logic. + #[test] -fn report_equivocation_invalid_equivocation_proof() { +fn report_double_voting_current_set_works() { + report_equivocation_current_set_works(report_double_voting); +} + +#[test] +fn report_double_voting_old_set_works() { + report_equivocation_old_set_works(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_session() { + report_equivocation_invalid_session(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_double_voting); +} + +#[test] +fn report_double_voting_invalid_equivocation_proof() { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { @@ -578,12 +623,12 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( - Beefy::report_equivocation_unsigned( + Beefy::report_double_voting_unsigned( RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), - Error::::InvalidEquivocationProof, + Error::::InvalidDoubleVotingProof, ); }; @@ -594,31 +639,31 @@ fn report_equivocation_invalid_equivocation_proof() { // both votes target the same block number and payload, // there is no equivocation. - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &equivocation_keyring), )); // votes targeting different rounds, there is no equivocation. 
- assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num + 1, payload2.clone(), set_id, &equivocation_keyring), )); // votes signed with different authority keys - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &BeefyKeyring::Charlie), )); // votes signed with a key that isn't part of the authority set - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1.clone(), set_id, &equivocation_keyring), (block_num, payload1.clone(), set_id, &BeefyKeyring::Dave), )); // votes targeting different set ids - assert_invalid_equivocation_proof(generate_equivocation_proof( + assert_invalid_equivocation_proof(generate_double_voting_proof( (block_num, payload1, set_id, &equivocation_keyring), (block_num, payload2, set_id + 1, &equivocation_keyring), )); @@ -626,7 +671,7 @@ fn report_equivocation_invalid_equivocation_proof() { } #[test] -fn report_equivocation_validate_unsigned_prevents_duplicates() { +fn report_double_voting_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, ValidTransaction, @@ -649,14 +694,14 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( + let equivocation_proof = generate_double_voting_proof( (block_num, payload1, set_id, &equivocation_keyring), (block_num, payload2, set_id, &equivocation_keyring), ); let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - let call = Call::report_equivocation_unsigned { + let call = Call::report_double_voting_unsigned { equivocation_proof: Box::new(equivocation_proof.clone()), key_owner_proof: key_owner_proof.clone(), }; @@ -691,7 +736,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { assert_ok!(::pre_dispatch(&call)); // we submit the report - Beefy::report_equivocation_unsigned( + Beefy::report_double_voting_unsigned( RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, @@ -716,11 +761,11 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { } #[test] -fn report_equivocation_has_valid_weight() { +fn report_double_voting_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) + .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -728,20 +773,75 @@ fn report_equivocation_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. 
assert!((100..=1000) - .map(|validators| ::WeightInfo::report_equivocation(validators, 1000)) + .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); } #[test] -fn valid_equivocation_reports_dont_pay_fees() { +fn valid_double_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_double_voting) +} + +// Test fork voting reporting logic. + +fn report_fork_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: &BeefyKeyring, + key_owner_proof: MembershipProof, +) -> DispatchResultWithPostInfo { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload, set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); + + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +#[test] +fn report_fork_voting_current_set_works() { + report_equivocation_current_set_works(report_fork_voting); +} + +#[test] +fn report_fork_voting_old_set_works() { + report_equivocation_old_set_works(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_session() { + report_equivocation_invalid_session(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_fork_voting); +} + +#[test] +fn report_fork_voting_invalid_equivocation_proof() { let authorities = test_authorities(); - ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { - start_era(1); + let mut ext = ExtBuilder::default().add_authorities(authorities).build(); + let mut era = 1; + let (block_num, set_id, equivocation_keyring, key_owner_proof) = ext.execute_with(|| { + start_era(era); let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); let authorities = validator_set.validators(); let set_id = validator_set.id(); @@ -750,56 +850,224 @@ fn valid_equivocation_reports_dont_pay_fees() { let equivocation_key = &authorities[equivocation_authority_index]; let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - // generate equivocation proof - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), + // generate a key ownership proof at set id in era 1 + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + era += 1; + start_era(era); + (block_num, set_id, equivocation_keyring, key_owner_proof) + }); + ext.persist_offchain_overlay(); + + ext.execute_with(|| { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + + // vote signed with a key that isn't part of the authority set + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload.clone(), set_id, &BeefyKeyring::Dave), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof.clone(), + ), + Error::::InvalidKeyOwnershipProof, ); - // 
create the key ownership proof. - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + // Simulate InvalidForkVotingProof error. + let equivocation_proof = generate_fork_voting_proof( + (block_num + 1, payload.clone(), set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: false }, + System::finalize(), + ); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof.clone(), + ), + Error::::InvalidForkVotingProof, + ); + }); +} - // check the dispatch info for the call. - let info = Call::::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof.clone()), - key_owner_proof: key_owner_proof.clone(), +#[test] +fn report_fork_voting_invalid_context() { + let authorities = test_authorities(); + + let mut ext = ExtBuilder::default().add_authorities(authorities).build(); + + let mut era = 1; + let block_num = ext.execute_with(|| { + assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(Session::current_index(), 0); + start_era(era); + + let block_num = System::block_number(); + era += 1; + start_era(era); + block_num + }); + ext.persist_offchain_overlay(); + + ext.execute_with(|| { + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + let validators = Session::validators(); + + // make sure that all validators have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(era, validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); } - .get_dispatch_info(); - // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); - assert_eq!(info.pays_fee, Pays::Yes); + assert_eq!(authorities.len(), 2); + let equivocation_authority_index = 1; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - // report the equivocation. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof.clone()), - key_owner_proof.clone(), - ) - .unwrap(); + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - // the original weight should be kept, but given that the report - // is valid the fee is waived. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::No); + // generate a fork equivocation proof, with a vote in the same round for a + // different payload than finalized + let equivocation_proof = generate_fork_voting_proof( + (block_num, payload, set_id, &equivocation_keyring), + MockAncestryProof { is_non_canonical: true }, + System::finalize(), + ); - // report the equivocation again which is invalid now since it is - // duplicate. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ) - .err() - .unwrap() - .post_info; + // create the key ownership proof + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - // the fee is not waived and the original weight is kept. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::Yes); - }) + // report an equivocation for the current set. 
Simulate a failure of + // `extract_validation_context` + AncestryProofContext::set(&None); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof.clone()), + key_owner_proof.clone(), + ), + Error::::InvalidForkVotingProof, + ); + + // report an equivocation for the current set. Simulate an invalid context. + AncestryProofContext::set(&Some(MockAncestryProofContext { is_valid: false })); + assert_err!( + Beefy::report_fork_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ), + Error::::InvalidForkVotingProof, + ); + }); +} + +#[test] +fn valid_fork_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_fork_voting) +} + +// Test future block voting reporting logic. + +fn report_future_block_voting( + block_num: u64, + set_id: ValidatorSetId, + equivocation_keyring: &BeefyKeyring, + key_owner_proof: MembershipProof, +) -> DispatchResultWithPostInfo { + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let equivocation_proof = generate_future_block_voting_proof(( + block_num + 100, + payload, + set_id, + &equivocation_keyring, + )); + + Beefy::report_future_block_voting_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) +} + +#[test] +fn report_future_block_voting_current_set_works() { + report_equivocation_current_set_works(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_old_set_works() { + report_equivocation_old_set_works(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_set_id() { + report_equivocation_invalid_set_id(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_session() { + report_equivocation_invalid_session(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_key_owner_proof() { + report_equivocation_invalid_key_owner_proof(report_future_block_voting); +} + +#[test] +fn report_future_block_voting_invalid_equivocation_proof() { + let authorities = test_authorities(); + + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { + start_era(1); + + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // create the key ownership proof + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + start_era(2); + + let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + + // vote targeting old block + assert_err!( + Beefy::report_future_block_voting_unsigned( + RuntimeOrigin::none(), + Box::new(generate_future_block_voting_proof(( + 1, + payload.clone(), + set_id, + &equivocation_keyring, + ))), + key_owner_proof.clone(), + ), + Error::::InvalidFutureBlockVotingProof, + ); + }); +} + +#[test] +fn valid_future_block_voting_reports_dont_pay_fees() { + valid_equivocation_reports_dont_pay_fees(report_future_block_voting) } #[test] diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index b5824ab2ec2ee..61bc706963bb6 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -16,29 +16,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { 
package = "parity-scale-codec", version = "3.6.12", default-features = false } -linregress = { version = "0.5.1", optional = true } +codec = { workspace = true } +linregress = { optional = true, workspace = true } log = { workspace = true } -paste = "1.0" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +paste = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-support-procedural = { path = "../support/procedural", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-storage = { path = "../../primitives/storage", default-features = false } -static_assertions = "1.1.0" +frame-support = { workspace = true } +frame-support-procedural = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-storage = { workspace = true } +static_assertions = { workspace = true, default-features = true } [dev-dependencies] -array-bytes = "6.2.2" -rusty-fork = { version = "0.3.0", default-features = false } -sp-keystore = { path = "../../primitives/keystore" } +array-bytes = { workspace = true, default-features = true } +rusty-fork = { workspace = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] @@ -58,7 +57,6 @@ std = [ "sp-keystore/std", "sp-runtime-interface/std", "sp-runtime/std", - "sp-std/std", "sp-storage/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index e4f3c272a63e4..6a449fb597d56 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -15,14 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "..", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = 
true } [features] default = ["std"] @@ -34,7 +33,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs index 7e6aa8e6bf605..bf3d406d0b2b0 100644 --- a/substrate/frame/benchmarking/pov/src/benchmarking.rs +++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs @@ -247,7 +247,7 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn measured_storage_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); #[block] { @@ -257,7 +257,7 @@ mod benchmarks { #[benchmark(pov_mode = MaxEncodedLen)] fn mel_storage_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); #[block] { @@ -267,7 +267,7 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn measured_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); LargeValue2::::put(&v); #[block] @@ -279,7 +279,7 @@ mod benchmarks { #[benchmark(pov_mode = MaxEncodedLen)] fn mel_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); LargeValue2::::put(&v); #[block] @@ -293,7 +293,7 @@ mod benchmarks { Pov::LargeValue2: Measured })] fn mel_mixed_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); LargeValue2::::put(&v); #[block] @@ -307,7 +307,7 @@ mod benchmarks { Pov::LargeValue2: MaxEncodedLen })] fn measured_mixed_storage_double_value_read_linear_size(l: Linear<0, { 1 << 22 }>) { - let v: sp_runtime::BoundedVec = sp_std::vec![0u8; l as usize].try_into().unwrap(); + let v: sp_runtime::BoundedVec = alloc::vec![0u8; l as usize].try_into().unwrap(); LargeValue::::put(&v); LargeValue2::::put(&v); #[block] @@ -319,8 +319,8 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn storage_map_unbounded_both_measured_read(i: Linear<0, 1000>) { - UnboundedMap::::insert(i, sp_std::vec![0; i as usize]); - UnboundedMap2::::insert(i, sp_std::vec![0; i as usize]); + UnboundedMap::::insert(i, alloc::vec![0; i as usize]); + UnboundedMap2::::insert(i, alloc::vec![0; i as usize]); #[block] { assert!(UnboundedMap::::get(i).is_some()); @@ -333,7 +333,7 @@ mod benchmarks { })] fn storage_map_partial_unbounded_read(i: Linear<0, 1000>) { Map1M::::insert(i, 0); - UnboundedMap::::insert(i, sp_std::vec![0; i as usize]); + UnboundedMap::::insert(i, alloc::vec![0; i as usize]); #[block] { assert!(Map1M::::get(i).is_some()); @@ -346,7 +346,7 @@ mod benchmarks { })] fn storage_map_partial_unbounded_ignored_read(i: Linear<0, 1000>) { Map1M::::insert(i, 0); - UnboundedMap::::insert(i, 
sp_std::vec![0; i as usize]); + UnboundedMap::::insert(i, alloc::vec![0; i as usize]); #[block] { assert!(Map1M::::get(i).is_some()); @@ -379,7 +379,7 @@ mod benchmarks { #[benchmark] fn storage_iteration() { for i in 0..65000 { - UnboundedMapTwox::::insert(i, sp_std::vec![0; 64]); + UnboundedMapTwox::::insert(i, alloc::vec![0; 64]); } #[block] { diff --git a/substrate/frame/benchmarking/pov/src/lib.rs b/substrate/frame/benchmarking/pov/src/lib.rs index eb02ccc983c09..4cdbaec2305c1 100644 --- a/substrate/frame/benchmarking/pov/src/lib.rs +++ b/substrate/frame/benchmarking/pov/src/lib.rs @@ -23,13 +23,15 @@ mod benchmarking; mod tests; mod weights; +extern crate alloc; + pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use alloc::vec::Vec; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use sp_std::prelude::*; #[pallet::pallet] pub struct Pallet(_); diff --git a/substrate/frame/benchmarking/src/baseline.rs b/substrate/frame/benchmarking/src/baseline.rs index e76d5aed7b8d3..711d2123ca831 100644 --- a/substrate/frame/benchmarking/src/baseline.rs +++ b/substrate/frame/benchmarking/src/baseline.rs @@ -21,12 +21,12 @@ #![cfg(feature = "runtime-benchmarks")] use crate::benchmarks; +use alloc::{vec, vec::Vec}; use frame_system::Pallet as System; use sp_runtime::{ traits::{AppVerify, Hash}, RuntimeAppPublic, }; -use sp_std::{vec, vec::Vec}; mod crypto { use sp_application_crypto::{app_crypto, sr25519, KeyTypeId}; diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index d4ee0abbecce5..625da2a24bd0a 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "std")] mod analysis; #[cfg(test)] @@ -35,6 +37,7 @@ pub mod v1; /// The exports are not stable and should not be relied on. #[doc(hidden)] pub mod __private { + pub use alloc::{boxed::Box, str, vec, vec::Vec}; pub use codec; pub use frame_support::{storage, traits}; pub use log; @@ -42,7 +45,6 @@ pub mod __private { pub use sp_core::defer; pub use sp_io::storage::root as storage_root; pub use sp_runtime::{traits::Zero, StateVersion}; - pub use sp_std::{self, boxed::Box, str, vec, vec::Vec}; pub use sp_storage::{well_known_keys, TrackedStorageKey}; } diff --git a/substrate/frame/benchmarking/src/tests.rs b/substrate/frame/benchmarking/src/tests.rs index bcca5fe7c2f2a..09011eadb03ae 100644 --- a/substrate/frame/benchmarking/src/tests.rs +++ b/substrate/frame/benchmarking/src/tests.rs @@ -25,7 +25,6 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -use sp_std::prelude::*; use std::cell::RefCell; #[frame_support::pallet(dev_mode)] @@ -130,7 +129,6 @@ mod benchmarks { use frame_support::{assert_err, assert_ok, ensure, traits::Get}; use frame_system::RawOrigin; use rusty_fork::rusty_fork_test; - use sp_std::prelude::*; // Additional used internally by the benchmark macro. 
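// (The `sp_std::prelude` glob removed just above appears to be unnecessary now
// that `frame_benchmarking::__private` re-exports `Vec`, `Box` and `str` from
// `alloc`, as introduced earlier in this change.)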
use super::pallet_test::{Call, Config, Pallet}; diff --git a/substrate/frame/benchmarking/src/tests_instance.rs b/substrate/frame/benchmarking/src/tests_instance.rs index d6e1cf99ef73f..ecffbd1a018fa 100644 --- a/substrate/frame/benchmarking/src/tests_instance.rs +++ b/substrate/frame/benchmarking/src/tests_instance.rs @@ -25,7 +25,6 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -use sp_std::prelude::*; #[frame_support::pallet] mod pallet_test { @@ -131,7 +130,6 @@ mod benchmarks { use crate::account; use frame_support::ensure; use frame_system::RawOrigin; - use sp_std::prelude::*; // Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; diff --git a/substrate/frame/benchmarking/src/utils.rs b/substrate/frame/benchmarking/src/utils.rs index bfa25f63ef33e..ca362f7aa7efe 100644 --- a/substrate/frame/benchmarking/src/utils.rs +++ b/substrate/frame/benchmarking/src/utils.rs @@ -16,6 +16,7 @@ // limitations under the License. //! Interfaces, types and utils for benchmarking a FRAME runtime. +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{dispatch::DispatchErrorWithPostInfo, pallet_prelude::*, traits::StorageInfo}; use scale_info::TypeInfo; @@ -23,7 +24,6 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_io::hashing::blake2_256; use sp_runtime::{traits::TrailingZeroInput, DispatchError}; -use sp_std::vec::Vec; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 07778646237ed..d687f9fdfa104 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1741,9 +1741,9 @@ pub fn show_benchmark_debug_info( * Components: {:?}\n\ * Verify: {:?}\n\ * Error message: {}", - sp_std::str::from_utf8(instance_string) + alloc::str::from_utf8(instance_string) .expect("it's all just strings ran through the wasm interface. qed"), - sp_std::str::from_utf8(benchmark) + alloc::str::from_utf8(benchmark) .expect("it's all just strings ran through the wasm interface. 
qed"), components, verify, diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index fac0054359060..cb9fa1f964b06 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 3558847c8fedd..de93ba5c4ce7a 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -21,6 +21,7 @@ use super::*; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; @@ -177,7 +178,7 @@ benchmarks_instance_pallet! { Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; }: close_bounty(approve_origin, bounty_id) close_bounty_active { @@ -186,7 +187,7 @@ benchmarks_instance_pallet! { Treasury::::on_initialize(BlockNumberFor::::zero()); let bounty_id = BountyCount::::get() - 1; let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; }: close_bounty(approve_origin, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) @@ -231,5 +232,5 @@ benchmarks_instance_pallet! 
{ } } - impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) + impl_benchmark_test_suite!(Bounties, crate::tests::ExtBuilder::default().build(), crate::tests::Test) } diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index c930868bf1015..7b89a6e3e76f5 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -87,7 +87,9 @@ pub mod migrations; mod tests; pub mod weights; -use sp_std::prelude::*; +extern crate alloc; + +use alloc::vec::Vec; use frame_support::traits::{ Currency, ExistenceRequirement::AllowDeath, Get, Imbalance, OnUnbalanced, ReservableCurrency, @@ -245,6 +247,9 @@ pub mod pallet { /// The child bounty manager. type ChildBountyManager: ChildBountyManager>; + + /// Handler for the unbalanced decrease when slashing for a rejected bounty. + type OnSlash: OnUnbalanced>; } #[pallet::error] @@ -804,6 +809,54 @@ pub mod pallet { Ok(()) } } + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } +} + +#[cfg(any(feature = "try-runtime", test))] +impl, I: 'static> Pallet { + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_bounties_count()?; + + Ok(()) + } + + /// # Invariants + /// + /// * `BountyCount` should be greater or equals to the length of the number of items in + /// `Bounties`. + /// * `BountyCount` should be greater or equals to the length of the number of items in + /// `BountyDescriptions`. + /// * Number of items in `Bounties` should be the same as `BountyDescriptions` length. + fn try_state_bounties_count() -> Result<(), sp_runtime::TryRuntimeError> { + let bounties_length = Bounties::::iter().count() as u32; + + ensure!( + >::get() >= bounties_length, + "`BountyCount` must be grater or equals the number of `Bounties` in storage" + ); + + let bounties_description_length = BountyDescriptions::::iter().count() as u32; + ensure!( + >::get() >= bounties_description_length, + "`BountyCount` must be grater or equals the number of `BountiesDescriptions` in storage." + ); + + ensure!( + bounties_length == bounties_description_length, + "Number of `Bounties` in storage must be the same as the Number of `BountiesDescription` in storage." + ); + Ok(()) + } } impl, I: 'static> Pallet { diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index a89f4ff9fbf30..7cd4798267450 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -66,23 +66,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); pub static Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -95,13 +83,8 @@ parameter_types! { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -122,13 +105,8 @@ impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId2; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -167,6 +145,7 @@ impl Config for Test { type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); + type OnSlash = (); } impl Config for Test { @@ -182,23 +161,42 @@ impl Config for Test { type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); + type OnSlash = (); } type TreasuryError = pallet_treasury::Error; type TreasuryError1 = pallet_treasury::Error; -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { - system: frame_system::GenesisConfig::default(), - balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, - treasury: Default::default(), - treasury_1: Default::default(), +pub struct ExtBuilder {} + +impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } +} + +impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig { + system: frame_system::GenesisConfig::default(), + balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] }, + treasury: Default::default(), + treasury_1: Default::default(), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Bounties::do_try_state().expect("All invariants must hold after a test"); + Bounties1::do_try_state().expect("All invariants must hold after a test"); + }) } - .build_storage() - .unwrap() - .into(); - ext.execute_with(|| System::set_block_number(1)); - ext } fn last_event() -> BountiesEvent { @@ -212,7 +210,7 @@ fn last_event() -> BountiesEvent { #[test] fn genesis_config_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { assert_eq!(Treasury::pot(), 0); assert_eq!(Treasury::proposal_count(), 0); }); @@ -220,63 +218,19 @@ fn genesis_config_works() { #[test] fn minting_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Check that accumulate works when we have Some value in Dummy already. 
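// `Treasury::pot()` is the treasury account's free balance minus the
// existential deposit (1 in this mock), so endowing the account with 101
// units leaves a spendable pot of exactly 100.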
Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); }); } -#[test] -fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - TreasuryError::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -286,7 +240,7 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { #[test] fn unused_pot_should_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { let init_total_issuance = Balances::total_issuance(); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::total_issuance(), init_total_issuance + 100); @@ -297,112 +251,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - pallet_treasury::Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - -#[test] -fn accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - TreasuryError::InvalidIndex - ); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -412,18 +267,11 @@ fn accepted_spend_proposal_enacted_on_spend_period() { #[test] fn pot_underflow_should_not_diminish() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 150, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -439,31 +287,17 @@ fn pot_underflow_should_not_diminish() { // i.e. pot should not include existential deposit needed for account survival. 
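// The test below first tries to spend more than the pot can cover and then
// spends the whole pot, checking that the treasury account always keeps at
// least the existential deposit and is therefore never reaped.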
#[test] fn treasury_account_doesnt_get_deleted() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), treasury_balance, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), Treasury::pot(), 3) }); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -486,22 +320,8 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 99, 3) }); + assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 1, 3) }); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -519,7 +339,7 @@ fn inexistent_account_works() { #[test] fn propose_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -556,7 +376,7 @@ fn propose_bounty_works() { #[test] fn propose_bounty_validation_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -585,7 +405,7 @@ fn propose_bounty_validation_works() { #[test] fn close_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!(Bounties::close_bounty(RuntimeOrigin::root(), 0), Error::::InvalidIndex); @@ -610,7 +430,7 @@ fn close_bounty_works() { #[test] fn approve_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( @@ -671,7 +491,7 @@ fn approve_bounty_works() { #[test] fn assign_curator_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -741,7 +561,7 @@ fn assign_curator_works() { #[test] fn unassign_curator_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); 
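// As with the other tests converted to `build_and_execute`, the bounty count
// invariants (`do_try_state`) are re-checked once this closure returns.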
Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -794,7 +614,7 @@ fn unassign_curator_works() { #[test] fn award_and_claim_bounty_works() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); @@ -861,7 +681,7 @@ fn award_and_claim_bounty_works() { #[test] fn claim_handles_high_fee() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 30); @@ -902,7 +722,7 @@ fn claim_handles_high_fee() { #[test] fn cancel_and_refund() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -945,7 +765,7 @@ fn cancel_and_refund() { #[test] fn award_and_cancel() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -988,7 +808,7 @@ fn award_and_cancel() { #[test] fn expire_and_unassign() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -1036,7 +856,7 @@ fn expire_and_unassign() { #[test] fn extend_expiry() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); @@ -1172,7 +992,7 @@ fn genesis_funding_works() { #[test] fn unassign_curator_self() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -1213,7 +1033,7 @@ fn unassign_curator_self() { fn accept_curator_handles_different_deposit_calculations() { // This test will verify that a bounty with and without a fee results // in a different curator deposit: one using the value, and one using the fee. - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Case 1: With a fee let user = 1; let bounty_index = 0; @@ -1290,7 +1110,7 @@ fn accept_curator_handles_different_deposit_calculations() { #[test] fn approve_bounty_works_second_instance() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { // Set burn to 0 to make tracking funds easier. 
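// With `Burn` forced to 0%, the spend period no longer destroys half of the
// unspent pot between blocks, so fund movements in this test stay exact and
// easy to track.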
Burn::set(Permill::from_percent(0)); @@ -1316,7 +1136,7 @@ fn approve_bounty_works_second_instance() { #[test] fn approve_bounty_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -1334,7 +1154,7 @@ fn approve_bounty_insufficient_spend_limit_errors() { #[test] fn approve_bounty_instance1_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury1::account_id(), 101); @@ -1352,7 +1172,7 @@ fn approve_bounty_instance1_insufficient_spend_limit_errors() { #[test] fn propose_curator_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -1375,7 +1195,7 @@ fn propose_curator_insufficient_spend_limit_errors() { #[test] fn propose_curator_instance1_insufficient_spend_limit_errors() { - new_test_ext().execute_with(|| { + ExtBuilder::default().build_and_execute(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 8a84fbfdfb701..7593a4e8da14a 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -16,22 +16,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -bitvec = { version = "1.0.0", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +bitvec = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-io = { path = "../../primitives/io" } -sp-tracing = { path = "../../primitives/tracing" } -pretty_assertions = "1.3.0" +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 9cb5ad096c83b..33df56c95f65d 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ 
b/substrate/frame/broker/src/benchmarking.rs @@ -20,6 +20,7 @@ use super::*; use crate::{CoreAssignment::Task, Pallet as Broker}; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v2::*; use frame_support::{ storage::bounded_vec::BoundedVec, @@ -32,7 +33,6 @@ use frame_system::{Pallet as System, RawOrigin}; use sp_arithmetic::{traits::Zero, Perbill}; use sp_core::Get; use sp_runtime::{traits::BlockNumberProvider, Saturating}; -use sp_std::{vec, vec::Vec}; const SEED: u32 = 0; const MAX_CORE_COUNT: u16 = 1_000; @@ -99,7 +99,7 @@ fn setup_and_start_sale() -> Result { // Assume Leases to be filled for worst case setup_leases::(T::MaxLeasedCores::get(), 1, 10); - Broker::::do_start_sales(10u32.into(), MAX_CORE_COUNT.into()) + Broker::::do_start_sales(10_000_000u32.into(), MAX_CORE_COUNT.into()) .map_err(|_| BenchmarkError::Weightless)?; Ok(T::MaxReservedCores::get() @@ -201,7 +201,7 @@ mod benches { let latest_region_begin = Broker::::latest_timeslice_ready_to_commit(&config); - let initial_price = 10u32.into(); + let initial_price = 10_000_000u32.into(); let origin = T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -214,8 +214,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 1000u32.into(), - end_price: 10u32.into(), + start_price: 1_000_000_000u32.into(), + end_price: 10_000_000u32.into(), region_begin: latest_region_begin + config.region_length, region_end: latest_region_begin + config.region_length * 2, ideal_cores_sold: 0, @@ -240,13 +240,13 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 10u32.into()); + _(RawOrigin::Signed(caller.clone()), 10_000_000u32.into()); - assert_eq!(SaleInfo::::get().unwrap().sellout_price, Some(10u32.into())); + assert_eq!(SaleInfo::::get().unwrap().sellout_price, Some(10_000_000u32.into())); assert_last_event::( Event::Purchased { who: caller, @@ -255,7 +255,7 @@ mod benches { core, mask: CoreMask::complete(), }, - price: 10u32.into(), + price: 10_000_000u32.into(), duration: 3u32.into(), } .into(), @@ -274,10 +274,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(20u32.into()), + T::Currency::minimum_balance().saturating_add(20_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; Broker::::do_assign(region, None, 1001, Final) @@ -303,10 +303,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -336,10 +336,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + 
T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -368,10 +368,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -404,10 +404,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] @@ -439,10 +439,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -475,14 +475,14 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); T::Currency::set_balance( &Broker::::account_id(), - T::Currency::minimum_balance().saturating_add(200u32.into()), + T::Currency::minimum_balance().saturating_add(200_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -491,7 +491,7 @@ mod benches { Broker::::do_pool(region, None, recipient.clone(), Final) .map_err(|_| BenchmarkError::Weightless)?; - let revenue = 10u32.into(); + let revenue = 10_000_000u32.into(); InstaPoolHistory::::insert( region.begin, InstaPoolHistoryRecord { @@ -508,7 +508,7 @@ mod benches { assert_last_event::( Event::RevenueClaimPaid { who: recipient, - amount: 200u32.into(), + amount: 200_000_000u32.into(), next: if m < new_config_record::().region_length { Some(RegionId { begin: region.begin.saturating_add(m), @@ -534,11 +534,11 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(30u32.into()), + T::Currency::minimum_balance().saturating_add(30_000_000u32.into()), ); T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ 
-549,10 +549,11 @@ mod benches { let beneficiary: RelayAccountIdOf = account("beneficiary", 0, SEED); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 20u32.into(), beneficiary.clone()); + _(RawOrigin::Signed(caller.clone()), 20_000_000u32.into(), beneficiary.clone()); assert_last_event::( - Event::CreditPurchased { who: caller, beneficiary, amount: 20u32.into() }.into(), + Event::CreditPurchased { who: caller, beneficiary, amount: 20_000_000u32.into() } + .into(), ); Ok(()) @@ -568,10 +569,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; advance_to::( @@ -602,10 +603,10 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(10u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) .map_err(|_| BenchmarkError::Weightless)?; let recipient: T::AccountId = account("recipient", 0, SEED); @@ -634,7 +635,7 @@ mod benches { fn drop_history() -> Result<(), BenchmarkError> { setup_and_start_sale::()?; let when = 5u32.into(); - let revenue = 10u32.into(); + let revenue = 10_000_000u32.into(); let region_len = Configuration::::get().unwrap().region_length; advance_to::( @@ -672,7 +673,7 @@ mod benches { let id = PotentialRenewalId { core, when }; let record = PotentialRenewalRecord { - price: 1u32.into(), + price: 1_000_000u32.into(), completion: CompletionStatus::Complete(new_schedule()), }; PotentialRenewals::::insert(id, record); @@ -732,23 +733,27 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(30u32.into()), + T::Currency::minimum_balance().saturating_add(30_000_000u32.into()), + ); + T::Currency::set_balance( + &Broker::::account_id(), + T::Currency::minimum_balance().saturating_add(90_000_000u32.into()), ); - T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); let multiplicator = 5; - ::ensure_notify_revenue_info( - (timeslice_period * multiplicator).into(), - 10u32.into(), - ); + + RevenueInbox::::put(OnDemandRevenueRecord { + until: (timeslice_period * multiplicator).into(), + amount: 10_000_000u32.into(), + }); let timeslice = multiplicator - 1; InstaPoolHistory::::insert( timeslice, InstaPoolHistoryRecord { - private_contributions: 1u32.into(), - system_contributions: 9u32.into(), + private_contributions: 4u32.into(), + system_contributions: 6u32.into(), maybe_payout: None, }, ); @@ -761,8 +766,8 @@ mod benches { assert_last_event::( Event::ClaimsReady { when: timeslice.into(), - system_payout: 9u32.into(), - private_payout: 1u32.into(), + system_payout: 6_000_000u32.into(), + private_payout: 4_000_000u32.into(), } .into(), ); @@ -776,7 +781,7 @@ mod benches { let config = new_config_record::(); let now = frame_system::Pallet::::block_number(); - let end_price = 10u32.into(); + let end_price = 10_000_000u32.into(); let 
commit_timeslice = Broker::::latest_timeslice_ready_to_commit(&config); let sale = SaleInfoRecordOf:: { sale_start: now, @@ -815,8 +820,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 1000u32.into(), - end_price: 10u32.into(), + start_price: 1_000_000_000u32.into(), + end_price: 10_000_000u32.into(), region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, @@ -889,6 +894,7 @@ mod benches { T::Coretime::request_revenue_info_at(rc_block); } } + #[benchmark] fn notify_core_count() -> Result<(), BenchmarkError> { let admin_origin = @@ -901,6 +907,21 @@ mod benches { Ok(()) } + #[benchmark] + fn notify_revenue() -> Result<(), BenchmarkError> { + let admin_origin = + T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _( + admin_origin as T::RuntimeOrigin, + OnDemandRevenueRecord { until: 100u32.into(), amount: 100_000_000u32.into() }, + ); + + assert!(RevenueInbox::::take().is_some()); + Ok(()) + } + #[benchmark] fn do_tick_base() -> Result<(), BenchmarkError> { setup_and_start_sale::()?; @@ -939,6 +960,31 @@ mod benches { Ok(()) } + #[benchmark] + fn on_new_timeslice() -> Result<(), BenchmarkError> { + setup_and_start_sale::()?; + + advance_to::(2); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::set_balance( + &caller.clone(), + T::Currency::minimum_balance().saturating_add(u32::MAX.into()), + ); + + let _region = Broker::::do_purchase(caller.clone(), (u32::MAX / 2).into()) + .map_err(|_| BenchmarkError::Weightless)?; + + let timeslice = Broker::::current_timeslice(); + + #[block] + { + T::Coretime::on_new_timeslice(timeslice); + } + + Ok(()) + } + // Implements a test for each benchmark. Execute with: // `cargo test -p pallet-broker --features runtime-benchmarks`. impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/broker/src/coretime_interface.rs b/substrate/frame/broker/src/coretime_interface.rs index 58efa7fa92bb0..9c18e2c4ff0b3 100644 --- a/substrate/frame/broker/src/coretime_interface.rs +++ b/substrate/frame/broker/src/coretime_interface.rs @@ -17,13 +17,16 @@ #![deny(missing_docs)] +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; +use core::fmt::Debug; use frame_support::Parameter; use scale_info::TypeInfo; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_core::RuntimeDebug; use sp_runtime::traits::BlockNumberProvider; -use sp_std::vec::Vec; + +use crate::Timeslice; /// Index of a Polkadot Core. pub type CoreIndex = u16; @@ -62,7 +65,7 @@ pub trait CoretimeInterface { type AccountId: Parameter; /// A (Relay-chain-side) balance. - type Balance: AtLeast32BitUnsigned; + type Balance: AtLeast32BitUnsigned + Encode + Decode + MaxEncodedLen + TypeInfo + Debug; /// A provider for the relay chain block number. type RelayChainBlockNumberProvider: BlockNumberProvider; @@ -107,22 +110,10 @@ pub trait CoretimeInterface { end_hint: Option>, ); - /// Provide the amount of revenue accumulated from Instantaneous Coretime Sales from Relay-chain - /// block number `last_until` to `until`, not including `until` itself. `last_until` is defined - /// as being the `until` argument of the last `notify_revenue` message sent, or zero for the - /// first call. If `revenue` is `None`, this indicates that the information is no longer - /// available. 
- /// - /// This explicitly disregards the possibility of multiple parachains requesting and being - /// notified of revenue information. The Relay-chain must be configured to ensure that only a - /// single revenue information destination exists. - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)>; - - /// Ensure that revenue information is updated to the provided value. - /// - /// This is only used for benchmarking. - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance); + /// A hook that is expected to be called right after a new timeslice has begun. Likely to be + /// used for batching different matters that happened during the timeslice and may benefit from + /// batched processing. + fn on_new_timeslice(_timeslice: Timeslice) {} } impl CoretimeInterface for () { @@ -140,9 +131,4 @@ impl CoretimeInterface for () { _end_hint: Option>, ) { } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - None - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(_when: RCBlockNumberOf, _revenue: Self::Balance) {} } diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 79c1a1f797963..9e7a56e52812e 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -16,6 +16,7 @@ // limitations under the License. use super::*; +use coretime_interface::CoretimeInterface; use frame_support::{ pallet_prelude::{DispatchResult, *}, traits::{fungible::Mutate, tokens::Preservation::Expendable, DefensiveResult}, @@ -458,6 +459,11 @@ impl Pallet { Ok(()) } + pub(crate) fn do_notify_revenue(revenue: OnDemandRevenueRecordOf) -> DispatchResult { + RevenueInbox::::put(revenue); + Ok(()) + } + pub(crate) fn do_swap_leases(id: TaskId, other: TaskId) -> DispatchResult { let mut id_leases_count = 0; let mut other_leases_count = 0; @@ -472,7 +478,6 @@ impl Pallet { } }) }); - Ok(()) } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 0774c02e1cf10..45c33c1bfa615 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -47,25 +47,27 @@ pub use core_mask::*; pub use coretime_interface::*; pub use types::*; +extern crate alloc; + /// The log target for this pallet. const LOG_TARGET: &str = "runtime::broker"; #[frame_support::pallet] pub mod pallet { use super::*; + use alloc::vec::Vec; use frame_support::{ pallet_prelude::{DispatchResult, DispatchResultWithPostInfo, *}, traits::{ fungible::{Balanced, Credit, Mutate}, - EnsureOrigin, OnUnbalanced, + BuildGenesisConfig, EnsureOrigin, OnUnbalanced, }, PalletId, }; use frame_system::pallet_prelude::*; use sp_runtime::traits::{Convert, ConvertBack}; - use sp_std::vec::Vec; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -174,6 +176,10 @@ pub mod pallet { #[pallet::storage] pub type CoreCountInbox = StorageValue<_, CoreIndex, OptionQuery>; + /// Received revenue info from the relay chain.
+ #[pallet::storage] + pub type RevenueInbox = StorageValue<_, OnDemandRevenueRecordOf, OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -488,6 +494,20 @@ pub mod pallet { NoClaimTimeslices, } + #[derive(frame_support::DefaultNoBound)] + #[pallet::genesis_config] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: core::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + frame_system::Pallet::::inc_providers(&Pallet::::account_id()); + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(_now: BlockNumberFor) -> Weight { @@ -804,6 +824,17 @@ pub mod pallet { Ok(()) } + #[pallet::call_index(20)] + #[pallet::weight(T::WeightInfo::notify_revenue())] + pub fn notify_revenue( + origin: OriginFor, + revenue: OnDemandRevenueRecordOf, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_notify_revenue(revenue)?; + Ok(()) + } + #[pallet::call_index(99)] #[pallet::weight(T::WeightInfo::swap_leases())] pub fn swap_leases(origin: OriginFor, id: TaskId, other: TaskId) -> DispatchResult { diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs index f354e447fe84e..c2a243d6f0e8e 100644 --- a/substrate/frame/broker/src/migration.rs +++ b/substrate/frame/broker/src/migration.rs @@ -23,9 +23,9 @@ use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; use sp_runtime::Saturating; #[cfg(feature = "try-runtime")] -use frame_support::ensure; +use alloc::vec::Vec; #[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; +use frame_support::ensure; mod v1 { use super::*; @@ -128,6 +128,36 @@ mod v2 { } } +mod v3 { + use super::*; + use frame_system::Pallet as System; + + pub struct MigrateToV3Impl(PhantomData); + + impl UncheckedOnRuntimeUpgrade for MigrateToV3Impl { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let acc = Pallet::::account_id(); + System::::inc_providers(&acc); + // calculate and return migration weights + T::DbWeight::get().writes(1) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok(System::::providers(&Pallet::::account_id()).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let old_providers = u32::decode(&mut &state[..]).expect("Known good"); + let new_providers = System::::providers(&Pallet::::account_id()) as u32; + + ensure!(new_providers == old_providers + 1, "Providers count should increase by one"); + Ok(()) + } + } +} + /// Migrate the pallet storage from `0` to `1`. 
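The new `notify_revenue` call above does no processing of its own: after the origin check, `do_notify_revenue` simply parks the record in `RevenueInbox`, and the payout logic only runs on the next tick via `process_revenue` (see the `tick_impls.rs` changes below). A minimal sketch of a unit test for that behaviour, assuming the pallet's mock runtime from `mock.rs`; the test and its literal values are illustrative and not part of this change:

```rust
// Illustrative only: exercises the new inbox via the crate-internal helper.
// `TestExt`, `Broker` and `Test` come from the pallet's mock; the numbers are arbitrary.
#[test]
fn notify_revenue_is_parked_in_the_inbox() {
    TestExt::new().execute_with(|| {
        let record = OnDemandRevenueRecord { until: 10, amount: 7 };
        assert_ok!(Broker::do_notify_revenue(record.clone()));
        // Nothing is paid out yet; the record waits for the next tick's `process_revenue`.
        assert_eq!(RevenueInbox::<Test>::get(), Some(record));
    });
}
```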
pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< 0, @@ -144,3 +174,11 @@ pub type MigrateV1ToV2 = frame_support::migrations::VersionedMigration< Pallet, ::DbWeight, >; + +pub type MigrateV2ToV3 = frame_support::migrations::VersionedMigration< + 2, + 3, + v3::MigrateToV3Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index 6fff6aa10080c..6b1d2bbf7015d 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -18,6 +18,7 @@ #![cfg(test)] use crate::{test_fungibles::TestFungibles, *}; +use alloc::collections::btree_map::BTreeMap; use frame_support::{ assert_ok, derive_impl, ensure, ord_parameter_types, parameter_types, traits::{ @@ -34,7 +35,6 @@ use sp_runtime::{ traits::{BlockNumberProvider, Identity}, BuildStorage, Saturating, }; -use sp_std::collections::btree_map::BTreeMap; type Block = frame_system::mocking::MockBlock; @@ -70,7 +70,6 @@ parameter_types! { pub static CoretimeWorkplan: BTreeMap<(u32, CoreIndex), Vec<(CoreAssignment, PartsOf57600)>> = Default::default(); pub static CoretimeUsage: BTreeMap> = Default::default(); pub static CoretimeInPool: CoreMaskBitCount = 0; - pub static NotifyRevenueInfo: Vec<(u32, u64)> = Default::default(); } pub struct TestCoretimeProvider; @@ -90,11 +89,10 @@ impl CoretimeInterface for TestCoretimeProvider { ); } - let when = when as u32; let mut total = 0; CoretimeSpending::mutate(|s| { s.retain(|(n, a)| { - if *n < when { + if *n < when as u32 { total += a; false } else { @@ -102,7 +100,8 @@ impl CoretimeInterface for TestCoretimeProvider { } }) }); - NotifyRevenueInfo::mutate(|s| s.insert(0, (when, total))); + mint_to_pot(total); + RevenueInbox::::put(OnDemandRevenueRecord { until: when, amount: total }); } fn credit_account(who: Self::AccountId, amount: Self::Balance) { CoretimeCredit::mutate(|c| c.entry(who).or_default().saturating_accrue(amount)); @@ -125,19 +124,13 @@ impl CoretimeInterface for TestCoretimeProvider { ); CoretimeTrace::mutate(|v| v.push(item)); } - fn check_notify_revenue_info() -> Option<(RCBlockNumberOf, Self::Balance)> { - NotifyRevenueInfo::mutate(|s| s.pop()).map(|v| (v.0 as _, v.1)) - } - #[cfg(feature = "runtime-benchmarks")] - fn ensure_notify_revenue_info(when: RCBlockNumberOf, revenue: Self::Balance) { - NotifyRevenueInfo::mutate(|s| s.push((when as u32, revenue))); - } } + impl TestCoretimeProvider { - pub fn spend_instantaneous(who: u64, price: u64) -> Result<(), ()> { - let mut c = CoretimeCredit::get(); + pub fn spend_instantaneous(_who: u64, price: u64) -> Result<(), ()> { + let c = CoretimeCredit::get(); ensure!(CoretimeInPool::get() > 0, ()); - c.insert(who, c.get(&who).ok_or(())?.checked_sub(price).ok_or(())?); + // c.insert(who, c.get(&who).ok_or(())?.checked_sub(price).ok_or(())?); CoretimeCredit::set(c); CoretimeSpending::mutate(|v| { v.push((RCBlockNumberProviderOf::::current_block_number() as u32, price)) @@ -223,6 +216,11 @@ pub fn pot() -> u64 { balance(Broker::account_id()) } +pub fn mint_to_pot(amount: u64) { + let imb = ::Currency::issue(amount); + let _ = ::Currency::resolve(&Broker::account_id(), imb); +} + pub fn revenue() -> u64 { balance(0) } diff --git a/substrate/frame/broker/src/nonfungible_impl.rs b/substrate/frame/broker/src/nonfungible_impl.rs index 80dcc175df539..e272ecbe0081b 100644 --- a/substrate/frame/broker/src/nonfungible_impl.rs +++ b/substrate/frame/broker/src/nonfungible_impl.rs @@ -16,11 +16,11 @@ // limitations under the License. 
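The `MigrateV2ToV3` alias added in the migration changes above only bumps the storage version to 3 and adds a provider reference for the pallet's account, mirroring the new genesis build. A hedged sketch of how a runtime embedding this pallet might schedule it; `Runtime`, `Block`, `AllPalletsWithSystem` and the `Executive` wiring are placeholders and not part of this change:

```rust
// Hypothetical runtime-side wiring; only `pallet_broker::migration::MigrateV2ToV3`
// comes from this change, everything else here is a placeholder.
pub type Migrations = (
    pallet_broker::migration::MigrateV2ToV3<Runtime>,
    // ... any other pending migrations of the runtime ...
);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;
```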
use super::*; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::{DispatchResult, *}, traits::nonfungible::{Inspect, Mutate, Transfer}, }; -use sp_std::vec::Vec; impl Inspect for Pallet { type ItemId = u128; diff --git a/substrate/frame/broker/src/test_fungibles.rs b/substrate/frame/broker/src/test_fungibles.rs index d18bff1495331..b0a06fc1a326d 100644 --- a/substrate/frame/broker/src/test_fungibles.rs +++ b/substrate/frame/broker/src/test_fungibles.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use alloc::collections::btree_map::BTreeMap; use codec::{Decode, Encode}; use frame_support::{ parameter_types, @@ -29,7 +30,6 @@ use scale_info::TypeInfo; use sp_arithmetic::traits::Zero; use sp_core::{Get, TypedGet}; use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::collections::btree_map::BTreeMap; parameter_types! { static TestAssetOf: BTreeMap<(u32, Vec), Vec> = Default::default(); diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index e953afd6dc3c8..2a8ea24b447ad 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -451,6 +451,8 @@ fn renewals_affect_price() { #[test] fn instapool_payouts_work() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); @@ -458,11 +460,13 @@ fn instapool_payouts_work() { advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); assert_ok!(Broker::do_pool(region, None, 2, Final)); - assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 20, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); advance_to(11); - assert_eq!(pot(), 14); + // Should get revenue amount 10 from RC, from which 6 is system payout (goes to account0 + // instantly) and the rest is private (kept in the pot until claimed) + assert_eq!(pot(), 4); assert_eq!(revenue(), 106); // Cannot claim for 0 timeslices. @@ -470,13 +474,15 @@ fn instapool_payouts_work() { // Revenue can be claimed. 
assert_ok!(Broker::do_claim_revenue(region, 100)); - assert_eq!(pot(), 10); + assert_eq!(pot(), 0); assert_eq!(balance(2), 4); }); } #[test] fn instapool_partial_core_payouts_work() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); @@ -487,7 +493,7 @@ fn instapool_partial_core_payouts_work() { Broker::do_interlace(region, None, CoreMask::from_chunk(0, 20)).unwrap(); assert_ok!(Broker::do_pool(region1, None, 2, Final)); assert_ok!(Broker::do_pool(region2, None, 3, Final)); - assert_ok!(Broker::do_purchase_credit(1, 40, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 40, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 40)); advance_to(11); @@ -502,6 +508,8 @@ fn instapool_partial_core_payouts_work() { #[test] fn instapool_core_payouts_work_with_partitioned_region() { + // Commented out code is from the reference test implementation and should be uncommented as + // soon as we have the credit system implemented TestExt::new().endow(1, 1000).execute_with(|| { assert_ok!(Broker::do_start_sales(100, 1)); advance_to(2); @@ -514,14 +522,14 @@ fn instapool_core_payouts_work_with_partitioned_region() { // coretime will be purchased from `region2`. assert_ok!(Broker::do_pool(region1, None, 2, Final)); assert_ok!(Broker::do_pool(region2, None, 3, Final)); - assert_ok!(Broker::do_purchase_credit(1, 20, 1)); + // assert_ok!(Broker::do_purchase_credit(1, 20, 1)); advance_to(8); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); advance_to(11); - assert_eq!(pot(), 20); + assert_eq!(pot(), 10); assert_eq!(revenue(), 100); assert_ok!(Broker::do_claim_revenue(region1, 100)); - assert_eq!(pot(), 10); + assert_eq!(pot(), 0); assert_eq!(balance(2), 10); advance_to(12); assert_ok!(TestCoretimeProvider::spend_instantaneous(1, 10)); diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 20637cf7b903c..71a1286d73915 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -16,10 +16,10 @@ // limitations under the License. 
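The updated expectations above (`pot() == 4`, `revenue() == 106`, and the `6_000_000`/`4_000_000` split in the benchmark's `ClaimsReady` event) all follow from the proportional split computed by `process_revenue` in the `tick_impls.rs` changes that follow: the system share of the reported revenue is paid out immediately, and the remainder stays in the pot until claimed. A standalone sketch of that arithmetic with the 6 : 4 contribution ratio used by the tests and the benchmark (plain `u64` maths here; the pallet uses saturating operations on its balance types):

```rust
// Standalone illustration of the payout split performed by `process_revenue`.
fn split_revenue(revenue: u64, system_contributions: u64, private_contributions: u64) -> (u64, u64) {
    let total = system_contributions + private_contributions;
    let system_payout = revenue * system_contributions / total; // paid out right away
    let private_payout = revenue - system_payout; // claimable later via `claim_revenue`
    (system_payout, private_payout)
}

fn main() {
    // 10 units of on-demand revenue against 6 system : 4 private pool contributions.
    assert_eq!(split_revenue(10, 6, 4), (6, 4));
    // The benchmark uses the same ratio at a larger scale.
    assert_eq!(split_revenue(10_000_000, 6, 4), (6_000_000, 4_000_000));
}
```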
use super::*; -use frame_support::{pallet_prelude::*, weights::WeightMeter}; +use alloc::{vec, vec::Vec}; +use frame_support::{pallet_prelude::*, traits::defensive_prelude::*, weights::WeightMeter}; use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero}; use sp_runtime::traits::ConvertBack; -use sp_std::{vec, vec::Vec}; use CompletionStatus::Complete; impl Pallet { @@ -76,6 +76,8 @@ impl Pallet { let rc_block = T::TimeslicePeriod::get() * status.last_timeslice.into(); T::Coretime::request_revenue_info_at(rc_block); meter.consume(T::WeightInfo::request_revenue_info_at()); + T::Coretime::on_new_timeslice(status.last_timeslice); + meter.consume(T::WeightInfo::on_new_timeslice()); } Status::::put(&status); @@ -93,15 +95,23 @@ impl Pallet { } pub(crate) fn process_revenue() -> bool { - let Some((until, amount)) = T::Coretime::check_notify_revenue_info() else { return false }; + let Some(OnDemandRevenueRecord { until, amount }) = RevenueInbox::::take() else { + return false + }; let when: Timeslice = (until / T::TimeslicePeriod::get()).saturating_sub(One::one()).saturated_into(); - let mut revenue = T::ConvertBalance::convert_back(amount); + let mut revenue = T::ConvertBalance::convert_back(amount.clone()); if revenue.is_zero() { Self::deposit_event(Event::::HistoryDropped { when, revenue }); InstaPoolHistory::::remove(when); return true } + + log::debug!( + target: "pallet_broker::process_revenue", + "Received {amount:?} from RC, converted into {revenue:?} revenue", + ); + let mut r = InstaPoolHistory::::get(when).unwrap_or_default(); if r.maybe_payout.is_some() { Self::deposit_event(Event::::HistoryIgnored { when, revenue }); @@ -112,7 +122,7 @@ impl Pallet { let system_payout = if !total_contrib.is_zero() { let system_payout = revenue.saturating_mul(r.system_contributions.into()) / total_contrib.into(); - let _ = Self::charge(&Self::account_id(), system_payout); + Self::charge(&Self::account_id(), system_payout).defensive_ok(); revenue.saturating_reduce(system_payout); system_payout @@ -120,6 +130,11 @@ impl Pallet { Zero::zero() }; + + log::debug!( + target: "pallet_broker::process_revenue", + "Charged {system_payout:?} for system payouts, {revenue:?} remaining for private contributions", + ); + if !revenue.is_zero() && r.private_contributions > 0 { r.maybe_payout = Some(revenue); InstaPoolHistory::::insert(when, &r); diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 885cac9a5c23d..dcfa9a77e4f3a 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -255,6 +255,21 @@ pub struct LeaseRecordItem { pub type LeasesRecord = BoundedVec; pub type LeasesRecordOf = LeasesRecord<::MaxLeasedCores>; +/// Record for on-demand core sales. +/// +/// The block number `until` is the relay chain block height up to which the original request +/// for revenue was made. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct OnDemandRevenueRecord { + /// The height of the Relay-chain at the time the revenue request was made. + pub until: RelayBlockNumber, + /// The accumulated balance of on-demand sales made on the relay chain. + pub amount: RelayBalance, +} + +pub type OnDemandRevenueRecordOf = + OnDemandRevenueRecord, RelayBalanceOf>; + /// Configuration of this pallet.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ConfigRecord { diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index d9d9d348e47e9..4889c2577ddd8 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,8 +75,10 @@ pub trait WeightInfo { fn process_core_schedule() -> Weight; fn request_revenue_info_at() -> Weight; fn notify_core_count() -> Weight; + fn notify_revenue() -> Weight; fn do_tick_base() -> Weight; fn swap_leases() -> Weight; + fn on_new_timeslice() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -88,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_945_000 picoseconds. - Weight::from_parts(2_142_000, 0) + // Minimum execution time: 1_977_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -98,8 +100,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 16_274_000 picoseconds. - Weight::from_parts(16_828_000, 7496) + // Minimum execution time: 16_880_000 picoseconds. + Weight::from_parts(17_506_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 15_080_000 picoseconds. - Weight::from_parts(15_874_000, 7496) + // Minimum execution time: 15_569_000 picoseconds. + Weight::from_parts(16_123_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -120,8 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 8_761_000 picoseconds. - Weight::from_parts(9_203_000, 1526) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_389_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -144,10 +146,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 26_057_000 picoseconds. - Weight::from_parts(46_673_357, 8499) - // Standard Error: 456 - .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) + // Minimum execution time: 27_119_000 picoseconds. 
+ Weight::from_parts(47_930_900, 8499) + // Standard Error: 464 + .saturating_add(Weight::from_parts(2_940, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -165,8 +167,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `651` // Estimated: `2136` - // Minimum execution time: 40_907_000 picoseconds. - Weight::from_parts(42_566_000, 2136) + // Minimum execution time: 42_429_000 picoseconds. + Weight::from_parts(43_538_000, 2136) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -188,8 +190,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `769` // Estimated: `4698` - // Minimum execution time: 65_209_000 picoseconds. - Weight::from_parts(68_604_000, 4698) + // Minimum execution time: 62_957_000 picoseconds. + Weight::from_parts(66_821_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -199,8 +201,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 15_860_000 picoseconds. - Weight::from_parts(16_393_000, 3551) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(16_775_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -210,8 +212,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 17_651_000 picoseconds. - Weight::from_parts(18_088_000, 3551) + // Minimum execution time: 17_720_000 picoseconds. + Weight::from_parts(18_916_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -221,8 +223,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 18_576_000 picoseconds. - Weight::from_parts(19_810_000, 3551) + // Minimum execution time: 19_088_000 picoseconds. + Weight::from_parts(19_732_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -238,8 +240,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 31_015_000 picoseconds. - Weight::from_parts(31_932_000, 4681) + // Minimum execution time: 30_522_000 picoseconds. + Weight::from_parts(31_573_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -257,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 36_473_000 picoseconds. - Weight::from_parts(37_382_000, 5996) + // Minimum execution time: 35_833_000 picoseconds. + Weight::from_parts(36_830_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -273,10 +275,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 64_957_000 picoseconds. - Weight::from_parts(66_024_232, 6196) - // Standard Error: 50_170 - .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) + // Minimum execution time: 65_882_000 picoseconds. 
+ Weight::from_parts(67_506_904, 6196) + // Standard Error: 49_386 + .saturating_add(Weight::from_parts(1_197_959, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -288,8 +290,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 39_939_000 picoseconds. - Weight::from_parts(40_788_000, 3593) + // Minimum execution time: 41_860_000 picoseconds. + Weight::from_parts(42_478_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -301,8 +303,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(37_559_000, 3551) + // Minimum execution time: 32_593_000 picoseconds. + Weight::from_parts(35_399_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -316,8 +318,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 42_895_000 picoseconds. - Weight::from_parts(53_945_000, 3533) + // Minimum execution time: 41_934_000 picoseconds. + Weight::from_parts(50_480_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -333,8 +335,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 50_770_000 picoseconds. - Weight::from_parts(63_117_000, 3593) + // Minimum execution time: 47_167_000 picoseconds. + Weight::from_parts(54_289_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -346,18 +348,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 33_396_000 picoseconds. - Weight::from_parts(36_247_000, 4698) + // Minimum execution time: 29_755_000 picoseconds. + Weight::from_parts(32_857_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_625_000 picoseconds. - Weight::from_parts(4_011_396, 0) + // Minimum execution time: 3_793_000 picoseconds. + Weight::from_parts(4_086_907, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(60, 0).saturating_mul(n.into())) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -366,13 +370,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_217_000 picoseconds. - Weight::from_parts(6_608_394, 1487) + // Minimum execution time: 6_262_000 picoseconds. 
+ Weight::from_parts(6_734_896, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -383,10 +387,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `972` - // Estimated: `4437` - // Minimum execution time: 46_853_000 picoseconds. - Weight::from_parts(47_740_000, 4437) + // Measured: `829` + // Estimated: `3593` + // Minimum execution time: 39_812_000 picoseconds. + Weight::from_parts(41_227_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -405,8 +409,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 34_240_000 picoseconds. - Weight::from_parts(35_910_175, 8499) + // Minimum execution time: 34_576_000 picoseconds. + Weight::from_parts(36_303_629, 8499) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -418,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_083_000 picoseconds. - Weight::from_parts(7_336_000, 3493) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_206_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -431,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 15_029_000 picoseconds. - Weight::from_parts(15_567_000, 4681) + // Minimum execution time: 15_063_000 picoseconds. + Weight::from_parts(15_463_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -440,8 +444,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 123_000 picoseconds. - Weight::from_parts(136_000, 0) + // Minimum execution time: 126_000 picoseconds. + Weight::from_parts(157_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -449,8 +453,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_775_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 1_803_000 picoseconds. 
+ Weight::from_parts(1_965_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -459,16 +473,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 11_859_000 picoseconds. - Weight::from_parts(12_214_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(9_699_000, 1516) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -476,11 +490,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 5_864_000 picoseconds. - Weight::from_parts(6_231_000, 1526) + // Minimum execution time: 5_984_000 picoseconds. + Weight::from_parts(6_296_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(268_000, 0) + } } // For backwards compatibility and tests. @@ -491,8 +512,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_945_000 picoseconds. - Weight::from_parts(2_142_000, 0) + // Minimum execution time: 1_977_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -501,8 +522,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 16_274_000 picoseconds. - Weight::from_parts(16_828_000, 7496) + // Minimum execution time: 16_880_000 picoseconds. + Weight::from_parts(17_506_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -512,8 +533,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 15_080_000 picoseconds. - Weight::from_parts(15_874_000, 7496) + // Minimum execution time: 15_569_000 picoseconds. 
+ Weight::from_parts(16_123_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -523,8 +544,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 8_761_000 picoseconds. - Weight::from_parts(9_203_000, 1526) + // Minimum execution time: 8_962_000 picoseconds. + Weight::from_parts(9_389_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -547,10 +568,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 26_057_000 picoseconds. - Weight::from_parts(46_673_357, 8499) - // Standard Error: 456 - .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) + // Minimum execution time: 27_119_000 picoseconds. + Weight::from_parts(47_930_900, 8499) + // Standard Error: 464 + .saturating_add(Weight::from_parts(2_940, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -568,8 +589,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `651` // Estimated: `2136` - // Minimum execution time: 40_907_000 picoseconds. - Weight::from_parts(42_566_000, 2136) + // Minimum execution time: 42_429_000 picoseconds. + Weight::from_parts(43_538_000, 2136) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -591,8 +612,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `769` // Estimated: `4698` - // Minimum execution time: 65_209_000 picoseconds. - Weight::from_parts(68_604_000, 4698) + // Minimum execution time: 62_957_000 picoseconds. + Weight::from_parts(66_821_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -602,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 15_860_000 picoseconds. - Weight::from_parts(16_393_000, 3551) + // Minimum execution time: 16_146_000 picoseconds. + Weight::from_parts(16_775_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -613,8 +634,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 17_651_000 picoseconds. - Weight::from_parts(18_088_000, 3551) + // Minimum execution time: 17_720_000 picoseconds. + Weight::from_parts(18_916_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -624,8 +645,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 18_576_000 picoseconds. - Weight::from_parts(19_810_000, 3551) + // Minimum execution time: 19_088_000 picoseconds. + Weight::from_parts(19_732_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -641,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 31_015_000 picoseconds. - Weight::from_parts(31_932_000, 4681) + // Minimum execution time: 30_522_000 picoseconds. 
+ Weight::from_parts(31_573_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -660,8 +681,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 36_473_000 picoseconds. - Weight::from_parts(37_382_000, 5996) + // Minimum execution time: 35_833_000 picoseconds. + Weight::from_parts(36_830_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -676,10 +697,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 64_957_000 picoseconds. - Weight::from_parts(66_024_232, 6196) - // Standard Error: 50_170 - .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) + // Minimum execution time: 65_882_000 picoseconds. + Weight::from_parts(67_506_904, 6196) + // Standard Error: 49_386 + .saturating_add(Weight::from_parts(1_197_959, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -691,8 +712,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 39_939_000 picoseconds. - Weight::from_parts(40_788_000, 3593) + // Minimum execution time: 41_860_000 picoseconds. + Weight::from_parts(42_478_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -704,8 +725,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(37_559_000, 3551) + // Minimum execution time: 32_593_000 picoseconds. + Weight::from_parts(35_399_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -719,8 +740,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 42_895_000 picoseconds. - Weight::from_parts(53_945_000, 3533) + // Minimum execution time: 41_934_000 picoseconds. + Weight::from_parts(50_480_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -736,8 +757,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 50_770_000 picoseconds. - Weight::from_parts(63_117_000, 3593) + // Minimum execution time: 47_167_000 picoseconds. + Weight::from_parts(54_289_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -749,18 +770,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 33_396_000 picoseconds. - Weight::from_parts(36_247_000, 4698) + // Minimum execution time: 29_755_000 picoseconds. + Weight::from_parts(32_857_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(_n: u32, ) -> Weight { + fn request_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_625_000 picoseconds. 
- Weight::from_parts(4_011_396, 0) + // Minimum execution time: 3_793_000 picoseconds. + Weight::from_parts(4_086_907, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(60, 0).saturating_mul(n.into())) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -769,13 +792,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_217_000 picoseconds. - Weight::from_parts(6_608_394, 1487) + // Minimum execution time: 6_262_000 picoseconds. + Weight::from_parts(6_734_896, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -786,10 +809,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `972` - // Estimated: `4437` - // Minimum execution time: 46_853_000 picoseconds. - Weight::from_parts(47_740_000, 4437) + // Measured: `829` + // Estimated: `3593` + // Minimum execution time: 39_812_000 picoseconds. + Weight::from_parts(41_227_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -808,8 +831,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 34_240_000 picoseconds. - Weight::from_parts(35_910_175, 8499) + // Minimum execution time: 34_576_000 picoseconds. + Weight::from_parts(36_303_629, 8499) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -821,8 +844,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_083_000 picoseconds. - Weight::from_parts(7_336_000, 3493) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_206_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -834,8 +857,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 15_029_000 picoseconds. - Weight::from_parts(15_567_000, 4681) + // Minimum execution time: 15_063_000 picoseconds. + Weight::from_parts(15_463_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -843,8 +866,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 123_000 picoseconds. - Weight::from_parts(136_000, 0) + // Minimum execution time: 126_000 picoseconds. 
+ Weight::from_parts(157_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -852,8 +875,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_775_000 picoseconds. - Weight::from_parts(1_911_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_965_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::RevenueInbox` (r:0 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) + fn notify_revenue() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_902_000 picoseconds. + Weight::from_parts(2_116_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -862,16 +895,16 @@ impl WeightInfo for () { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 11_859_000 picoseconds. - Weight::from_parts(12_214_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(9_699_000, 1516) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -879,9 +912,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 5_864_000 picoseconds. - Weight::from_parts(6_231_000, 1526) + // Minimum execution time: 5_984_000 picoseconds. + Weight::from_parts(6_296_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. 
+ Weight::from_parts(268_000, 0) + } } diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index 09271632df54b..9c979b41462c4 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -16,23 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-bounties = { path = "../bounties", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-bounties = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking", diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs index 947cfcfaa96a2..b1f6370f33405 100644 --- a/substrate/frame/child-bounties/src/benchmarking.rs +++ b/substrate/frame/child-bounties/src/benchmarking.rs @@ -21,6 +21,8 @@ use super::*; +use alloc::{vec, vec::Vec}; + use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 04a1f9799cb86..911fd4c4c49f7 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -56,7 +56,9 @@ mod benchmarking; mod tests; pub mod weights; -use sp_std::prelude::*; +extern crate alloc; + +use alloc::vec::Vec; use frame_support::traits::{ Currency, diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index d9405d3d28977..125844fa70e2c 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -69,23 +69,11 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = 
System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -95,13 +83,8 @@ parameter_types! { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); @@ -138,6 +121,7 @@ impl pallet_bounties::Config for Test { type MaximumReasonLength = ConstU32<300>; type WeightInfo = (); type ChildBountyManager = ChildBounties; + type OnSlash = (); } impl pallet_child_bounties::Config for Test { type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index d966370238bc4..7d9a4c41c8129 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -16,16 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -39,7 +38,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index d0009d02f68c2..3544a8cddb45c 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -41,14 +41,17 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{marker::PhantomData, result}; use scale_info::TypeInfo; use sp_io::storage; use sp_runtime::{ traits::{Dispatchable, Hash}, DispatchError, RuntimeDebug, }; -use sp_std::{marker::PhantomData, prelude::*, result}; use frame_support::{ dispatch::{ @@ -239,7 +242,7 @@ pub mod pallet { #[pallet::genesis_build] impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { - use 
sp_std::collections::btree_set::BTreeSet; + use alloc::collections::btree_set::BTreeSet; let members_set: BTreeSet<_> = self.members.iter().collect(); assert_eq!( members_set.len(), diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 70363562f6af3..252151fb9193b 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,64 +18,64 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -paste = { version = "1.0", default-features = false } -bitflags = "1.3" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +paste = { workspace = true } +bitflags = { workspace = true } +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -smallvec = { version = "1", default-features = false, features = [ +smallvec = { features = [ "const_generics", -] } -wasmi = { version = "0.32.3", default-features = false } -impl-trait-for-tuples = "0.2" +], workspace = true } +wasmi = { workspace = true } +impl-trait-for-tuples = { workspace = true } # Only used in benchmarking to generate contract code -wasm-instrument = { version = "0.4", optional = true, default-features = false } -rand = { version = "0.8", optional = true, default-features = false } -rand_pcg = { version = "0.3", optional = true } +wasm-instrument = { optional = true, workspace = true } +rand = { optional = true, workspace = true } +rand_pcg = { optional = true, workspace = true } # Substrate Dependencies -environmental = { version = "1.1.4", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-contracts-uapi = { path = "uapi" } -pallet-contracts-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +environmental = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-contracts-uapi = { workspace = true, default-features = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm = { workspace = true } +xcm-builder = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -assert_matches = "1" -env_logger = "0.11" 
-pretty_assertions = "1" -wat = "1" -pallet-contracts-fixtures = { path = "./fixtures" } +array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } +env_logger = { workspace = true } +pretty_assertions = { workspace = true } +wat = { workspace = true } +pallet-contracts-fixtures = { workspace = true } # Polkadot Dependencies -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder" } +xcm-builder = { workspace = true, default-features = true } # Substrate Dependencies -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-message-queue = { path = "../message-queue" } -pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } -pallet-utility = { path = "../utility" } -pallet-assets = { path = "../assets" } -pallet-proxy = { path = "../proxy" } -sp-keystore = { path = "../../primitives/keystore" } -sp-tracing = { path = "../../primitives/tracing" } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 8c93c6f16f66f..6b0751571cc9c 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,17 +11,17 @@ description = "Fixtures for testing contracts pallet." workspace = true [dependencies] -frame-system = { path = "../../system" } -sp-runtime = { path = "../../../primitives/runtime" } -anyhow = "1.0.81" +frame-system = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +anyhow = { workspace = true } [build-dependencies] -parity-wasm = "0.45.0" -tempfile = "3.8.1" -toml = "0.8.2" -twox-hash = "1.6.3" +parity-wasm = { workspace = true } +tempfile = { workspace = true } +toml = { workspace = true } +twox-hash = { workspace = true, default-features = true } polkavm-linker = { workspace = true, optional = true } -anyhow = "1.0.81" +anyhow = { workspace = true } [features] riscv = ["polkavm-linker"] diff --git a/substrate/frame/contracts/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/contracts/fixtures/contracts/create_transient_storage_and_call.rs new file mode 100644 index 0000000000000..6bafee5557715 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/create_transient_storage_and_call.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This calls another contract as passed as its account id. It also creates some transient storage. +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!( + buffer, + len: u32, + input: [u8; 4], + callee: [u8; 32], + ); + + let data = [0u8; 16 * 1024]; + let value = &data[..len as usize]; + #[allow(deprecated)] + api::set_transient_storage(buffer, value); + + // Call the callee + api::call_v2( + uapi::CallFlags::empty(), + callee, + 0u64, // How much ref_time weight to devote for the execution. 0 = all. + 0u64, // How much proof_size weight to devote for the execution. 0 = all. + None, + &0u64.to_le_bytes(), // Value transferred to the contract. + input, + None, + ) + .unwrap(); +} diff --git a/substrate/frame/contracts/fixtures/contracts/set_transient_storage.rs b/substrate/frame/contracts/fixtures/contracts/set_transient_storage.rs new file mode 100644 index 0000000000000..e4fde08314ce8 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/set_transient_storage.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(len: u32, ); + + let buffer = [0u8; 16 * 1024]; + let data = &buffer[..len as usize]; + + // Place a garbage value in the transient storage, with the size specified by the call input. + let mut key = [0u8; 32]; + key[0] = 1; + + #[allow(deprecated)] + api::set_transient_storage(&key, data); +} diff --git a/substrate/frame/contracts/fixtures/contracts/transient_storage.rs b/substrate/frame/contracts/fixtures/contracts/transient_storage.rs new file mode 100644 index 0000000000000..c797e17887bc8 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/transient_storage.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This contract tests the transient storage APIs. +#![no_std] +#![no_main] + +use common::unwrap_output; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + const KEY: [u8; 32] = [1u8; 32]; + const VALUE_1: [u8; 4] = [1u8; 4]; + const VALUE_2: [u8; 4] = [2u8; 4]; + const VALUE_3: [u8; 4] = [3u8; 4]; + + #[allow(deprecated)] + { + let existing = api::set_transient_storage(&KEY, &VALUE_1); + assert_eq!(existing, None); + assert_eq!(api::contains_transient_storage(&KEY), Some(VALUE_1.len() as _)); + unwrap_output!(val, [0u8; 4], api::get_transient_storage, &KEY); + assert_eq!(**val, VALUE_1); + + let existing = api::set_transient_storage(&KEY, &VALUE_2); + assert_eq!(existing, Some(VALUE_1.len() as _)); + unwrap_output!(val, [0u8; 4], api::get_transient_storage, &KEY); + assert_eq!(**val, VALUE_2); + + api::clear_transient_storage(&KEY); + assert_eq!(api::contains_transient_storage(&KEY), None); + + let existing = api::set_transient_storage(&KEY, &VALUE_3); + assert_eq!(existing, None); + unwrap_output!(val, [0u8; 32], api::take_transient_storage, &KEY); + assert_eq!(**val, VALUE_3); + } +} diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index a348b7308d123..85d98206ea57b 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -12,41 +12,40 @@ description = "A mock network for testing pallet-contracts" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-assets = { path = "../../assets" } -pallet-balances = { path = "../../balances" } -pallet-contracts = { path = ".." 
} -pallet-contracts-uapi = { path = "../uapi", default-features = false } -pallet-contracts-proc-macro = { path = "../proc-macro" } -pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } -pallet-message-queue = { path = "../../message-queue" } -pallet-proxy = { path = "../../proxy" } -pallet-timestamp = { path = "../../timestamp" } -pallet-utility = { path = "../../utility" } -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } -polkadot-primitives = { path = "../../../../polkadot/primitives" } -polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder" } -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } -xcm-simulator = { path = "../../../../polkadot/xcm/xcm-simulator" } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-contracts = { workspace = true, default-features = true } +pallet-contracts-uapi = { workspace = true } +pallet-contracts-proc-macro = { workspace = true, default-features = true } +pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true } +xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] -assert_matches = "1" -pretty_assertions = "1" -pallet-contracts-fixtures = { path = "../fixtures" } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +pallet-contracts-fixtures = { workspace = true } [features] default = ["std"] @@ -67,7 +66,6 @@ std = [ "sp-io/std", "sp-keystore/std", 
"sp-runtime/std", - "sp-std/std", "xcm-executor/std", "xcm/std", ] diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs index 20ded0f4a0b84..34cc95f2eae0e 100644 --- a/substrate/frame/contracts/mock-network/src/lib.rs +++ b/substrate/frame/contracts/mock-network/src/lib.rs @@ -112,6 +112,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { (0u128, ALICE, INITIAL_BALANCE), (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), ], + next_asset_id: None, } .assimilate_storage(&mut t) .unwrap(); diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs index bfdf6dd97eaf1..6e922c16c2982 100644 --- a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -25,7 +25,6 @@ use polkadot_parachain_primitives::primitives::{ use polkadot_primitives::BlockNumber as RelayBlockNumber; use sp_runtime::traits::{Get, Hash}; -use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedXcm}; #[frame_support::pallet] diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs index f35846ba32c31..3579b46ea6e9c 100644 --- a/substrate/frame/contracts/mock-network/src/parachain.rs +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -35,7 +35,6 @@ use pallet_xcm::XcmPassthrough; use sp_core::{ConstU32, ConstU64, H256}; use sp_runtime::traits::{Get, IdentityLookup, MaybeEquivalence}; -use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 4080cd0442dbc..3651b172d75fb 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["full"], workspace = true } diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index f91f8660cd315..84ea7de00a2f1 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -649,7 +649,7 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { quote! 
{ let result = #body; if ::log::log_enabled!(target: "runtime::contracts::strace", ::log::Level::Trace) { - use sp_std::fmt::Write; + use core::fmt::Write; let mut w = sp_std::Writer::default(); let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result); let msg = core::str::from_utf8(&w.inner()).unwrap_or_default(); diff --git a/substrate/frame/contracts/src/benchmarking/call_builder.rs b/substrate/frame/contracts/src/benchmarking/call_builder.rs index 5d73d825fca9a..5833639d7ce25 100644 --- a/substrate/frame/contracts/src/benchmarking/call_builder.rs +++ b/substrate/frame/contracts/src/benchmarking/call_builder.rs @@ -17,17 +17,18 @@ use crate::{ benchmarking::{Contract, WasmModule}, - exec::Stack, + exec::{Ext, Key, Stack}, storage::meter::Meter, + transient_storage::MeterEntry, wasm::Runtime, - BalanceOf, Config, DebugBufferVec, Determinism, ExecReturnValue, GasMeter, Origin, Schedule, - TypeInfo, WasmBlob, Weight, + BalanceOf, Config, DebugBufferVec, Determinism, Error, ExecReturnValue, GasMeter, Origin, + Schedule, TypeInfo, WasmBlob, Weight, }; +use alloc::{vec, vec::Vec}; use codec::{Encode, HasCompact}; use core::fmt::Debug; use frame_benchmarking::benchmarking; use sp_core::Get; -use sp_std::prelude::*; type StackExt<'a, T> = Stack<'a, T, WasmBlob>; @@ -56,6 +57,7 @@ pub struct CallSetup { debug_message: Option>, determinism: Determinism, data: Vec, + transient_storage_size: u32, } impl Default for CallSetup @@ -103,6 +105,7 @@ where debug_message: None, determinism: Determinism::Enforced, data: vec![], + transient_storage_size: 0, } } @@ -126,6 +129,11 @@ where self.data = value; } + /// Set the transient storage size. + pub fn set_transient_storage_size(&mut self, size: u32) { + self.transient_storage_size = size; + } + /// Set the debug message. pub fn enable_debug_message(&mut self) { self.debug_message = Some(Default::default()); @@ -148,7 +156,7 @@ where /// Build the call stack. pub fn ext(&mut self) -> (StackExt<'_, T>, WasmBlob) { - StackExt::bench_new_call( + let mut ext = StackExt::bench_new_call( self.dest.clone(), self.origin.clone(), &mut self.gas_meter, @@ -157,7 +165,11 @@ where self.value, self.debug_message.as_mut(), self.determinism, - ) + ); + if self.transient_storage_size > 0 { + Self::with_transient_storage(&mut ext.0, self.transient_storage_size).unwrap(); + } + ext } /// Prepare a call to the module. @@ -169,6 +181,30 @@ where let (func, store) = module.bench_prepare_call(ext, input); PreparedCall { func, store } } + + /// Add transient_storage + fn with_transient_storage(ext: &mut StackExt, size: u32) -> Result<(), &'static str> { + let &MeterEntry { amount, limit } = ext.transient_storage().meter().current(); + ext.transient_storage().meter().current_mut().limit = size; + for i in 1u32.. { + let mut key_data = i.to_le_bytes().to_vec(); + while key_data.last() == Some(&0) { + key_data.pop(); + } + let key = Key::::try_from_var(key_data).unwrap(); + if let Err(e) = ext.set_transient_storage(&key, Some(Vec::new()), false) { + // Restore previous settings. 
+ ext.transient_storage().meter().current_mut().limit = limit; + ext.transient_storage().meter().current_mut().amount = amount; + if e == Error::::OutOfTransientStorage.into() { + break; + } else { + return Err("Initialization of the transient storage failed"); + } + } + } + Ok(()) + } } #[macro_export] diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 65bcf30683c05..1473022b55378 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -25,9 +25,9 @@ //! compiles it down into a `WasmModule` that can be used as a contract's code. use crate::Config; +use alloc::{borrow::ToOwned, vec, vec::Vec}; use frame_support::traits::Get; use sp_runtime::{traits::Hash, Saturating}; -use sp_std::{borrow::ToOwned, prelude::*}; use wasm_instrument::parity_wasm::{ builder, elements::{ @@ -338,7 +338,7 @@ pub mod body { .cycle() .take(instructions.len() * usize::try_from(repetitions).unwrap()) .cloned() - .chain(sp_std::iter::once(Instruction::End)) + .chain(core::iter::once(Instruction::End)) .collect(), ); FuncBody::new(locals.to_vec(), instructions) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 80c7e863d2994..620e6544b08f9 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -31,9 +31,11 @@ use crate::{ migration::{ codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, v16, MigrationStep, }, + storage::WriteOutcome, wasm::BenchEnv, Pallet as Contracts, *, }; +use alloc::{vec, vec::Vec}; use codec::{Encode, MaxEncodedLen}; use frame_benchmarking::v2::*; use frame_support::{ @@ -46,7 +48,6 @@ use frame_system::RawOrigin; use pallet_balances; use pallet_contracts_uapi::{CallFlags, ReturnErrorCode}; use sp_runtime::traits::{Bounded, Hash}; -use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::{Instruction, Local, ValueType}; /// How many runs we do per API benchmark. @@ -186,7 +187,7 @@ fn caller_funding() -> BalanceOf { #[benchmarks( where - as codec::HasCompact>::Type: Clone + Eq + PartialEq + sp_std::fmt::Debug + scale_info::TypeInfo + codec::Encode, + as codec::HasCompact>::Type: Clone + Eq + PartialEq + core::fmt::Debug + scale_info::TypeInfo + codec::Encode, T: Config + pallet_balances::Config, BalanceOf: From< as Currency>::Balance>, as Currency>::Balance: From>, @@ -1162,6 +1163,296 @@ mod benchmarks { Ok(()) } + // We use both full and empty benchmarks here instead of benchmarking transient_storage + // (BTreeMap) directly. This approach is necessary because benchmarking this BTreeMap is very + // slow. Additionally, we use linear regression for our benchmarks, and the BTreeMap's log(n) + // complexity can introduce approximation errors. 
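The full/empty benchmark pairs defined next rely on the `CallSetup::with_transient_storage` helper added above, which keeps inserting fresh keys until the meter reports `OutOfTransientStorage`, so the backing `BTreeMap` is at worst-case size before measurement. Below is a minimal, standalone sketch of that fill-until-full pattern; the `fill_to_budget` name and the flat 32-byte per-entry cost are illustrative assumptions, not pallet APIs.

```rust
use std::collections::BTreeMap;

/// Illustrative only: fill a map with distinct keys until a byte budget is
/// exhausted, mirroring how the benchmarks pre-populate transient storage
/// for the `*_full` cases. The per-entry cost model here is a guess.
fn fill_to_budget(budget: u32) -> BTreeMap<Vec<u8>, Vec<u8>> {
    let mut map = BTreeMap::new();
    let mut used: u32 = 0;
    for i in 1u32.. {
        // Same key scheme as the benchmark helper: little-endian counter
        // with trailing zero bytes stripped, so keys stay short and unique.
        let mut key = i.to_le_bytes().to_vec();
        while key.last() == Some(&0) {
            key.pop();
        }
        // Assumed cost: key length plus a fixed per-entry overhead.
        let cost = key.len() as u32 + 32;
        if used.saturating_add(cost) > budget {
            break; // next insert would exceed the limit, storage is "full"
        }
        used += cost;
        map.insert(key, Vec::new());
    }
    map
}

fn main() {
    let map = fill_to_budget(4 * 1024);
    println!("entries created before hitting the budget: {}", map.len());
}
```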
+ #[benchmark(pov_mode = Ignored)] + fn set_transient_storage_empty() -> Result<(), BenchmarkError> { + let max_value_len = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + let value = Some(vec![42u8; max_value_len as _]); + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + let result; + #[block] + { + result = runtime.ext().set_transient_storage(&key, value, false); + } + + assert_eq!(result, Ok(WriteOutcome::New)); + assert_eq!(runtime.ext().get_transient_storage(&key), Some(vec![42u8; max_value_len as _])); + Ok(()) + } + + #[benchmark(pov_mode = Ignored)] + fn set_transient_storage_full() -> Result<(), BenchmarkError> { + let max_value_len = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + let value = Some(vec![42u8; max_value_len as _]); + let mut setup = CallSetup::::default(); + setup.set_transient_storage_size(T::MaxTransientStorageSize::get()); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + let result; + #[block] + { + result = runtime.ext().set_transient_storage(&key, value, false); + } + + assert_eq!(result, Ok(WriteOutcome::New)); + assert_eq!(runtime.ext().get_transient_storage(&key), Some(vec![42u8; max_value_len as _])); + Ok(()) + } + + #[benchmark(pov_mode = Ignored)] + fn get_transient_storage_empty() -> Result<(), BenchmarkError> { + let max_value_len = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; max_value_len as _]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + let result; + #[block] + { + result = runtime.ext().get_transient_storage(&key); + } + + assert_eq!(result, Some(vec![42u8; max_value_len as _])); + Ok(()) + } + + #[benchmark(pov_mode = Ignored)] + fn get_transient_storage_full() -> Result<(), BenchmarkError> { + let max_value_len = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + + let mut setup = CallSetup::::default(); + setup.set_transient_storage_size(T::MaxTransientStorageSize::get()); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; max_value_len as _]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + let result; + #[block] + { + result = runtime.ext().get_transient_storage(&key); + } + + assert_eq!(result, Some(vec![42u8; max_value_len as _])); + Ok(()) 
+ } + + // The weight of journal rollbacks should be taken into account when setting storage. + #[benchmark(pov_mode = Ignored)] + fn rollback_transient_storage() -> Result<(), BenchmarkError> { + let max_value_len = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + + let mut setup = CallSetup::::default(); + setup.set_transient_storage_size(T::MaxTransientStorageSize::get()); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime.ext().transient_storage().start_transaction(); + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; max_value_len as _]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + #[block] + { + runtime.ext().transient_storage().rollback_transaction(); + } + + assert_eq!(runtime.ext().get_transient_storage(&key), None); + Ok(()) + } + + // n: new byte size + // o: old byte size + #[benchmark(pov_mode = Measured)] + fn seal_set_transient_storage( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + o: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + let value = vec![1u8; n as usize]; + build_runtime!(runtime, memory: [ key.to_vec(), value.clone(), ]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; o as usize]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + + let result; + #[block] + { + result = BenchEnv::seal0_set_transient_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + max_key_len, // value_ptr + n, // value_len + ); + } + + assert_ok!(result); + assert_eq!(runtime.ext().get_transient_storage(&key).unwrap(), value); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_clear_transient_storage( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, memory: [ key.to_vec(), ]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; n as usize]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + + let result; + #[block] + { + result = + BenchEnv::seal0_clear_transient_storage(&mut runtime, &mut memory, 0, max_key_len); + } + + assert_ok!(result); + assert!(runtime.ext().get_transient_storage(&key).is_none()); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_get_transient_storage( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, 
Some(vec![42u8; n as usize]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + + let out_ptr = max_key_len + 4; + let result; + #[block] + { + result = BenchEnv::seal0_get_transient_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // out_len_ptr + ); + } + + assert_ok!(result); + assert_eq!( + &runtime.ext().get_transient_storage(&key).unwrap(), + &memory[out_ptr as usize..] + ); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_contains_transient_storage( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, memory: [ key.to_vec(), ]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + runtime + .ext() + .set_transient_storage(&key, Some(vec![42u8; n as usize]), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + + let result; + #[block] + { + result = BenchEnv::seal0_contains_transient_storage( + &mut runtime, + &mut memory, + 0, + max_key_len, + ); + } + + assert_eq!(result.unwrap(), n); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn seal_take_transient_storage( + n: Linear<0, { T::Schedule::get().limits.payload_len }>, + ) -> Result<(), BenchmarkError> { + let n = T::Schedule::get().limits.payload_len; + let max_key_len = T::MaxStorageKeyLen::get(); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); + runtime.ext().transient_storage().meter().current_mut().limit = u32::MAX; + let value = vec![42u8; n as usize]; + runtime + .ext() + .set_transient_storage(&key, Some(value.clone()), false) + .map_err(|_| "Failed to write to transient storage during setup.")?; + + let out_ptr = max_key_len + 4; + let result; + #[block] + { + result = BenchEnv::seal0_take_transient_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // out_len_ptr + ); + } + + assert_ok!(result); + assert!(&runtime.ext().get_transient_storage(&key).is_none()); + assert_eq!(&value, &memory[out_ptr as usize..]); + Ok(()) + } + // We transfer to unique accounts. #[benchmark(pov_mode = Measured)] fn seal_transfer() { diff --git a/substrate/frame/contracts/src/chain_extension.rs b/substrate/frame/contracts/src/chain_extension.rs index f3a67fcb09a08..b9bb451fd734a 100644 --- a/substrate/frame/contracts/src/chain_extension.rs +++ b/substrate/frame/contracts/src/chain_extension.rs @@ -74,17 +74,18 @@ use crate::{ wasm::{Runtime, RuntimeCosts}, Error, }; +use alloc::vec::Vec; use codec::{Decode, MaxEncodedLen}; +use core::marker::PhantomData; use frame_support::weights::Weight; use sp_runtime::DispatchError; -use sp_std::{marker::PhantomData, vec::Vec}; pub use crate::{exec::Ext, gas::ChargedAmount, storage::meter::Diff, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_uapi::ReturnFlags; /// Result that returns a [`DispatchError`] on error. -pub type Result = sp_std::result::Result; +pub type Result = core::result::Result; /// A trait used to extend the set of contract callable functions. 
/// diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 84a3f7dc2a140..31e0bf50b73e7 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -20,10 +20,13 @@ use crate::{ gas::GasMeter, primitives::{ExecReturnValue, StorageDeposit}, storage::{self, meter::Diff, WriteOutcome}, + transient_storage::TransientStorage, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, Event, Nonce, Origin, Pallet as Contracts, Schedule, LOG_TARGET, }; +use alloc::vec::Vec; +use core::{fmt::Debug, marker::PhantomData, mem}; use frame_support::{ crypto::ecdsa::ECDSAExt, dispatch::{DispatchResult, DispatchResultWithPostInfo}, @@ -49,7 +52,6 @@ use sp_runtime::{ traits::{Convert, Dispatchable, Zero}, DispatchError, }; -use sp_std::{fmt::Debug, marker::PhantomData, mem, prelude::*, vec::Vec}; pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; @@ -209,6 +211,27 @@ pub trait Ext: sealing::Sealed { take_old: bool, ) -> Result; + /// Returns the transient storage entry of the executing account for the given `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_transient_storage` or + /// was deleted. + fn get_transient_storage(&self, key: &Key) -> Option>; + + /// Returns `Some(len)` (in bytes) if a transient storage item exists at `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_transient_storage` or + /// was deleted. + fn get_transient_storage_size(&self, key: &Key) -> Option; + + /// Sets the transient storage entry for the given key to the specified value. If `value` is + /// `None` then the storage entry is deleted. + fn set_transient_storage( + &mut self, + key: &Key, + value: Option>, + take_old: bool, + ) -> Result; + /// Returns the caller. fn caller(&self) -> Origin; @@ -307,6 +330,12 @@ pub trait Ext: sealing::Sealed { #[cfg(any(test, feature = "runtime-benchmarks"))] fn contract_info(&mut self) -> &mut ContractInfo; + /// Get a mutable reference to the transient storage. + /// Useful in benchmarks when it is sometimes necessary to modify and inspect the transient + /// storage directly. + #[cfg(feature = "runtime-benchmarks")] + fn transient_storage(&mut self) -> &mut TransientStorage; + /// Sets new code hash for existing contract. fn set_code_hash(&mut self, hash: CodeHash) -> DispatchResult; @@ -473,6 +502,8 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBufferVec>, /// The determinism requirement of this call stack. determinism: Determinism, + /// Transient storage used to store data, which is kept for the duration of a transaction. + transient_storage: TransientStorage, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -796,6 +827,7 @@ where frames: Default::default(), debug_message, determinism, + transient_storage: TransientStorage::new(T::MaxTransientStorageSize::get()), _phantom: Default::default(), }; @@ -926,6 +958,9 @@ where let entry_point = frame.entry_point; let delegated_code_hash = if frame.delegate_caller.is_some() { Some(*executable.code_hash()) } else { None }; + + self.transient_storage.start_transaction(); + let do_transaction = || { // We need to charge the storage deposit before the initial transfer so that // it can create the account in case the initial transfer is < ed. 
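The hunks above add the transient storage API to `Ext` and give the call stack its own `TransientStorage` journal: a transaction is started before a frame executes and, as the next hunk shows, committed when the frame succeeds or rolled back when it fails. The following standalone sketch models those checkpoint semantics with a plain `BTreeMap`; it is an illustration of the behaviour, not the pallet's `TransientStorage` type, and all names in it are made up.

```rust
use std::collections::BTreeMap;

/// Minimal model of checkpointed transient storage: a journal records the
/// previous value of every overwritten key so a frame can be rolled back.
#[derive(Default)]
struct TransientModel {
    data: BTreeMap<Vec<u8>, Vec<u8>>,
    journal: Vec<(Vec<u8>, Option<Vec<u8>>)>, // (key, previous value)
    checkpoints: Vec<usize>,                  // journal length per open frame
}

impl TransientModel {
    fn start_transaction(&mut self) {
        self.checkpoints.push(self.journal.len());
    }

    fn write(&mut self, key: &[u8], value: Vec<u8>) {
        let prev = self.data.insert(key.to_vec(), value);
        self.journal.push((key.to_vec(), prev));
    }

    fn commit_transaction(&mut self) {
        // Keep the writes; the enclosing frame's checkpoint still covers them.
        self.checkpoints.pop().expect("commit without start");
    }

    fn rollback_transaction(&mut self) {
        let mark = self.checkpoints.pop().expect("rollback without start");
        // Undo writes in reverse order back to the checkpoint.
        while self.journal.len() > mark {
            let (key, prev) = self.journal.pop().expect("len checked above");
            match prev {
                Some(v) => {
                    self.data.insert(key, v);
                },
                None => {
                    self.data.remove(&key);
                },
            }
        }
    }
}

fn main() {
    let mut storage = TransientModel::default();
    storage.start_transaction(); // caller frame
    storage.write(b"a", vec![1]);
    storage.start_transaction(); // nested callee frame
    storage.write(b"a", vec![2]);
    storage.rollback_transaction(); // callee trapped: its write is undone
    assert_eq!(storage.data.get(&b"a".to_vec()), Some(&vec![1]));
    storage.commit_transaction(); // caller succeeded: its write is kept
}
```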
@@ -1046,6 +1081,12 @@ where Err(error) => (false, Err(error.into())), }; + if success { + self.transient_storage.commit_transaction(); + } else { + self.transient_storage.rollback_transaction(); + } + self.pop_frame(success); output } @@ -1185,12 +1226,12 @@ where /// /// The iterator starts with the top frame and ends with the root frame. fn frames(&self) -> impl Iterator> { - sp_std::iter::once(&self.first_frame).chain(&self.frames).rev() + core::iter::once(&self.first_frame).chain(&self.frames).rev() } /// Same as `frames` but with a mutable reference as iterator item. fn frames_mut(&mut self) -> impl Iterator> { - sp_std::iter::once(&mut self.first_frame).chain(&mut self.frames).rev() + core::iter::once(&mut self.first_frame).chain(&mut self.frames).rev() } /// Returns whether the current contract is on the stack multiple times. @@ -1374,6 +1415,24 @@ where ) } + fn get_transient_storage(&self, key: &Key) -> Option> { + self.transient_storage.read(self.address(), key) + } + + fn get_transient_storage_size(&self, key: &Key) -> Option { + self.transient_storage.read(self.address(), key).map(|value| value.len() as _) + } + + fn set_transient_storage( + &mut self, + key: &Key, + value: Option>, + take_old: bool, + ) -> Result { + let account_id = self.address().clone(); + self.transient_storage.write(&account_id, key, value, take_old) + } + fn address(&self) -> &T::AccountId { &self.top_frame().account_id } @@ -1518,6 +1577,11 @@ where self.top_frame_mut().contract_info() } + #[cfg(feature = "runtime-benchmarks")] + fn transient_storage(&mut self) -> &mut TransientStorage { + &mut self.transient_storage + } + fn set_code_hash(&mut self, hash: CodeHash) -> DispatchResult { let frame = top_frame_mut!(self); if !E::from_storage(hash, &mut frame.nested_gas)?.is_deterministic() { @@ -3826,6 +3890,262 @@ mod tests { }); } + #[test] + fn set_transient_storage_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + // Write + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![4, 5, 6]), true), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true), + Ok(WriteOutcome::New) + ); + + // Overwrite + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([1; 32]), Some(vec![42]), false), + Ok(WriteOutcome::Overwritten(3)) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([2; 32]), Some(vec![48]), true), + Ok(WriteOutcome::Taken(vec![4, 5, 6])) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([3; 32]), None, false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([4; 32]), None, true), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([5; 32]), Some(vec![]), false), + Ok(WriteOutcome::Overwritten(0)) + ); + assert_eq!( + ctx.ext.set_transient_storage(&Key::Fix([6; 32]), Some(vec![]), true), + Ok(WriteOutcome::Taken(vec![])) + ); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, 
code_hash); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&contract_origin, None, 0).unwrap(); + assert_ok!(MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + 0, + vec![], + None, + Determinism::Enforced + )); + }); + } + + #[test] + fn get_transient_storage_works() { + // Call stack: BOB -> CHARLIE(success) -> BOB' (success) + let storage_key_1 = &Key::Fix([1; 32]); + let storage_key_2 = &Key::Fix([2; 32]); + let storage_key_3 = &Key::Fix([3; 32]); + let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data[0] == 0 { + assert_eq!( + ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.call( + Weight::zero(), + BalanceOf::::zero(), + CHARLIE, + 0, + vec![], + true, + false, + ), + exec_success() + ); + assert_eq!(ctx.ext.get_transient_storage(storage_key_1), Some(vec![3])); + assert_eq!(ctx.ext.get_transient_storage(storage_key_2), Some(vec![])); + assert_eq!(ctx.ext.get_transient_storage(storage_key_3), None); + } else { + assert_eq!( + ctx.ext.set_transient_storage(storage_key_1, Some(vec![3]), true), + Ok(WriteOutcome::Taken(vec![1, 2])) + ); + assert_eq!( + ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false), + Ok(WriteOutcome::New) + ); + } + exec_success() + }); + let code_charlie = MockLoader::insert(Call, |ctx, _| { + assert!(ctx + .ext + .call(Weight::zero(), BalanceOf::::zero(), BOB, 0, vec![99], true, false) + .is_ok()); + // CHARLIE can not read BOB`s storage. + assert_eq!(ctx.ext.get_transient_storage(storage_key_1), None); + exec_success() + }); + + // This one tests passing the input data into a contract via call. 
+ ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, Some(0), 0).unwrap(); + + let result = MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + 0, + vec![0], + None, + Determinism::Enforced, + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn get_transient_storage_size_works() { + let storage_key_1 = &Key::Fix([1; 32]); + let storage_key_2 = &Key::Fix([2; 32]); + let storage_key_3 = &Key::Fix([3; 32]); + let code_hash = MockLoader::insert(Call, |ctx, _| { + assert_eq!( + ctx.ext.set_transient_storage(storage_key_1, Some(vec![1, 2, 3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.set_transient_storage(storage_key_2, Some(vec![]), false), + Ok(WriteOutcome::New) + ); + assert_eq!(ctx.ext.get_transient_storage_size(storage_key_1), Some(3)); + assert_eq!(ctx.ext.get_transient_storage_size(storage_key_2), Some(0)); + assert_eq!(ctx.ext.get_transient_storage_size(storage_key_3), None); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, code_hash); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, Some(0), 0).unwrap(); + assert_ok!(MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + 0, + vec![], + None, + Determinism::Enforced + )); + }); + } + + #[test] + fn rollback_transient_storage_works() { + // Call stack: BOB -> CHARLIE (trap) -> BOB' (success) + let storage_key = &Key::Fix([1; 32]); + let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data[0] == 0 { + assert_eq!( + ctx.ext.set_transient_storage(storage_key, Some(vec![1, 2]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + ctx.ext.call( + Weight::zero(), + BalanceOf::::zero(), + CHARLIE, + 0, + vec![], + true, + false + ), + exec_trapped() + ); + assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![1, 2])); + } else { + let overwritten_length = ctx.ext.get_transient_storage_size(storage_key).unwrap(); + assert_eq!( + ctx.ext.set_transient_storage(storage_key, Some(vec![3]), false), + Ok(WriteOutcome::Overwritten(overwritten_length)) + ); + assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![3])); + } + exec_success() + }); + let code_charlie = MockLoader::insert(Call, |ctx, _| { + assert!(ctx + .ext + .call(Weight::zero(), BalanceOf::::zero(), BOB, 0, vec![99], true, false) + .is_ok()); + exec_trapped() + }); + + // This one tests passing the input data into a contract via call. 
+ ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + let contract_origin = Origin::from_account_id(ALICE); + let mut storage_meter = + storage::meter::Meter::new(&contract_origin, Some(0), 0).unwrap(); + + let result = MockStack::run_call( + contract_origin, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + &schedule, + 0, + vec![0], + None, + Determinism::Enforced, + ); + assert_matches!(result, Ok(_)); + }); + } + #[test] fn ecdsa_to_eth_address_returns_proper_value() { let bob_ch = MockLoader::insert(Call, |ctx, _| { diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index e9cf28a66912b..093adc07ab48b 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -87,6 +87,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "1024")] +extern crate alloc; mod address; mod benchmarking; mod exec; @@ -96,6 +97,7 @@ pub use primitives::*; mod schedule; mod storage; +mod transient_storage; mod wasm; pub mod chain_extension; @@ -115,11 +117,11 @@ use crate::{ wasm::{CodeInfo, WasmBlob}, }; use codec::{Codec, Decode, Encode, HasCompact, MaxEncodedLen}; +use core::fmt::Debug; use environmental::*; use frame_support::{ dispatch::{GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin, WithPostDispatchInfo}, ensure, - error::BadOrigin, traits::{ fungible::{Inspect, Mutate, MutateHold}, ConstU32, Contains, Get, Randomness, Time, @@ -135,10 +137,9 @@ use frame_system::{ use scale_info::TypeInfo; use smallvec::Array; use sp_runtime::{ - traits::{Convert, Dispatchable, Saturating, StaticLookup, Zero}, + traits::{BadOrigin, Convert, Dispatchable, Saturating, StaticLookup, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::{fmt::Debug, prelude::*}; pub use crate::{ address::{AddressGenerator, DefaultAddressGenerator}, @@ -388,6 +389,11 @@ pub mod pallet { #[pallet::constant] type MaxStorageKeyLen: Get; + /// The maximum size of the transient storage in bytes. + /// This includes keys, values, and previous entries used for storage rollback. + #[pallet::constant] + type MaxTransientStorageSize: Get; + /// The maximum number of delegate_dependencies that a contract can lock with /// [`chain_extension::Ext::lock_delegate_dependency`]. #[pallet::constant] @@ -529,7 +535,7 @@ pub mod pallet { } } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] impl frame_system::DefaultConfig for TestDefaultConfig {} #[frame_support::register_default_impl(TestDefaultConfig)] @@ -554,6 +560,7 @@ pub mod pallet { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = MaxDelegateDependencies; type MaxStorageKeyLen = ConstU32<128>; + type MaxTransientStorageSize = ConstU32<{ 1 * 1024 * 1024 }>; type Migrations = (); type Time = Self; type Randomness = Self; @@ -605,7 +612,11 @@ pub mod pallet { // Max call depth is CallStack::size() + 1 let max_call_depth = u32::try_from(T::CallStack::size().saturating_add(1)) .expect("CallStack size is too big"); - + // Transient storage uses a BTreeMap, which has overhead compared to the raw size of + // key-value data. To ensure safety, a margin of 2x the raw key-value size is used. 
+ let max_transient_storage_size = T::MaxTransientStorageSize::get() + .checked_mul(2) + .expect("MaxTransientStorageSize is too large"); // Check that given configured `MaxCodeLen`, runtime heap memory limit can't be broken. // // In worst case, the decoded Wasm contract code would be `x16` times larger than the @@ -615,7 +626,7 @@ pub mod pallet { // Next, the pallet keeps the Wasm blob for each // contract, hence we add up `MaxCodeLen` to the safety margin. // - // Finally, the inefficiencies of the freeing-bump allocator + // The inefficiencies of the freeing-bump allocator // being used in the client for the runtime memory allocations, could lead to possible // memory allocations for contract code grow up to `x4` times in some extreme cases, // which gives us total multiplier of `17*4` for `MaxCodeLen`. @@ -624,17 +635,20 @@ pub mod pallet { // memory should be available. Note that maximum allowed heap memory and stack size per // each contract (stack frame) should also be counted. // + // The pallet holds transient storage with a size up to `max_transient_storage_size`. + // // Finally, we allow 50% of the runtime memory to be utilized by the contracts call // stack, keeping the rest for other facilities, such as PoV, etc. // // This gives us the following formula: // - // `(MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth < - // max_runtime_mem/2` + // `(MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth + + // max_transient_storage_size < max_runtime_mem/2` // // Hence the upper limit for the `MaxCodeLen` can be defined as follows: let code_len_limit = max_runtime_mem .saturating_div(2) + .saturating_sub(max_transient_storage_size) .saturating_div(max_call_depth) .saturating_sub(max_heap_size) .saturating_sub(MAX_STACK_SIZE) @@ -1235,6 +1249,8 @@ pub mod pallet { DelegateDependencyAlreadyExists, /// Can not add a delegate dependency to the code hash of the contract itself. CannotAddSelfAsDelegateDependency, + /// Can not add more data to transient storage. + OutOfTransientStorage, } /// A reason for the pallet contracts placing a hold on funds. 
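To see how the new `max_transient_storage_size` term tightens the `integrity_test` bound on `MaxCodeLen`, here is the same saturating arithmetic pulled out into a standalone function and run with made-up inputs; the example values are assumptions, the real ones come from the schedule and runtime configuration.

```rust
/// Illustration of the updated `MaxCodeLen` upper bound from `integrity_test`,
/// with hypothetical inputs; the real values come from the runtime.
fn code_len_limit(
    max_runtime_mem: u32,            // e.g. schedule.limits.runtime_memory
    max_transient_storage_size: u32, // MaxTransientStorageSize with the 2x margin
    max_call_depth: u32,             // CallStack::size() + 1
    max_heap_size: u32,              // per-contract heap limit
    max_stack_size: u32,             // MAX_STACK_SIZE
) -> u32 {
    // (MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth
    //     + max_transient_storage_size < max_runtime_mem / 2
    // solved for MaxCodeLen, using saturating arithmetic like the pallet does.
    max_runtime_mem
        .saturating_div(2)
        .saturating_sub(max_transient_storage_size)
        .saturating_div(max_call_depth)
        .saturating_sub(max_heap_size)
        .saturating_sub(max_stack_size)
        .saturating_div(17 * 4)
}

fn main() {
    // Hypothetical numbers: 128 MiB runtime memory, 2 MiB transient storage
    // (1 MiB limit doubled by the safety margin), depth 6, 1 MiB heap, 1 MiB stack.
    let limit =
        code_len_limit(128 * 1024 * 1024, 2 * 1024 * 1024, 6, 1024 * 1024, 1024 * 1024);
    println!("resulting MaxCodeLen upper bound: {limit} bytes");
}
```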
diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index c633ba9c2d50d..29ac74d0d50a9 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -69,17 +69,16 @@ include!(concat!(env!("OUT_DIR"), "/migration_codegen.rs")); use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET}; use codec::{Codec, Decode}; +use core::marker::PhantomData; use frame_support::{ pallet_prelude::*, traits::{ConstU32, OnRuntimeUpgrade}, weights::WeightMeter, }; use sp_runtime::Saturating; -use sp_std::marker::PhantomData; #[cfg(feature = "try-runtime")] -use sp_std::prelude::*; - +use alloc::vec::Vec; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -604,7 +603,7 @@ mod test { let mut meter = WeightMeter::with_limit(Weight::from_all(1)); let result = Migrations::steps(version, &cursor, &mut meter); - cursor = vec![1u8, 0].try_into().unwrap(); + cursor = alloc::vec![1u8, 0].try_into().unwrap(); assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 }); assert_eq!(meter.consumed(), Weight::from_all(1)); diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 7e84191910d9f..6a67395174f5d 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -22,13 +22,13 @@ use crate::{ weights::WeightInfo, CodeHash, Config, Determinism, Pallet, Weight, LOG_TARGET, }; +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{ pallet_prelude::*, storage_alias, weights::WeightMeter, DefaultNoBound, Identity, }; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; -use sp_std::prelude::*; mod v8 { use super::*; @@ -56,7 +56,7 @@ pub fn store_old_dummy_code(len: usize) { instruction_weights_version: 0, initial: 0, maximum: 0, - code: vec![42u8; len], + code: alloc::vec![42u8; len], }; let hash = T::Hashing::hash(&module.code); v8::CodeStorage::::insert(hash, module); diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index 61632a1fd1bad..23e7fd238bb7c 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -40,13 +40,15 @@ use frame_support::{ DefaultNoBound, }; use sp_core::hexdisplay::HexDisplay; -#[cfg(feature = "try-runtime")] -use sp_runtime::TryRuntimeError; use sp_runtime::{ traits::{Hash, TrailingZeroInput, Zero}, Perbill, Saturating, }; -use sp_std::prelude::*; + +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; mod v9 { use super::*; diff --git a/substrate/frame/contracts/src/migration/v11.rs b/substrate/frame/contracts/src/migration/v11.rs index 9b4316162ca62..bd128e22869f4 100644 --- a/substrate/frame/contracts/src/migration/v11.rs +++ b/substrate/frame/contracts/src/migration/v11.rs @@ -23,11 +23,13 @@ use crate::{ weights::WeightInfo, Config, Pallet, TrieId, Weight, LOG_TARGET, }; +use alloc::vec::Vec; use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::{pallet_prelude::*, storage_alias, weights::WeightMeter, DefaultNoBound}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; -use sp_std::{marker::PhantomData, prelude::*}; + mod v10 { use super::*; diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 
aad51a9edcab2..3c33591e43514 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -23,6 +23,7 @@ use crate::{ weights::WeightInfo, AccountIdOf, BalanceOf, CodeHash, Config, Determinism, Pallet, Weight, LOG_TARGET, }; +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::{ pallet_prelude::*, storage_alias, traits::ReservableCurrency, weights::WeightMeter, @@ -33,7 +34,6 @@ use sp_core::hexdisplay::HexDisplay; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; -use sp_std::prelude::*; mod v11 { use super::*; @@ -109,7 +109,7 @@ where { use sp_runtime::traits::Hash; - let code = vec![42u8; len]; + let code = alloc::vec![42u8; len]; let hash = T::Hashing::hash(&code); PristineCode::::insert(hash, code.clone()); diff --git a/substrate/frame/contracts/src/migration/v13.rs b/substrate/frame/contracts/src/migration/v13.rs index 6929bbce28e59..d1888b338dac7 100644 --- a/substrate/frame/contracts/src/migration/v13.rs +++ b/substrate/frame/contracts/src/migration/v13.rs @@ -26,7 +26,6 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, storage_alias, weights::WeightMeter, DefaultNoBound}; use sp_runtime::BoundedBTreeMap; -use sp_std::prelude::*; mod v12 { use super::*; diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index 017fd6d0c15b7..11336fe2e7d35 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -26,6 +26,8 @@ use crate::{ weights::WeightInfo, BalanceOf, CodeHash, Config, Determinism, HoldReason, Pallet, Weight, LOG_TARGET, }; +#[cfg(feature = "try-runtime")] +use alloc::collections::btree_map::BTreeMap; use codec::{Decode, Encode}; #[cfg(feature = "try-runtime")] use environmental::Vec; @@ -42,8 +44,6 @@ use sp_core::hexdisplay::HexDisplay; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, Saturating}; -#[cfg(feature = "try-runtime")] -use sp_std::collections::btree_map::BTreeMap; mod v13 { use super::*; @@ -80,8 +80,8 @@ where T: Config, OldCurrency: ReservableCurrency<::AccountId> + 'static, { + use alloc::vec; use sp_runtime::traits::Hash; - use sp_std::vec; let len = T::MaxCodeLen::get(); let code = vec![42u8; len as usize]; diff --git a/substrate/frame/contracts/src/migration/v15.rs b/substrate/frame/contracts/src/migration/v15.rs index 3c700d1c0b021..11f07282d5614 100644 --- a/substrate/frame/contracts/src/migration/v15.rs +++ b/substrate/frame/contracts/src/migration/v15.rs @@ -28,6 +28,8 @@ use crate::{ AccountIdOf, BalanceOf, CodeHash, Config, HoldReason, Pallet, TrieId, Weight, LOG_TARGET, }; #[cfg(feature = "try-runtime")] +use alloc::vec::Vec; +#[cfg(feature = "try-runtime")] use frame_support::traits::fungible::InspectHold; use frame_support::{ pallet_prelude::*, @@ -44,8 +46,6 @@ use sp_core::hexdisplay::HexDisplay; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, Saturating}; -#[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; mod v14 { use super::*; diff --git a/substrate/frame/contracts/src/migration/v16.rs b/substrate/frame/contracts/src/migration/v16.rs index 74fbc997718d6..3d5b2d2a85fcd 100644 --- a/substrate/frame/contracts/src/migration/v16.rs +++ b/substrate/frame/contracts/src/migration/v16.rs @@ -26,7 +26,6 @@ use crate::{ use codec::{Decode, Encode}; use 
frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_runtime::{BoundedBTreeMap, Saturating}; -use sp_std::prelude::*; #[cfg(feature = "runtime-benchmarks")] pub fn store_old_contract_info( diff --git a/substrate/frame/contracts/src/primitives.rs b/substrate/frame/contracts/src/primitives.rs index ab73b28e8c49f..622a69f529fb5 100644 --- a/substrate/frame/contracts/src/primitives.rs +++ b/substrate/frame/contracts/src/primitives.rs @@ -17,6 +17,7 @@ //! A crate that hosts a common definitions that are relevant for the pallet-contracts. +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_contracts_uapi::ReturnFlags; @@ -25,7 +26,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::prelude::*; /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 1e9739a1599eb..65e7129cdf845 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -25,7 +25,9 @@ use crate::{ BalanceOf, CodeHash, CodeInfo, Config, ContractInfoOf, DeletionQueue, DeletionQueueCounter, Error, TrieId, SENTINEL, }; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; +use core::marker::PhantomData; use frame_support::{ storage::child::{self, ChildInfo}, weights::{Weight, WeightMeter}, @@ -38,7 +40,6 @@ use sp_runtime::{ traits::{Hash, Saturating, Zero}, BoundedBTreeMap, DispatchError, DispatchResult, RuntimeDebug, }; -use sp_std::{marker::PhantomData, prelude::*}; use self::meter::Diff; @@ -334,7 +335,7 @@ impl ContractInfo { } /// Information about what happened to the pre-existing value when calling [`ContractInfo::write`]. -#[cfg_attr(test, derive(Debug, PartialEq))] +#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] pub enum WriteOutcome { /// No value existed at the specified key. New, diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs index 7c55ce5d3f0c4..951cb25994e1f 100644 --- a/substrate/frame/contracts/src/storage/meter.rs +++ b/substrate/frame/contracts/src/storage/meter.rs @@ -22,6 +22,8 @@ use crate::{ Inspect, Origin, Pallet, StorageDeposit as Deposit, System, LOG_TARGET, }; +use alloc::vec::Vec; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ ensure, traits::{ @@ -37,7 +39,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, FixedPointNumber, FixedU128, }; -use sp_std::{fmt::Debug, marker::PhantomData, vec::Vec}; /// Deposit that uses the native fungible's balance type. pub type DepositOf = Deposit>; diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index c20577a3f6458..cc2a69b5c4196 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -411,6 +411,7 @@ parameter_types! { pub static DepositPerByte: BalanceOf = 1; pub const DepositPerItem: BalanceOf = 2; pub static MaxDelegateDependencies: u32 = 32; + pub static MaxTransientStorageSize: u32 = 4 * 1024; pub static CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); // We need this one set high enough for running benchmarks. @@ -458,7 +459,7 @@ parameter_types! 
{ pub static InstantiateAccount: Option<::AccountId> = None; } -pub struct EnsureAccount(sp_std::marker::PhantomData<(T, A)>); +pub struct EnsureAccount(core::marker::PhantomData<(T, A)>); impl>>> EnsureOrigin<::RuntimeOrigin> for EnsureAccount where @@ -504,6 +505,7 @@ impl Config for Test { type Migrations = crate::migration::codegen::BenchMigrations; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxTransientStorageSize = MaxTransientStorageSize; type Debug = TestDebug; } @@ -962,6 +964,68 @@ fn storage_max_value_limit() { }); } +#[test] +fn transient_storage_work() { + let (code, _code_hash) = compile_module::("transient_storage").unwrap(); + + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); + + builder::bare_call(addr).build_and_unwrap_result(); + }); +} + +#[test] +fn transient_storage_limit_in_call() { + let (wasm_caller, _code_hash_caller) = + compile_module::("create_transient_storage_and_call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module::("set_transient_storage").unwrap(); + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + // Create both contracts: Constructors do nothing. + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); + let addr_callee = builder::bare_instantiate(Code::Upload(wasm_callee)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); + + let storage_value_size = 1000; + MaxTransientStorageSize::set(4 * 1024); + // Call contracts with storage values within the limit. + // Caller and Callee contracts each set a transient storage value of size 1000. + assert_ok!(builder::call(addr_caller.clone()) + .data((storage_value_size, storage_value_size, &addr_callee).encode()) + .build(),); + + MaxTransientStorageSize::set(512); + // Call a contract with a storage value that is too large. + // Limit exceeded in the caller contract. + assert_err_ignore_postinfo!( + builder::call(addr_caller.clone()) + .data((storage_value_size, storage_value_size, &addr_callee).encode()) + .build(), + >::OutOfTransientStorage, + ); + + MaxTransientStorageSize::set(1536); + // Call a contract with a storage value that is too large. + // Limit exceeded in the callee contract. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data((storage_value_size, storage_value_size, &addr_callee).encode()) + .build(), + >::ContractTrapped + ); + }); +} + #[test] fn deploy_and_call_other_contract() { let (caller_wasm, _caller_code_hash) = compile_module::("caller_contract").unwrap(); diff --git a/substrate/frame/contracts/src/transient_storage.rs b/substrate/frame/contracts/src/transient_storage.rs new file mode 100644 index 0000000000000..c795a966385a9 --- /dev/null +++ b/substrate/frame/contracts/src/transient_storage.rs @@ -0,0 +1,698 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This module contains routines for accessing and altering a contract transient storage. + +use crate::{ + exec::{AccountIdOf, Key}, + storage::WriteOutcome, + Config, Error, +}; +use codec::Encode; +use core::marker::PhantomData; +use frame_support::DefaultNoBound; +use sp_runtime::{DispatchError, DispatchResult, Saturating}; +use sp_std::{collections::btree_map::BTreeMap, mem, vec::Vec}; + +/// Meter entry tracks transaction allocations. +#[derive(Default, Debug)] +pub struct MeterEntry { + /// Allocations made in the current transaction. + pub amount: u32, + /// Allocations limit in the current transaction. + pub limit: u32, +} + +impl MeterEntry { + /// Create a new entry. + fn new(limit: u32) -> Self { + Self { limit, amount: Default::default() } + } + + /// Check if the allocated amount exceeds the limit. + fn exceeds_limit(&self, amount: u32) -> bool { + self.amount.saturating_add(amount) > self.limit + } + + /// Absorb the allocation amount of the nested entry into the current entry. + fn absorb(&mut self, rhs: Self) { + self.amount.saturating_accrue(rhs.amount) + } +} + +// The storage meter enforces a limit for each transaction, +// which is calculated as free_storage * (1 - 1/16) for each subsequent frame. +#[derive(DefaultNoBound)] +pub struct StorageMeter { + nested_meters: Vec, + root_meter: MeterEntry, + _phantom: PhantomData, +} + +impl StorageMeter { + const STORAGE_FRACTION_DENOMINATOR: u32 = 16; + /// Create a new storage allocation meter. + fn new(memory_limit: u32) -> Self { + Self { root_meter: MeterEntry::new(memory_limit), ..Default::default() } + } + + /// Charge the allocated amount of transaction storage from the meter. + fn charge(&mut self, amount: u32) -> DispatchResult { + let meter = self.current_mut(); + if meter.exceeds_limit(amount) { + return Err(Error::::OutOfTransientStorage.into()); + } + meter.amount.saturating_accrue(amount); + Ok(()) + } + + /// Revert a transaction meter. + fn revert(&mut self) { + self.nested_meters.pop().expect( + "A call to revert a meter must be preceded by a corresponding call to start a meter; + the code within this crate makes sure that this is always the case; qed", + ); + } + + /// Start a transaction meter. + fn start(&mut self) { + let meter = self.current(); + let mut transaction_limit = meter.limit.saturating_sub(meter.amount); + if !self.nested_meters.is_empty() { + // Allow use of (1 - 1/STORAGE_FRACTION_DENOMINATOR) of free storage for subsequent + // calls. + transaction_limit.saturating_reduce( + transaction_limit.saturating_div(Self::STORAGE_FRACTION_DENOMINATOR), + ); + } + + self.nested_meters.push(MeterEntry::new(transaction_limit)); + } + + /// Commit a transaction meter. + fn commit(&mut self) { + let transaction_meter = self.nested_meters.pop().expect( + "A call to commit a meter must be preceded by a corresponding call to start a meter; + the code within this crate makes sure that this is always the case; qed", + ); + self.current_mut().absorb(transaction_meter) + } + + /// The total allocated amount of memory. 
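// The limit rule in `start` above, worked through with illustrative numbers
// (assuming a 1024-byte parent limit with 100 bytes already charged):
//   free         = 1024 - 100       = 924
//   nested limit = free - free / 16 = 924 - 57 = 867
// so each nested frame may allocate at most roughly 15/16 of its parent's
// remaining allowance.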
+ #[cfg(test)] + fn total_amount(&self) -> u32 { + self.nested_meters + .iter() + .fold(self.root_meter.amount, |acc, e| acc.saturating_add(e.amount)) + } + + /// A mutable reference to the current meter entry. + pub fn current_mut(&mut self) -> &mut MeterEntry { + self.nested_meters.last_mut().unwrap_or(&mut self.root_meter) + } + + /// A reference to the current meter entry. + pub fn current(&self) -> &MeterEntry { + self.nested_meters.last().unwrap_or(&self.root_meter) + } +} + +/// An entry representing a journal change. +struct JournalEntry { + key: Vec, + prev_value: Option>, +} + +impl JournalEntry { + /// Create a new change. + fn new(key: Vec, prev_value: Option>) -> Self { + Self { key, prev_value } + } + + /// Revert the change. + fn revert(self, storage: &mut Storage) { + storage.write(&self.key, self.prev_value); + } +} + +/// A journal containing transient storage modifications. +struct Journal(Vec); + +impl Journal { + /// Create a new journal. + fn new() -> Self { + Self(Default::default()) + } + + /// Add a change to the journal. + fn push(&mut self, entry: JournalEntry) { + self.0.push(entry); + } + + /// Length of the journal. + fn len(&self) -> usize { + self.0.len() + } + + /// Roll back all journal changes until the chackpoint + fn rollback(&mut self, storage: &mut Storage, checkpoint: usize) { + self.0.drain(checkpoint..).rev().for_each(|entry| entry.revert(storage)); + } +} + +/// Storage for maintaining the current transaction state. +#[derive(Default)] +struct Storage(BTreeMap, Vec>); + +impl Storage { + /// Read the storage entry. + fn read(&self, key: &Vec) -> Option> { + self.0.get(key).cloned() + } + + /// Write the storage entry. + fn write(&mut self, key: &Vec, value: Option>) -> Option> { + if let Some(value) = value { + // Insert storage entry. + self.0.insert(key.clone(), value) + } else { + // Remove storage entry. + self.0.remove(key) + } + } +} + +/// Transient storage behaves almost identically to regular storage but is discarded after each +/// transaction. It consists of a `BTreeMap` for the current state, a journal of all changes, and a +/// list of checkpoints. On entry to the `start_transaction` function, a marker (checkpoint) is +/// added to the list. New values are written to the current state, and the previous value is +/// recorded in the journal (`write`). When the `commit_transaction` function is called, the marker +/// to the journal index (checkpoint) of when that call was entered is discarded. +/// On `rollback_transaction`, all entries are reverted up to the last checkpoint. +pub struct TransientStorage { + // The storage and journal size is limited by the storage meter. + storage: Storage, + journal: Journal, + // The size of the StorageMeter is limited by the stack depth. + meter: StorageMeter, + // The size of the checkpoints is limited by the stack depth. + checkpoints: Vec, +} + +impl TransientStorage { + /// Create new transient storage with the supplied memory limit. + pub fn new(memory_limit: u32) -> Self { + TransientStorage { + storage: Default::default(), + journal: Journal::new(), + checkpoints: Default::default(), + meter: StorageMeter::new(memory_limit), + } + } + + /// Read the storage value. If the entry does not exist, `None` is returned. + pub fn read(&self, account: &AccountIdOf, key: &Key) -> Option> { + self.storage.read(&Self::storage_key(&account.encode(), &key.hash())) + } + + /// Write a value to storage. + /// + /// If the `value` is `None`, then the entry is removed. 
If `take` is true, + /// a [`WriteOutcome::Taken`] is returned instead of a [`WriteOutcome::Overwritten`]. + /// If the entry did not exist, [`WriteOutcome::New`] is returned. + pub fn write( + &mut self, + account: &AccountIdOf, + key: &Key, + value: Option>, + take: bool, + ) -> Result { + let key = Self::storage_key(&account.encode(), &key.hash()); + let prev_value = self.storage.read(&key); + // Skip if the same value is being set. + if prev_value != value { + // Calculate the allocation size. + if let Some(value) = &value { + // Charge the key, value and journal entry. + // If a new value is written, a new journal entry is created. The previous value is + // moved to the journal along with its key, and the new value is written to + // storage. + let key_len = key.capacity(); + let mut amount = value + .capacity() + .saturating_add(key_len) + .saturating_add(mem::size_of::()); + if prev_value.is_none() { + // Charge a new storage entry. + // If there was no previous value, a new entry is added to storage (BTreeMap) + // containing a Vec for the key and a Vec for the value. The value was already + // included in the amount. + amount.saturating_accrue(key_len.saturating_add(mem::size_of::>())); + } + self.meter.charge(amount as _)?; + } + self.storage.write(&key, value); + // Update the journal. + self.journal.push(JournalEntry::new(key, prev_value.clone())); + } + + Ok(match (take, prev_value) { + (_, None) => WriteOutcome::New, + (false, Some(prev_value)) => WriteOutcome::Overwritten(prev_value.len() as _), + (true, Some(prev_value)) => WriteOutcome::Taken(prev_value), + }) + } + + /// Start a new nested transaction. + /// + /// This allows to either commit or roll back all changes that are made after this call. + /// For every transaction there must be a matching call to either `rollback_transaction` + /// or `commit_transaction`. + pub fn start_transaction(&mut self) { + self.meter.start(); + self.checkpoints.push(self.journal.len()); + } + + /// Rollback the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are discarded. + /// + /// # Panics + /// + /// Will panic if there is no open transaction. + pub fn rollback_transaction(&mut self) { + let checkpoint = self + .checkpoints + .pop() + .expect( + "A call to rollback_transaction must be preceded by a corresponding call to start_transaction; + the code within this crate makes sure that this is always the case; qed" + ); + self.meter.revert(); + self.journal.rollback(&mut self.storage, checkpoint); + } + + /// Commit the last transaction started by `start_transaction`. + /// + /// Any changes made during that transaction are committed. + /// + /// # Panics + /// + /// Will panic if there is no open transaction. + pub fn commit_transaction(&mut self) { + self.checkpoints + .pop() + .expect( + "A call to commit_transaction must be preceded by a corresponding call to start_transaction; + the code within this crate makes sure that this is always the case; qed" + ); + self.meter.commit(); + } + + /// The storage allocation meter used for transaction metering. 
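// Charge accounting in `write` above, for a brand-new entry (a sketch; the
// `size_of` values assume a typical 64-bit target):
//   amount = value.capacity() + key.capacity() + size_of::<JournalEntry>()  // changed entry + journal record
//          + key.capacity() + size_of::<Vec<u8>>()                          // fresh BTreeMap entry
// e.g. a 32-byte key and a 100-byte value cost 100 + 32 + 48 + 32 + 24 = 236 bytes.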
+ #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn meter(&mut self) -> &mut StorageMeter { + return &mut self.meter + } + + fn storage_key(account: &[u8], key: &[u8]) -> Vec { + let mut storage_key = Vec::with_capacity(account.len() + key.len()); + storage_key.extend_from_slice(&account); + storage_key.extend_from_slice(&key); + storage_key + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + tests::{Test, ALICE, BOB, CHARLIE}, + Error, + }; + use core::u32::MAX; + + // Calculate the allocation size for the given entry. + fn allocation_size( + account: &AccountIdOf, + key: &Key, + value: Option>, + ) -> u32 { + let mut storage: TransientStorage = TransientStorage::::new(MAX); + storage + .write(account, key, value, false) + .expect("Could not write to transient storage."); + storage.meter().current().amount + } + + #[test] + fn read_write_works() { + let mut storage: TransientStorage = TransientStorage::::new(2048); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![2]), true), + Ok(WriteOutcome::New) + ); + assert_eq!( + storage.write(&BOB, &Key::Fix([3; 32]), Some(vec![3]), false), + Ok(WriteOutcome::New) + ); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1])); + assert_eq!(storage.read(&ALICE, &Key::Fix([2; 32])), Some(vec![2])); + assert_eq!(storage.read(&BOB, &Key::Fix([3; 32])), Some(vec![3])); + // Overwrite values. + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![4, 5]), false), + Ok(WriteOutcome::Overwritten(1)) + ); + assert_eq!( + storage.write(&BOB, &Key::Fix([3; 32]), Some(vec![6, 7]), true), + Ok(WriteOutcome::Taken(vec![3])) + ); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1])); + assert_eq!(storage.read(&ALICE, &Key::Fix([2; 32])), Some(vec![4, 5])); + assert_eq!(storage.read(&BOB, &Key::Fix([3; 32])), Some(vec![6, 7])); + + // Check for an empty value. + assert_eq!( + storage.write(&BOB, &Key::Fix([3; 32]), Some(vec![]), true), + Ok(WriteOutcome::Taken(vec![6, 7])) + ); + assert_eq!(storage.read(&BOB, &Key::Fix([3; 32])), Some(vec![])); + + assert_eq!( + storage.write(&BOB, &Key::Fix([3; 32]), None, true), + Ok(WriteOutcome::Taken(vec![])) + ); + assert_eq!(storage.read(&BOB, &Key::Fix([3; 32])), None); + } + + #[test] + fn read_write_with_var_sized_keys_works() { + let mut storage = TransientStorage::::new(2048); + assert_eq!( + storage.write( + &ALICE, + &Key::::try_from_var([1; 64].to_vec()).unwrap(), + Some(vec![1]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + storage.write( + &BOB, + &Key::::try_from_var([2; 64].to_vec()).unwrap(), + Some(vec![2, 3]), + false + ), + Ok(WriteOutcome::New) + ); + assert_eq!( + storage.read(&ALICE, &Key::::try_from_var([1; 64].to_vec()).unwrap()), + Some(vec![1]) + ); + assert_eq!( + storage.read(&BOB, &Key::::try_from_var([2; 64].to_vec()).unwrap()), + Some(vec![2, 3]) + ); + // Overwrite values. 
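// (with `take = false` the outcome only reports the previous length, e.g.
// `Overwritten(1)` below; `take = true` would return the old bytes as `Taken`).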
+ assert_eq!( + storage.write( + &ALICE, + &Key::::try_from_var([1; 64].to_vec()).unwrap(), + Some(vec![4, 5]), + false + ), + Ok(WriteOutcome::Overwritten(1)) + ); + assert_eq!( + storage.read(&ALICE, &Key::::try_from_var([1; 64].to_vec()).unwrap()), + Some(vec![4, 5]) + ); + } + + #[test] + fn rollback_transaction_works() { + let mut storage = TransientStorage::::new(1024); + + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.rollback_transaction(); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), None) + } + + #[test] + fn commit_transaction_works() { + let mut storage = TransientStorage::::new(1024); + + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.commit_transaction(); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1])) + } + + #[test] + fn overwrite_and_commmit_transaction_works() { + let mut storage = TransientStorage::::new(1024); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1, 2]), false), + Ok(WriteOutcome::Overwritten(1)) + ); + storage.commit_transaction(); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1, 2])) + } + + #[test] + fn rollback_in_nested_transaction_works() { + let mut storage = TransientStorage::::new(1024); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&BOB, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.rollback_transaction(); + storage.commit_transaction(); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1])); + assert_eq!(storage.read(&BOB, &Key::Fix([1; 32])), None) + } + + #[test] + fn commit_in_nested_transaction_works() { + let mut storage = TransientStorage::::new(1024); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&BOB, &Key::Fix([1; 32]), Some(vec![2]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&CHARLIE, &Key::Fix([1; 32]), Some(vec![3]), false), + Ok(WriteOutcome::New) + ); + storage.commit_transaction(); + storage.commit_transaction(); + storage.commit_transaction(); + assert_eq!(storage.read(&ALICE, &Key::Fix([1; 32])), Some(vec![1])); + assert_eq!(storage.read(&BOB, &Key::Fix([1; 32])), Some(vec![2])); + assert_eq!(storage.read(&CHARLIE, &Key::Fix([1; 32])), Some(vec![3])); + } + + #[test] + fn rollback_all_transactions_works() { + let mut storage = TransientStorage::::new(1024); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&BOB, &Key::Fix([1; 32]), Some(vec![2]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&CHARLIE, &Key::Fix([1; 32]), Some(vec![3]), false), + Ok(WriteOutcome::New) + ); + storage.commit_transaction(); + storage.commit_transaction(); + storage.rollback_transaction(); + assert_eq!(storage.read(&ALICE, 
&Key::Fix([1; 32])), None); + assert_eq!(storage.read(&BOB, &Key::Fix([1; 32])), None); + assert_eq!(storage.read(&CHARLIE, &Key::Fix([1; 32])), None); + } + + #[test] + fn metering_transactions_works() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size * 2); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + let limit = storage.meter().current().limit; + storage.commit_transaction(); + + storage.start_transaction(); + assert_eq!(storage.meter().current().limit, limit - size); + assert_eq!(storage.meter().current().limit - storage.meter().current().amount, size); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + assert_eq!(storage.meter().current().amount, size); + storage.commit_transaction(); + assert_eq!(storage.meter().total_amount(), size * 2); + } + + #[test] + fn metering_nested_transactions_works() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size * 3); + + storage.start_transaction(); + let limit = storage.meter().current().limit; + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!(storage.meter().total_amount(), size); + assert!(storage.meter().current().limit < limit - size); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.commit_transaction(); + assert_eq!(storage.meter().current().limit, limit); + assert_eq!(storage.meter().total_amount(), storage.meter().current().amount); + storage.commit_transaction(); + } + + #[test] + fn metering_transaction_fails() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size - 1); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Err(Error::::OutOfTransientStorage.into()) + ); + assert_eq!(storage.meter.current().amount, 0); + storage.commit_transaction(); + assert_eq!(storage.meter.total_amount(), 0); + } + + #[test] + fn metering_nested_transactions_fails() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size * 2); + + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![1u8; 4096]), false), + Err(Error::::OutOfTransientStorage.into()) + ); + storage.commit_transaction(); + storage.commit_transaction(); + assert_eq!(storage.meter.total_amount(), size); + } + + #[test] + fn metering_nested_transaction_with_rollback_works() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size * 2); + + storage.start_transaction(); + let limit = storage.meter.current().limit; + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.rollback_transaction(); + + assert_eq!(storage.meter.total_amount(), 0); + assert_eq!(storage.meter.current().limit, limit); + 
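// The rolled-back charge has been released, so this write is metered against
// the outer frame's untouched limit captured above.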
assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + let amount = storage.meter().current().amount; + assert_eq!(storage.meter().total_amount(), amount); + storage.commit_transaction(); + } + + #[test] + fn metering_with_rollback_works() { + let size = allocation_size(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096])); + let mut storage = TransientStorage::::new(size * 5); + + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + let amount = storage.meter.total_amount(); + storage.start_transaction(); + assert_eq!( + storage.write(&ALICE, &Key::Fix([2; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.start_transaction(); + assert_eq!( + storage.write(&BOB, &Key::Fix([1; 32]), Some(vec![1u8; 4096]), false), + Ok(WriteOutcome::New) + ); + storage.commit_transaction(); + storage.rollback_transaction(); + assert_eq!(storage.meter.total_amount(), amount); + storage.commit_transaction(); + } +} diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 0d65d696758d0..f4ee76459c4ea 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -48,6 +48,7 @@ use crate::{ AccountIdOf, BadOrigin, BalanceOf, CodeHash, CodeInfoOf, CodeVec, Config, Error, Event, HoldReason, Pallet, PristineCode, Schedule, Weight, LOG_TARGET, }; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::DispatchResult, @@ -56,7 +57,6 @@ use frame_support::{ }; use sp_core::Get; use sp_runtime::{DispatchError, RuntimeDebug}; -use sp_std::prelude::*; use wasmi::{CompilationMode, InstancePre, Linker, Memory, MemoryType, StackLimits, Store}; const BYTES_PER_PAGE: usize = 64 * 1024; @@ -506,6 +506,7 @@ mod tests { primitives::ExecReturnValue, storage::WriteOutcome, tests::{RuntimeCall, Test, ALICE, BOB}, + transient_storage::TransientStorage, BalanceOf, CodeHash, Error, Origin, Pallet as Contracts, }; use assert_matches::assert_matches; @@ -563,6 +564,7 @@ mod tests { pub struct MockExt { storage: HashMap, Vec>, + transient_storage: TransientStorage, instantiates: Vec, terminations: Vec, calls: Vec, @@ -591,6 +593,7 @@ mod tests { Self { code_hashes: Default::default(), storage: Default::default(), + transient_storage: TransientStorage::new(1024 * 1024), instantiates: Default::default(), terminations: Default::default(), calls: Default::default(), @@ -691,6 +694,21 @@ mod tests { } Ok(result) } + fn get_transient_storage(&self, key: &Key) -> Option> { + self.transient_storage.read(self.address(), key) + } + fn get_transient_storage_size(&self, key: &Key) -> Option { + self.transient_storage.read(self.address(), key).map(|value| value.len() as _) + } + fn set_transient_storage( + &mut self, + key: &Key, + value: Option>, + take_old: bool, + ) -> Result { + let account_id = self.address().clone(); + self.transient_storage.write(&account_id, key, value, take_old) + } fn caller(&self) -> Origin { self.caller.clone() } @@ -784,6 +802,10 @@ mod tests { fn contract_info(&mut self) -> &mut crate::ContractInfo { unimplemented!() } + #[cfg(feature = "runtime-benchmarks")] + fn transient_storage(&mut self) -> &mut TransientStorage { + unimplemented!() + } fn ecdsa_to_eth_address(&self, _pk: &[u8; 33]) -> Result<[u8; 20], ()> { Ok([2u8; 20]) } @@ -3046,6 +3068,337 @@ mod tests { assert_eq!(&result.data[4..], &[0u8; 0]); } + 
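// A minimal sketch of the `MockExt` transient hooks defined above, assuming the
// mock types from this module (illustrative only, not exercised by these tests):
#[allow(dead_code)]
fn transient_round_trip_sketch() {
    let mut ext = MockExt::default();
    let key = Key::<Test>::try_from_var(vec![7u8; 32]).unwrap();
    // A fresh key reports `New`; reads then observe the value and its size.
    assert_eq!(ext.set_transient_storage(&key, Some(vec![1, 2, 3]), false), Ok(WriteOutcome::New));
    assert_eq!(ext.get_transient_storage(&key), Some(vec![1, 2, 3]));
    assert_eq!(ext.get_transient_storage_size(&key), Some(3));
}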
#[test] + fn set_transient_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "set_transient_storage" (func $set_transient_storage (param i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 4) size of input buffer + ;; 4k in little endian + (data (i32.const 0) "\00\10") + + ;; [4, 4100) input buffer + + (func (export "call") + ;; Receive (key ++ value_to_write) + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the input buffer + ) + ;; Store the passed value to the passed key and store result to memory + (i32.store (i32.const 168) + (call $set_transient_storage + (i32.const 8) ;; key_ptr + (i32.load (i32.const 4)) ;; key_len + (i32.add ;; value_ptr = 8 + key_len + (i32.const 8) + (i32.load (i32.const 4))) + (i32.sub ;; value_len (input_size - (key_len + key_len_len)) + (i32.load (i32.const 0)) + (i32.add + (i32.load (i32.const 4)) + (i32.const 4) + ) + ) + ) + ) + (call $seal_return + (i32.const 0) ;; flags + (i32.const 168) ;; ptr to returned value + (i32.const 4) ;; length of returned value + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); + + // value did not exist before -> sentinel returned + let input = (32, [1u8; 32], [42u8, 48]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([1u8; 32].to_vec()).unwrap()), + Some(vec![42, 48]) + ); + + // value do exist -> length of old value returned + let input = (32, [1u8; 32], [0u8; 0]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([1u8; 32].to_vec()).unwrap()), + Some(vec![]) + ); + + // value do exist -> length of old value returned (test for zero sized val) + let input = (32, [1u8; 32], [99u8]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([1u8; 32].to_vec()).unwrap()), + Some(vec![99]) + ); + } + + #[test] + fn get_transient_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "get_transient_storage" (func $get_transient_storage (param i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) + (data (i32.const 0) "\A0") + + ;; [4, 8) size of output buffer + ;; 4k in little endian + (data (i32.const 4) "\00\10") + + ;; [8, 168) input buffer + ;; [168, 4264) output buffer + + (func (export "call") + ;; Receive (key ++ value_to_write) + (call $seal_input + (i32.const 8) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the input buffer + ) + ;; Load a storage value and result of this call into the output buffer + (i32.store (i32.const 168) + (call $get_transient_storage + (i32.const 12) ;; key_ptr + (i32.load (i32.const 8)) ;; key_len + (i32.const 172) ;; Pointer to the output buffer + (i32.const 4) ;; Pointer to the size of the buffer + ) + ) + (call $seal_return + 
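;; return the (result code ++ value) written at offset 168 back to the caller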
(i32.const 0) ;; flags + (i32.const 168) ;; output buffer ptr + (i32.add ;; length: output size + 4 (retval) + (i32.load (i32.const 4)) + (i32.const 4) + ) + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); + + assert_ok!(ext.set_transient_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), + Some(vec![42u8]), + false + )); + assert_ok!(ext.set_transient_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), + Some(vec![]), + false + )); + + // value does not exist + let input = (63, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::KeyNotFound as u32 + ); + + // value exists + let input = (64, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::Success as u32 + ); + assert_eq!(&result.data[4..], &[42u8]); + + // value exists (test for 0 sized) + let input = (19, [2u8; 19]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::Success as u32 + ); + assert_eq!(&result.data[4..], &([] as [u8; 0])); + } + + #[test] + fn clear_transient_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "clear_transient_storage" (func $clear_transient_storage (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; size of input buffer + ;; [0, 4) size of input buffer (128+32 = 160 bytes = 0xA0) + (data (i32.const 0) "\A0") + + ;; [4, 164) input buffer + + (func (export "call") + ;; Receive key + (call $seal_input + (i32.const 4) ;; Where we take input and store it + (i32.const 0) ;; Where we take and store the length of thedata + ) + ;; Call seal_clear_storage and save what it returns at 0 + (i32.store (i32.const 0) + (call $clear_transient_storage + (i32.const 8) ;; key_ptr + (i32.load (i32.const 4)) ;; key_len + ) + ) + (call $seal_return + (i32.const 0) ;; flags + (i32.const 0) ;; returned value + (i32.const 4) ;; length of returned value + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); + + assert_ok!(ext.set_transient_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), + Some(vec![42u8]), + false + )); + + // value did not exist + let input = (32, [3u8; 32]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + // sentinel returned + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); + + // value did exist + let input = (64, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + // length returned + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); + // value cleared + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([1u8; 64].to_vec()).unwrap()), + None + ); + } + + #[test] + fn take_transient_storage_works() { + const CODE: &str = r#" +(module + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "take_transient_storage" (func $take_transient_storage (param i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0, 4) size of input buffer (160 bytes as we copy the key+len 
here) + (data (i32.const 0) "\A0") + + ;; [4, 8) size of output buffer + ;; 4k in little endian + (data (i32.const 4) "\00\10") + + ;; [8, 168) input buffer + ;; [168, 4264) output buffer + + (func (export "call") + ;; Receive key + (call $seal_input + (i32.const 8) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Load a storage value and result of this call into the output buffer + (i32.store (i32.const 168) + (call $take_transient_storage + (i32.const 12) ;; key_ptr + (i32.load (i32.const 8)) ;; key_len + (i32.const 172) ;; Pointer to the output buffer + (i32.const 4) ;; Pointer to the size of the buffer + ) + ) + + ;; Return the contents of the buffer + (call $seal_return + (i32.const 0) ;; flags + (i32.const 168) ;; output buffer ptr + (i32.add ;; length: storage size + 4 (retval) + (i32.load (i32.const 4)) + (i32.const 4) + ) + ) + ) + + (func (export "deploy")) +) +"#; + + let mut ext = MockExt::default(); + + assert_ok!(ext.set_transient_storage( + &Key::::try_from_var([1u8; 64].to_vec()).unwrap(), + Some(vec![42u8]), + false + )); + assert_ok!(ext.set_transient_storage( + &Key::::try_from_var([2u8; 19].to_vec()).unwrap(), + Some(vec![]), + false + )); + + // value does not exist -> error returned + let input = (63, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::KeyNotFound as u32 + ); + + // value did exist -> value returned + let input = (64, [1u8; 64]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::Success as u32 + ); + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([1u8; 64].to_vec()).unwrap()), + None + ); + assert_eq!(&result.data[4..], &[42u8]); + + // value did exist -> length returned (test for 0 sized) + let input = (19, [2u8; 19]).encode(); + let result = execute(CODE, input, &mut ext).unwrap(); + assert_eq!( + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), + ReturnErrorCode::Success as u32 + ); + assert_eq!( + ext.get_transient_storage(&Key::::try_from_var([2u8; 19].to_vec()).unwrap()), + None + ); + assert_eq!(&result.data[4..], &[0u8; 0]); + } + #[test] fn is_contract_works() { const CODE_IS_CONTRACT: &str = r#" diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 50eb6d625321a..93fe3080d22db 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -28,10 +28,10 @@ use crate::{ }, AccountIdOf, CodeVec, Config, Error, Schedule, LOG_TARGET, }; +#[cfg(any(test, feature = "runtime-benchmarks"))] +use alloc::vec::Vec; use codec::MaxEncodedLen; use sp_runtime::{traits::Hash, DispatchError}; -#[cfg(any(test, feature = "runtime-benchmarks"))] -use sp_std::prelude::Vec; use wasmi::{ core::ValType as WasmiValueType, CompilationMode, Config as WasmiConfig, Engine, ExternType, Module, StackLimits, @@ -56,7 +56,7 @@ pub enum LoadingMode { #[cfg(test)] pub mod tracker { - use sp_std::cell::RefCell; + use core::cell::RefCell; thread_local! 
{ pub static LOADED_MODULE: RefCell> = RefCell::new(Vec::new()); } diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 5f50dbf391a28..982d28540ec1e 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -24,7 +24,9 @@ use crate::{ weights::WeightInfo, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use core::fmt; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, traits::Get, weights::Weight, @@ -36,7 +38,6 @@ use sp_runtime::{ traits::{Bounded, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; type CallOf = ::RuntimeCall; @@ -198,6 +199,16 @@ pub enum RuntimeCosts { GetStorage(u32), /// Weight of calling `seal_take_storage` for the given size. TakeStorage(u32), + /// Weight of calling `seal_set_transient_storage` for the given storage item sizes. + SetTransientStorage { old_bytes: u32, new_bytes: u32 }, + /// Weight of calling `seal_clear_transient_storage` per cleared byte. + ClearTransientStorage(u32), + /// Weight of calling `seal_contains_transient_storage` per byte of the checked item. + ContainsTransientStorage(u32), + /// Weight of calling `seal_get_transient_storage` with the specified size in storage. + GetTransientStorage(u32), + /// Weight of calling `seal_take_transient_storage` for the given size. + TakeTransientStorage(u32), /// Weight of calling `seal_transfer`. Transfer, /// Base weight of calling `seal_call`. @@ -244,6 +255,34 @@ pub enum RuntimeCosts { UnlockDelegateDependency, } +// For the function that modifies the storage, the benchmarks are done with one item in the +// transient_storage (BTreeMap). To consider the worst-case scenario, the weight of the overhead of +// writing to a full BTreeMap should be included. On top of that, the rollback weight is added, +// which is the worst scenario. +macro_rules! cost_write { + // cost_write!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_add(T::WeightInfo::rollback_transient_storage()) + // .saturating_add(T::WeightInfo::set_transient_storage_full().saturating_sub(T::WeightInfo::set_transient_storage_empty()) + ($name:ident $(, $arg:expr )*) => { + (T::WeightInfo::$name($( $arg ),*).saturating_add(T::WeightInfo::rollback_transient_storage()).saturating_add(cost_write!(@cost_storage))) + }; + + (@cost_storage) => { + T::WeightInfo::set_transient_storage_full().saturating_sub(T::WeightInfo::set_transient_storage_empty()) + }; +} + +macro_rules! cost_read { + // cost_read!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_add(T::WeightInfo::get_transient_storage_full() + // .saturating_sub(T::WeightInfo::get_transient_storage_empty()) + ($name:ident $(, $arg:expr )*) => { + (T::WeightInfo::$name($( $arg ),*).saturating_add(cost_read!(@cost_storage))) + }; + + (@cost_storage) => { + T::WeightInfo::get_transient_storage_full().saturating_sub(T::WeightInfo::get_transient_storage_empty()) + }; +} + macro_rules! 
cost_args { // cost_args!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_sub(T::WeightInfo::name(0, 0, 0)) ($name:ident, $( $arg: expr ),+) => { @@ -295,6 +334,12 @@ impl Token for RuntimeCosts { ContainsStorage(len) => T::WeightInfo::seal_contains_storage(len), GetStorage(len) => T::WeightInfo::seal_get_storage(len), TakeStorage(len) => T::WeightInfo::seal_take_storage(len), + SetTransientStorage { new_bytes, old_bytes } => + cost_write!(seal_set_transient_storage, new_bytes, old_bytes), + ClearTransientStorage(len) => cost_write!(seal_clear_transient_storage, len), + ContainsTransientStorage(len) => cost_read!(seal_contains_transient_storage, len), + GetTransientStorage(len) => cost_read!(seal_get_transient_storage, len), + TakeTransientStorage(len) => cost_write!(seal_take_transient_storage, len), Transfer => T::WeightInfo::seal_transfer(), CallBase => T::WeightInfo::seal_call(0, 0), DelegateCallBase => T::WeightInfo::seal_delegate_call(), @@ -791,10 +836,129 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let key = self.decode_key(memory, key_type, key_ptr)?; let outcome = self.ext.get_storage_size(&key); - self.adjust_gas(charged, RuntimeCosts::ClearStorage(outcome.unwrap_or(0))); + self.adjust_gas(charged, RuntimeCosts::ContainsStorage(outcome.unwrap_or(0))); Ok(outcome.unwrap_or(SENTINEL)) } + fn set_transient_storage( + &mut self, + memory: &[u8], + key_type: KeyType, + key_ptr: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { + let max_size = self.ext.max_value_size(); + let charged = self.charge_gas(RuntimeCosts::SetTransientStorage { + new_bytes: value_len, + old_bytes: max_size, + })?; + if value_len > max_size { + return Err(Error::::ValueTooLarge.into()) + } + let key = self.decode_key(memory, key_type, key_ptr)?; + let value = Some(self.read_sandbox_memory(memory, value_ptr, value_len)?); + let write_outcome = self.ext.set_transient_storage(&key, value, false)?; + self.adjust_gas( + charged, + RuntimeCosts::SetTransientStorage { + new_bytes: value_len, + old_bytes: write_outcome.old_len(), + }, + ); + Ok(write_outcome.old_len_with_sentinel()) + } + + fn clear_transient_storage( + &mut self, + memory: &[u8], + key_type: KeyType, + key_ptr: u32, + ) -> Result { + let charged = + self.charge_gas(RuntimeCosts::ClearTransientStorage(self.ext.max_value_size()))?; + let key = self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.set_transient_storage(&key, None, false)?; + + self.adjust_gas(charged, RuntimeCosts::ClearTransientStorage(outcome.old_len())); + Ok(outcome.old_len_with_sentinel()) + } + + fn get_transient_storage( + &mut self, + memory: &mut [u8], + key_type: KeyType, + key_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { + let charged = + self.charge_gas(RuntimeCosts::GetTransientStorage(self.ext.max_value_size()))?; + let key = self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.get_transient_storage(&key); + + if let Some(value) = outcome { + self.adjust_gas(charged, RuntimeCosts::GetTransientStorage(value.len() as u32)); + self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &value, + false, + already_charged, + )?; + Ok(ReturnErrorCode::Success) + } else { + self.adjust_gas(charged, RuntimeCosts::GetTransientStorage(0)); + Ok(ReturnErrorCode::KeyNotFound) + } + } + + fn contains_transient_storage( + &mut self, + memory: &[u8], + key_type: KeyType, + key_ptr: u32, + ) -> Result { + let charged = + self.charge_gas(RuntimeCosts::ContainsTransientStorage(self.ext.max_value_size()))?; + let key = 
self.decode_key(memory, key_type, key_ptr)?; + let outcome = self.ext.get_transient_storage_size(&key); + + self.adjust_gas(charged, RuntimeCosts::ContainsTransientStorage(outcome.unwrap_or(0))); + Ok(outcome.unwrap_or(SENTINEL)) + } + + fn take_transient_storage( + &mut self, + memory: &mut [u8], + key_type: KeyType, + key_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { + let charged = + self.charge_gas(RuntimeCosts::TakeTransientStorage(self.ext.max_value_size()))?; + let key = self.decode_key(memory, key_type, key_ptr)?; + if let crate::storage::WriteOutcome::Taken(value) = + self.ext.set_transient_storage(&key, None, true)? + { + self.adjust_gas(charged, RuntimeCosts::TakeTransientStorage(value.len() as u32)); + self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &value, + false, + already_charged, + )?; + Ok(ReturnErrorCode::Success) + } else { + self.adjust_gas(charged, RuntimeCosts::TakeTransientStorage(0)); + Ok(ReturnErrorCode::KeyNotFound) + } + } + fn call( &mut self, memory: &mut [u8], @@ -1098,6 +1262,67 @@ pub mod env { } } + /// Set the value at the given key in the contract transient storage. + #[unstable] + fn set_transient_storage( + ctx: _, + memory: _, + key_ptr: u32, + key_len: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { + ctx.set_transient_storage(memory, KeyType::Var(key_len), key_ptr, value_ptr, value_len) + } + + /// Clear the value at the given key in the contract storage. + #[unstable] + fn clear_transient_storage( + ctx: _, + memory: _, + key_ptr: u32, + key_len: u32, + ) -> Result { + ctx.clear_transient_storage(memory, KeyType::Var(key_len), key_ptr) + } + + /// Retrieve the value under the given key from transient storage. + #[unstable] + fn get_transient_storage( + ctx: _, + memory: _, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { + ctx.get_transient_storage(memory, KeyType::Var(key_len), key_ptr, out_ptr, out_len_ptr) + } + + /// Checks whether there is a value stored under the given key in transient storage. + #[unstable] + fn contains_transient_storage( + ctx: _, + memory: _, + key_ptr: u32, + key_len: u32, + ) -> Result { + ctx.contains_transient_storage(memory, KeyType::Var(key_len), key_ptr) + } + + /// Retrieve and remove the value under the given key from transient storage. + #[unstable] + fn take_transient_storage( + ctx: _, + memory: _, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { + ctx.take_transient_storage(memory, KeyType::Var(key_len), key_ptr, out_ptr, out_len_ptr) + } + /// Transfer some value to another account. /// See [`pallet_contracts_uapi::HostFn::transfer`]. #[prefixed_alias] @@ -1900,7 +2125,7 @@ pub mod env { data_len: u32, ) -> Result<(), TrapReason> { let num_topic = topics_len - .checked_div(sp_std::mem::size_of::>() as u32) + .checked_div(core::mem::size_of::>() as u32) .ok_or("Zero sized topics are not allowed")?; ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; if data_len > ctx.ext.max_value_size() { diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 0404a9d3d8e50..dc10a8aee773c 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-06-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-07-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-1pho9goo-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -98,6 +98,16 @@ pub trait WeightInfo { fn seal_get_storage(n: u32, ) -> Weight; fn seal_contains_storage(n: u32, ) -> Weight; fn seal_take_storage(n: u32, ) -> Weight; + fn set_transient_storage_empty() -> Weight; + fn set_transient_storage_full() -> Weight; + fn get_transient_storage_empty() -> Weight; + fn get_transient_storage_full() -> Weight; + fn rollback_transient_storage() -> Weight; + fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight; + fn seal_clear_transient_storage(n: u32, ) -> Weight; + fn seal_get_transient_storage(n: u32, ) -> Weight; + fn seal_contains_transient_storage(n: u32, ) -> Weight; + fn seal_take_transient_storage(n: u32, ) -> Weight; fn seal_transfer() -> Weight; fn seal_call(t: u32, i: u32, ) -> Weight; fn seal_delegate_call() -> Weight; @@ -127,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_896_000 picoseconds. - Weight::from_parts(1_990_000, 1627) + // Minimum execution time: 1_921_000 picoseconds. + Weight::from_parts(2_003_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -138,10 +148,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ยฑ0)` // Estimated: `442 + k * (70 ยฑ0)` - // Minimum execution time: 11_142_000 picoseconds. - Weight::from_parts(11_578_000, 442) - // Standard Error: 1_557 - .saturating_add(Weight::from_parts(1_165_198, 0).saturating_mul(k.into())) + // Minimum execution time: 11_364_000 picoseconds. + Weight::from_parts(11_463_000, 442) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(1_149_944, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -155,10 +165,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ยฑ0)` // Estimated: `6149 + c * (1 ยฑ0)` - // Minimum execution time: 7_649_000 picoseconds. - Weight::from_parts(4_827_445, 6149) + // Minimum execution time: 7_565_000 picoseconds. + Weight::from_parts(5_041_009, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_640, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -171,8 +181,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_096_000 picoseconds. - Weight::from_parts(16_937_000, 6450) + // Minimum execution time: 15_894_000 picoseconds. 
+ Weight::from_parts(16_618_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -185,10 +195,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ยฑ0)` // Estimated: `3635 + k * (1 ยฑ0)` - // Minimum execution time: 3_131_000 picoseconds. - Weight::from_parts(3_209_000, 3635) - // Standard Error: 481 - .saturating_add(Weight::from_parts(1_087_506, 0).saturating_mul(k.into())) + // Minimum execution time: 3_077_000 picoseconds. + Weight::from_parts(3_144_000, 3635) + // Standard Error: 650 + .saturating_add(Weight::from_parts(1_095_835, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -207,10 +217,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ยฑ0)` // Estimated: `6263 + c * (1 ยฑ0)` - // Minimum execution time: 15_289_000 picoseconds. - Weight::from_parts(16_157_168, 6263) + // Minimum execution time: 14_960_000 picoseconds. + Weight::from_parts(15_778_951, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(395, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(443, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -221,8 +231,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_312_000 picoseconds. - Weight::from_parts(12_650_000, 6380) + // Minimum execution time: 11_849_000 picoseconds. + Weight::from_parts(12_273_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -236,8 +246,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_239_000 picoseconds. - Weight::from_parts(48_617_000, 6292) + // Minimum execution time: 47_862_000 picoseconds. + Weight::from_parts(48_879_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -249,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_084_000 picoseconds. - Weight::from_parts(53_838_000, 6534) + // Minimum execution time: 50_754_000 picoseconds. + Weight::from_parts(52_720_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -260,8 +270,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_785_000 picoseconds. - Weight::from_parts(12_284_000, 6349) + // Minimum execution time: 11_459_000 picoseconds. + Weight::from_parts(11_921_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -271,8 +281,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_136_000 picoseconds. - Weight::from_parts(2_233_000, 1627) + // Minimum execution time: 2_135_000 picoseconds. 
+ Weight::from_parts(2_247_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -284,8 +294,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_957_000 picoseconds. - Weight::from_parts(11_314_000, 3631) + // Minimum execution time: 10_645_000 picoseconds. + Weight::from_parts(11_107_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -295,8 +305,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_354_000 picoseconds. - Weight::from_parts(4_613_000, 3607) + // Minimum execution time: 4_353_000 picoseconds. + Weight::from_parts(4_628_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -307,8 +317,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_541_000 picoseconds. - Weight::from_parts(5_790_000, 3632) + // Minimum execution time: 5_432_000 picoseconds. + Weight::from_parts(5_624_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -319,8 +329,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_502_000 picoseconds. - Weight::from_parts(5_701_000, 3607) + // Minimum execution time: 5_371_000 picoseconds. + Weight::from_parts(5_794_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -339,12 +349,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ยฑ0)` - // Estimated: `4264 + c * (1 ยฑ0)` - // Minimum execution time: 247_884_000 picoseconds. - Weight::from_parts(265_795_781, 4264) + // Measured: `800 + c * (1 ยฑ0)` + // Estimated: `4266 + c * (1 ยฑ0)` + // Minimum execution time: 247_157_000 picoseconds. + Weight::from_parts(269_252_698, 4266) // Standard Error: 4 - .saturating_add(Weight::from_parts(724, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(729, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -372,14 +382,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_500_184_000 picoseconds. - Weight::from_parts(160_729_258, 6262) - // Standard Error: 143 - .saturating_add(Weight::from_parts(52_809, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_173, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_165, 0).saturating_mul(s.into())) + // Minimum execution time: 4_575_784_000 picoseconds. 
+ Weight::from_parts(207_379_459, 6262) + // Standard Error: 124 + .saturating_add(Weight::from_parts(52_392, 0).saturating_mul(c.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(i.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_263, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -404,13 +414,13 @@ impl WeightInfo for SubstrateWeight { fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4029` - // Minimum execution time: 2_219_163_000 picoseconds. - Weight::from_parts(2_236_918_000, 4029) - // Standard Error: 32 - .saturating_add(Weight::from_parts(937, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(Weight::from_parts(938, 0).saturating_mul(s.into())) + // Estimated: `4017` + // Minimum execution time: 2_306_770_000 picoseconds. + Weight::from_parts(2_462_908_000, 4017) + // Standard Error: 33 + .saturating_add(Weight::from_parts(898, 0).saturating_mul(i.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(859, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -430,8 +440,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 164_801_000 picoseconds. - Weight::from_parts(167_250_000, 4291) + // Minimum execution time: 165_499_000 picoseconds. + Weight::from_parts(169_903_000, 4291) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -448,10 +458,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 225_207_000 picoseconds. - Weight::from_parts(263_665_658, 3607) - // Standard Error: 47 - .saturating_add(Weight::from_parts(50_732, 0).saturating_mul(c.into())) + // Minimum execution time: 227_590_000 picoseconds. + Weight::from_parts(260_045_588, 3607) + // Standard Error: 52 + .saturating_add(Weight::from_parts(51_305, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -468,10 +478,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 230_718_000 picoseconds. - Weight::from_parts(258_359_271, 3607) - // Standard Error: 47 - .saturating_add(Weight::from_parts(51_014, 0).saturating_mul(c.into())) + // Minimum execution time: 239_634_000 picoseconds. + Weight::from_parts(262_040_831, 3607) + // Standard Error: 103 + .saturating_add(Weight::from_parts(51_590, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -487,8 +497,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_668_000 picoseconds. - Weight::from_parts(41_031_000, 3780) + // Minimum execution time: 39_152_000 picoseconds. 
+ Weight::from_parts(39_970_000, 3780) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -502,8 +512,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_890_000 picoseconds. - Weight::from_parts(26_603_000, 6492) + // Minimum execution time: 25_143_000 picoseconds. + Weight::from_parts(26_103_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -512,17 +522,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_269_000 picoseconds. - Weight::from_parts(9_227_069, 0) - // Standard Error: 74 - .saturating_add(Weight::from_parts(51_396, 0).saturating_mul(r.into())) + // Minimum execution time: 8_406_000 picoseconds. + Weight::from_parts(9_056_753, 0) + // Standard Error: 98 + .saturating_add(Weight::from_parts(53_110, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 602_000 picoseconds. - Weight::from_parts(664_000, 0) + // Minimum execution time: 659_000 picoseconds. + Weight::from_parts(705_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -530,8 +540,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_131_000 picoseconds. - Weight::from_parts(6_468_000, 3819) + // Minimum execution time: 6_165_000 picoseconds. + Weight::from_parts(6_340_000, 3819) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -540,79 +550,79 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_557_000 picoseconds. - Weight::from_parts(7_704_000, 3912) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_661_000, 3912) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 783_000 picoseconds. - Weight::from_parts(848_000, 0) + // Minimum execution time: 723_000 picoseconds. + Weight::from_parts(793_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 397_000 picoseconds. - Weight::from_parts(435_000, 0) + // Minimum execution time: 398_000 picoseconds. + Weight::from_parts(428_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 351_000 picoseconds. - Weight::from_parts(372_000, 0) + // Minimum execution time: 329_000 picoseconds. + Weight::from_parts(364_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 608_000 picoseconds. - Weight::from_parts(645_000, 0) + // Minimum execution time: 592_000 picoseconds. + Weight::from_parts(624_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 661_000 picoseconds. 
- Weight::from_parts(729_000, 0) + // Minimum execution time: 665_000 picoseconds. + Weight::from_parts(714_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_545_000 picoseconds. - Weight::from_parts(4_663_000, 0) + // Minimum execution time: 4_486_000 picoseconds. + Weight::from_parts(4_668_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 614_000 picoseconds. - Weight::from_parts(641_000, 0) + // Minimum execution time: 548_000 picoseconds. + Weight::from_parts(590_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(618_000, 0) + // Minimum execution time: 536_000 picoseconds. + Weight::from_parts(578_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(617_000, 0) + // Minimum execution time: 552_000 picoseconds. + Weight::from_parts(599_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(638_000, 0) + // Minimum execution time: 556_000 picoseconds. + Weight::from_parts(600_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -620,8 +630,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_172_000 picoseconds. - Weight::from_parts(4_408_000, 1552) + // Minimum execution time: 4_084_000 picoseconds. + Weight::from_parts(4_321_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -629,20 +639,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 475_000 picoseconds. - Weight::from_parts(515_000, 0) + // Minimum execution time: 468_000 picoseconds. + Weight::from_parts(492_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(298, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(310, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 289_000 picoseconds. - Weight::from_parts(357_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(405, 0).saturating_mul(n.into())) + // Minimum execution time: 377_000 picoseconds. + Weight::from_parts(396_000, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(431, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -655,10 +665,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319 + n * (78 ยฑ0)` // Estimated: `3784 + n * (2553 ยฑ0)` - // Minimum execution time: 13_316_000 picoseconds. 
- Weight::from_parts(15_855_821, 3784) - // Standard Error: 7_274 - .saturating_add(Weight::from_parts(3_447_246, 0).saturating_mul(n.into())) + // Minimum execution time: 13_028_000 picoseconds. + Weight::from_parts(15_330_917, 3784) + // Standard Error: 8_260 + .saturating_add(Weight::from_parts(3_594_893, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -671,8 +681,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_468_000 picoseconds. - Weight::from_parts(3_608_000, 1561) + // Minimum execution time: 3_367_000 picoseconds. + Weight::from_parts(3_555_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -683,12 +693,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ยฑ0)` - // Minimum execution time: 3_777_000 picoseconds. - Weight::from_parts(4_028_191, 990) - // Standard Error: 5_907 - .saturating_add(Weight::from_parts(2_183_733, 0).saturating_mul(t.into())) + // Minimum execution time: 3_779_000 picoseconds. + Weight::from_parts(4_003_836, 990) + // Standard Error: 5_409 + .saturating_add(Weight::from_parts(2_082_176, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(14, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -698,10 +708,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 400_000 picoseconds. - Weight::from_parts(423_000, 0) + // Minimum execution time: 409_000 picoseconds. + Weight::from_parts(447_000, 0) // Standard Error: 10 - .saturating_add(Weight::from_parts(1_209, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -711,12 +721,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `250 + o * (1 ยฑ0)` // Estimated: `249 + o * (1 ยฑ0)` - // Minimum execution time: 9_033_000 picoseconds. - Weight::from_parts(8_797_934, 249) + // Minimum execution time: 9_176_000 picoseconds. + Weight::from_parts(9_121_191, 249) // Standard Error: 1 - .saturating_add(Weight::from_parts(257, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(292, 0).saturating_mul(n.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(51, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(31, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -728,10 +738,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 7_167_000 picoseconds. 
- Weight::from_parts(8_012_194, 248) + // Minimum execution time: 7_294_000 picoseconds. + Weight::from_parts(7_963_151, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(90, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -743,10 +753,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 6_868_000 picoseconds. - Weight::from_parts(7_801_811, 248) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_741_355, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(605, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(654, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -757,10 +767,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 6_322_000 picoseconds. - Weight::from_parts(7_103_552, 248) + // Minimum execution time: 6_286_000 picoseconds. + Weight::from_parts(7_026_923, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -771,20 +781,106 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 7_702_000 picoseconds. - Weight::from_parts(8_746_305, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(604, 0).saturating_mul(n.into())) + // Minimum execution time: 7_597_000 picoseconds. + Weight::from_parts(8_706_785, 248) + // Standard Error: 1 + .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } + fn set_transient_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_497_000 picoseconds. + Weight::from_parts(1_564_000, 0) + } + fn set_transient_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_670_000 picoseconds. + Weight::from_parts(2_807_000, 0) + } + fn get_transient_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_836_000 picoseconds. + Weight::from_parts(3_878_000, 0) + } + fn get_transient_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_537_000 picoseconds. + Weight::from_parts(4_665_000, 0) + } + fn rollback_transient_storage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_592_000 picoseconds. + Weight::from_parts(1_742_000, 0) + } + /// The range of component `n` is `[0, 16384]`. + /// The range of component `o` is `[0, 16384]`. 
+ fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_101_000 picoseconds. + Weight::from_parts(2_481_218, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(300, 0).saturating_mul(o.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_clear_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_059_000 picoseconds. + Weight::from_parts(2_426_609, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(307, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_get_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_114_837, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(302, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_contains_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_759_000 picoseconds. + Weight::from_parts(1_959_995, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_take_transient_storage(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_759_000 picoseconds. + Weight::from_parts(9_952_099, 0) + } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_851_000 picoseconds. - Weight::from_parts(9_083_000, 0) + // Minimum execution time: 8_700_000 picoseconds. + Weight::from_parts(8_903_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -800,12 +896,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `620 + t * (280 ยฑ0)` // Estimated: `4085 + t * (2182 ยฑ0)` - // Minimum execution time: 121_148_000 picoseconds. - Weight::from_parts(119_605_377, 4085) - // Standard Error: 208_337 - .saturating_add(Weight::from_parts(43_153_338, 0).saturating_mul(t.into())) + // Minimum execution time: 123_399_000 picoseconds. + Weight::from_parts(120_909_821, 4085) + // Standard Error: 166_830 + .saturating_add(Weight::from_parts(43_853_642, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(5, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -820,8 +916,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 108_159_000 picoseconds. - Weight::from_parts(110_027_000, 3895) + // Minimum execution time: 112_350_000 picoseconds. 
+ Weight::from_parts(116_003_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -839,13 +935,13 @@ impl WeightInfo for SubstrateWeight { fn seal_instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676` - // Estimated: `4127` - // Minimum execution time: 1_861_874_000 picoseconds. - Weight::from_parts(1_872_926_000, 4127) - // Standard Error: 23 - .saturating_add(Weight::from_parts(557, 0).saturating_mul(i.into())) - // Standard Error: 23 - .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) + // Estimated: `4132` + // Minimum execution time: 1_972_276_000 picoseconds. + Weight::from_parts(1_977_872_000, 4132) + // Standard Error: 24 + .saturating_add(Weight::from_parts(623, 0).saturating_mul(i.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(917, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -854,64 +950,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 878_000 picoseconds. - Weight::from_parts(10_993_950, 0) + // Minimum execution time: 899_000 picoseconds. + Weight::from_parts(10_963_972, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_325, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_261_000 picoseconds. - Weight::from_parts(9_759_497, 0) + // Minimum execution time: 1_396_000 picoseconds. + Weight::from_parts(9_404_986, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_594, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_627, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 726_000 picoseconds. - Weight::from_parts(9_795_728, 0) + // Minimum execution time: 834_000 picoseconds. + Weight::from_parts(9_749_716, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_500, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 739_000 picoseconds. - Weight::from_parts(9_701_202, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(8_995_036, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_495, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_309_000 picoseconds. - Weight::from_parts(41_405_949, 0) + // Minimum execution time: 45_800_000 picoseconds. 
+ Weight::from_parts(44_676_829, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(5_336, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(5_315, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_880_000 picoseconds. - Weight::from_parts(49_025_000, 0) + // Minimum execution time: 47_415_000 picoseconds. + Weight::from_parts(48_743_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_462_000 picoseconds. - Weight::from_parts(13_631_000, 0) + // Minimum execution time: 13_437_000 picoseconds. + Weight::from_parts(13_588_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -921,8 +1017,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_978_000 picoseconds. - Weight::from_parts(18_578_000, 3895) + // Minimum execution time: 17_775_000 picoseconds. + Weight::from_parts(18_332_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -932,8 +1028,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_384_000 picoseconds. - Weight::from_parts(8_687_000, 3820) + // Minimum execution time: 8_326_000 picoseconds. + Weight::from_parts(8_656_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -943,8 +1039,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_547_000 picoseconds. - Weight::from_parts(7_935_000, 3558) + // Minimum execution time: 7_276_000 picoseconds. + Weight::from_parts(7_630_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -952,15 +1048,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 331_000 picoseconds. - Weight::from_parts(363_000, 0) + // Minimum execution time: 330_000 picoseconds. + Weight::from_parts(373_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 349_000 picoseconds. - Weight::from_parts(365_000, 0) + // Minimum execution time: 381_000 picoseconds. + Weight::from_parts(418_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -968,8 +1064,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_814_000 picoseconds. - Weight::from_parts(3_038_000, 1704) + // Minimum execution time: 2_711_000 picoseconds. + Weight::from_parts(2_941_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -977,10 +1073,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 693_000 picoseconds. 
- Weight::from_parts(665_431, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(7_030, 0).saturating_mul(r.into())) + // Minimum execution time: 720_000 picoseconds. + Weight::from_parts(389_111, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(7_278, 0).saturating_mul(r.into())) } } @@ -992,8 +1088,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_896_000 picoseconds. - Weight::from_parts(1_990_000, 1627) + // Minimum execution time: 1_921_000 picoseconds. + Weight::from_parts(2_003_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1003,10 +1099,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ยฑ0)` // Estimated: `442 + k * (70 ยฑ0)` - // Minimum execution time: 11_142_000 picoseconds. - Weight::from_parts(11_578_000, 442) - // Standard Error: 1_557 - .saturating_add(Weight::from_parts(1_165_198, 0).saturating_mul(k.into())) + // Minimum execution time: 11_364_000 picoseconds. + Weight::from_parts(11_463_000, 442) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(1_149_944, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1020,10 +1116,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ยฑ0)` // Estimated: `6149 + c * (1 ยฑ0)` - // Minimum execution time: 7_649_000 picoseconds. - Weight::from_parts(4_827_445, 6149) + // Minimum execution time: 7_565_000 picoseconds. + Weight::from_parts(5_041_009, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_640, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1036,8 +1132,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_096_000 picoseconds. - Weight::from_parts(16_937_000, 6450) + // Minimum execution time: 15_894_000 picoseconds. + Weight::from_parts(16_618_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1050,10 +1146,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ยฑ0)` // Estimated: `3635 + k * (1 ยฑ0)` - // Minimum execution time: 3_131_000 picoseconds. - Weight::from_parts(3_209_000, 3635) - // Standard Error: 481 - .saturating_add(Weight::from_parts(1_087_506, 0).saturating_mul(k.into())) + // Minimum execution time: 3_077_000 picoseconds. + Weight::from_parts(3_144_000, 3635) + // Standard Error: 650 + .saturating_add(Weight::from_parts(1_095_835, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1072,10 +1168,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `325 + c * (1 ยฑ0)` // Estimated: `6263 + c * (1 ยฑ0)` - // Minimum execution time: 15_289_000 picoseconds. - Weight::from_parts(16_157_168, 6263) + // Minimum execution time: 14_960_000 picoseconds. 
+ Weight::from_parts(15_778_951, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(395, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(443, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1086,8 +1182,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_312_000 picoseconds. - Weight::from_parts(12_650_000, 6380) + // Minimum execution time: 11_849_000 picoseconds. + Weight::from_parts(12_273_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1101,8 +1197,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_239_000 picoseconds. - Weight::from_parts(48_617_000, 6292) + // Minimum execution time: 47_862_000 picoseconds. + Weight::from_parts(48_879_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1114,8 +1210,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_084_000 picoseconds. - Weight::from_parts(53_838_000, 6534) + // Minimum execution time: 50_754_000 picoseconds. + Weight::from_parts(52_720_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1125,8 +1221,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_785_000 picoseconds. - Weight::from_parts(12_284_000, 6349) + // Minimum execution time: 11_459_000 picoseconds. + Weight::from_parts(11_921_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1136,8 +1232,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_136_000 picoseconds. - Weight::from_parts(2_233_000, 1627) + // Minimum execution time: 2_135_000 picoseconds. + Weight::from_parts(2_247_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1149,8 +1245,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_957_000 picoseconds. - Weight::from_parts(11_314_000, 3631) + // Minimum execution time: 10_645_000 picoseconds. + Weight::from_parts(11_107_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1160,8 +1256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_354_000 picoseconds. - Weight::from_parts(4_613_000, 3607) + // Minimum execution time: 4_353_000 picoseconds. + Weight::from_parts(4_628_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1172,8 +1268,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_541_000 picoseconds. - Weight::from_parts(5_790_000, 3632) + // Minimum execution time: 5_432_000 picoseconds. 
+ Weight::from_parts(5_624_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1184,8 +1280,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_502_000 picoseconds. - Weight::from_parts(5_701_000, 3607) + // Minimum execution time: 5_371_000 picoseconds. + Weight::from_parts(5_794_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1204,12 +1300,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ยฑ0)` - // Estimated: `4264 + c * (1 ยฑ0)` - // Minimum execution time: 247_884_000 picoseconds. - Weight::from_parts(265_795_781, 4264) + // Measured: `800 + c * (1 ยฑ0)` + // Estimated: `4266 + c * (1 ยฑ0)` + // Minimum execution time: 247_157_000 picoseconds. + Weight::from_parts(269_252_698, 4266) // Standard Error: 4 - .saturating_add(Weight::from_parts(724, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(729, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1237,14 +1333,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_500_184_000 picoseconds. - Weight::from_parts(160_729_258, 6262) - // Standard Error: 143 - .saturating_add(Weight::from_parts(52_809, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_173, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_165, 0).saturating_mul(s.into())) + // Minimum execution time: 4_575_784_000 picoseconds. + Weight::from_parts(207_379_459, 6262) + // Standard Error: 124 + .saturating_add(Weight::from_parts(52_392, 0).saturating_mul(c.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(i.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_263, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1269,13 +1365,13 @@ impl WeightInfo for () { fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4029` - // Minimum execution time: 2_219_163_000 picoseconds. - Weight::from_parts(2_236_918_000, 4029) - // Standard Error: 32 - .saturating_add(Weight::from_parts(937, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(Weight::from_parts(938, 0).saturating_mul(s.into())) + // Estimated: `4017` + // Minimum execution time: 2_306_770_000 picoseconds. + Weight::from_parts(2_462_908_000, 4017) + // Standard Error: 33 + .saturating_add(Weight::from_parts(898, 0).saturating_mul(i.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(859, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1295,8 +1391,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 164_801_000 picoseconds. 
- Weight::from_parts(167_250_000, 4291) + // Minimum execution time: 165_499_000 picoseconds. + Weight::from_parts(169_903_000, 4291) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1313,10 +1409,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 225_207_000 picoseconds. - Weight::from_parts(263_665_658, 3607) - // Standard Error: 47 - .saturating_add(Weight::from_parts(50_732, 0).saturating_mul(c.into())) + // Minimum execution time: 227_590_000 picoseconds. + Weight::from_parts(260_045_588, 3607) + // Standard Error: 52 + .saturating_add(Weight::from_parts(51_305, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1333,10 +1429,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 230_718_000 picoseconds. - Weight::from_parts(258_359_271, 3607) - // Standard Error: 47 - .saturating_add(Weight::from_parts(51_014, 0).saturating_mul(c.into())) + // Minimum execution time: 239_634_000 picoseconds. + Weight::from_parts(262_040_831, 3607) + // Standard Error: 103 + .saturating_add(Weight::from_parts(51_590, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1352,8 +1448,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_668_000 picoseconds. - Weight::from_parts(41_031_000, 3780) + // Minimum execution time: 39_152_000 picoseconds. + Weight::from_parts(39_970_000, 3780) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1367,8 +1463,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_890_000 picoseconds. - Weight::from_parts(26_603_000, 6492) + // Minimum execution time: 25_143_000 picoseconds. + Weight::from_parts(26_103_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1377,17 +1473,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_269_000 picoseconds. - Weight::from_parts(9_227_069, 0) - // Standard Error: 74 - .saturating_add(Weight::from_parts(51_396, 0).saturating_mul(r.into())) + // Minimum execution time: 8_406_000 picoseconds. + Weight::from_parts(9_056_753, 0) + // Standard Error: 98 + .saturating_add(Weight::from_parts(53_110, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 602_000 picoseconds. - Weight::from_parts(664_000, 0) + // Minimum execution time: 659_000 picoseconds. + Weight::from_parts(705_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1395,8 +1491,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_131_000 picoseconds. - Weight::from_parts(6_468_000, 3819) + // Minimum execution time: 6_165_000 picoseconds. 
+ Weight::from_parts(6_340_000, 3819) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -1405,79 +1501,79 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_557_000 picoseconds. - Weight::from_parts(7_704_000, 3912) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_661_000, 3912) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 783_000 picoseconds. - Weight::from_parts(848_000, 0) + // Minimum execution time: 723_000 picoseconds. + Weight::from_parts(793_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 397_000 picoseconds. - Weight::from_parts(435_000, 0) + // Minimum execution time: 398_000 picoseconds. + Weight::from_parts(428_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 351_000 picoseconds. - Weight::from_parts(372_000, 0) + // Minimum execution time: 329_000 picoseconds. + Weight::from_parts(364_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 608_000 picoseconds. - Weight::from_parts(645_000, 0) + // Minimum execution time: 592_000 picoseconds. + Weight::from_parts(624_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 661_000 picoseconds. - Weight::from_parts(729_000, 0) + // Minimum execution time: 665_000 picoseconds. + Weight::from_parts(714_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_545_000 picoseconds. - Weight::from_parts(4_663_000, 0) + // Minimum execution time: 4_486_000 picoseconds. + Weight::from_parts(4_668_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 614_000 picoseconds. - Weight::from_parts(641_000, 0) + // Minimum execution time: 548_000 picoseconds. + Weight::from_parts(590_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(618_000, 0) + // Minimum execution time: 536_000 picoseconds. + Weight::from_parts(578_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 583_000 picoseconds. - Weight::from_parts(617_000, 0) + // Minimum execution time: 552_000 picoseconds. + Weight::from_parts(599_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(638_000, 0) + // Minimum execution time: 556_000 picoseconds. 
+ Weight::from_parts(600_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1485,8 +1581,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_172_000 picoseconds. - Weight::from_parts(4_408_000, 1552) + // Minimum execution time: 4_084_000 picoseconds. + Weight::from_parts(4_321_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -1494,20 +1590,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 475_000 picoseconds. - Weight::from_parts(515_000, 0) + // Minimum execution time: 468_000 picoseconds. + Weight::from_parts(492_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(298, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(310, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 289_000 picoseconds. - Weight::from_parts(357_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(405, 0).saturating_mul(n.into())) + // Minimum execution time: 377_000 picoseconds. + Weight::from_parts(396_000, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(431, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1520,10 +1616,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319 + n * (78 ยฑ0)` // Estimated: `3784 + n * (2553 ยฑ0)` - // Minimum execution time: 13_316_000 picoseconds. - Weight::from_parts(15_855_821, 3784) - // Standard Error: 7_274 - .saturating_add(Weight::from_parts(3_447_246, 0).saturating_mul(n.into())) + // Minimum execution time: 13_028_000 picoseconds. + Weight::from_parts(15_330_917, 3784) + // Standard Error: 8_260 + .saturating_add(Weight::from_parts(3_594_893, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1536,8 +1632,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_468_000 picoseconds. - Weight::from_parts(3_608_000, 1561) + // Minimum execution time: 3_367_000 picoseconds. + Weight::from_parts(3_555_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -1548,12 +1644,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ยฑ0)` - // Minimum execution time: 3_777_000 picoseconds. - Weight::from_parts(4_028_191, 990) - // Standard Error: 5_907 - .saturating_add(Weight::from_parts(2_183_733, 0).saturating_mul(t.into())) + // Minimum execution time: 3_779_000 picoseconds. 
+ Weight::from_parts(4_003_836, 990) + // Standard Error: 5_409 + .saturating_add(Weight::from_parts(2_082_176, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(14, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -1563,10 +1659,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 400_000 picoseconds. - Weight::from_parts(423_000, 0) + // Minimum execution time: 409_000 picoseconds. + Weight::from_parts(447_000, 0) // Standard Error: 10 - .saturating_add(Weight::from_parts(1_209, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1576,12 +1672,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `250 + o * (1 ยฑ0)` // Estimated: `249 + o * (1 ยฑ0)` - // Minimum execution time: 9_033_000 picoseconds. - Weight::from_parts(8_797_934, 249) + // Minimum execution time: 9_176_000 picoseconds. + Weight::from_parts(9_121_191, 249) // Standard Error: 1 - .saturating_add(Weight::from_parts(257, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(292, 0).saturating_mul(n.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(51, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(31, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1593,10 +1689,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 7_167_000 picoseconds. - Weight::from_parts(8_012_194, 248) + // Minimum execution time: 7_294_000 picoseconds. + Weight::from_parts(7_963_151, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(90, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1608,10 +1704,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 6_868_000 picoseconds. - Weight::from_parts(7_801_811, 248) + // Minimum execution time: 6_978_000 picoseconds. + Weight::from_parts(7_741_355, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(605, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(654, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1622,10 +1718,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 6_322_000 picoseconds. - Weight::from_parts(7_103_552, 248) + // Minimum execution time: 6_286_000 picoseconds. 
+ Weight::from_parts(7_026_923, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(79, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1636,20 +1732,106 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 7_702_000 picoseconds. - Weight::from_parts(8_746_305, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(604, 0).saturating_mul(n.into())) + // Minimum execution time: 7_597_000 picoseconds. + Weight::from_parts(8_706_785, 248) + // Standard Error: 1 + .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } + fn set_transient_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_497_000 picoseconds. + Weight::from_parts(1_564_000, 0) + } + fn set_transient_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_670_000 picoseconds. + Weight::from_parts(2_807_000, 0) + } + fn get_transient_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_836_000 picoseconds. + Weight::from_parts(3_878_000, 0) + } + fn get_transient_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_537_000 picoseconds. + Weight::from_parts(4_665_000, 0) + } + fn rollback_transient_storage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_592_000 picoseconds. + Weight::from_parts(1_742_000, 0) + } + /// The range of component `n` is `[0, 16384]`. + /// The range of component `o` is `[0, 16384]`. + fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_101_000 picoseconds. + Weight::from_parts(2_481_218, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(300, 0).saturating_mul(o.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_clear_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_059_000 picoseconds. + Weight::from_parts(2_426_609, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(307, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_get_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_114_837, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(302, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_contains_transient_storage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_759_000 picoseconds. 
+ Weight::from_parts(1_959_995, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 16384]`. + fn seal_take_transient_storage(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_759_000 picoseconds. + Weight::from_parts(9_952_099, 0) + } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_851_000 picoseconds. - Weight::from_parts(9_083_000, 0) + // Minimum execution time: 8_700_000 picoseconds. + Weight::from_parts(8_903_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1665,12 +1847,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `620 + t * (280 ยฑ0)` // Estimated: `4085 + t * (2182 ยฑ0)` - // Minimum execution time: 121_148_000 picoseconds. - Weight::from_parts(119_605_377, 4085) - // Standard Error: 208_337 - .saturating_add(Weight::from_parts(43_153_338, 0).saturating_mul(t.into())) + // Minimum execution time: 123_399_000 picoseconds. + Weight::from_parts(120_909_821, 4085) + // Standard Error: 166_830 + .saturating_add(Weight::from_parts(43_853_642, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(5, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -1685,8 +1867,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 108_159_000 picoseconds. - Weight::from_parts(110_027_000, 3895) + // Minimum execution time: 112_350_000 picoseconds. + Weight::from_parts(116_003_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -1704,13 +1886,13 @@ impl WeightInfo for () { fn seal_instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `676` - // Estimated: `4127` - // Minimum execution time: 1_861_874_000 picoseconds. - Weight::from_parts(1_872_926_000, 4127) - // Standard Error: 23 - .saturating_add(Weight::from_parts(557, 0).saturating_mul(i.into())) - // Standard Error: 23 - .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) + // Estimated: `4132` + // Minimum execution time: 1_972_276_000 picoseconds. + Weight::from_parts(1_977_872_000, 4132) + // Standard Error: 24 + .saturating_add(Weight::from_parts(623, 0).saturating_mul(i.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(917, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1719,64 +1901,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 878_000 picoseconds. - Weight::from_parts(10_993_950, 0) + // Minimum execution time: 899_000 picoseconds. + Weight::from_parts(10_963_972, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_325, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_261_000 picoseconds. - Weight::from_parts(9_759_497, 0) + // Minimum execution time: 1_396_000 picoseconds. + Weight::from_parts(9_404_986, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_594, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_627, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 726_000 picoseconds. - Weight::from_parts(9_795_728, 0) + // Minimum execution time: 834_000 picoseconds. + Weight::from_parts(9_749_716, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_500, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 739_000 picoseconds. - Weight::from_parts(9_701_202, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(8_995_036, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_495, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_309_000 picoseconds. - Weight::from_parts(41_405_949, 0) + // Minimum execution time: 45_800_000 picoseconds. + Weight::from_parts(44_676_829, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(5_336, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(5_315, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_880_000 picoseconds. - Weight::from_parts(49_025_000, 0) + // Minimum execution time: 47_415_000 picoseconds. + Weight::from_parts(48_743_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_462_000 picoseconds. - Weight::from_parts(13_631_000, 0) + // Minimum execution time: 13_437_000 picoseconds. + Weight::from_parts(13_588_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1786,8 +1968,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_978_000 picoseconds. - Weight::from_parts(18_578_000, 3895) + // Minimum execution time: 17_775_000 picoseconds. + Weight::from_parts(18_332_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1797,8 +1979,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_384_000 picoseconds. - Weight::from_parts(8_687_000, 3820) + // Minimum execution time: 8_326_000 picoseconds. 
+ Weight::from_parts(8_656_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1808,8 +1990,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_547_000 picoseconds. - Weight::from_parts(7_935_000, 3558) + // Minimum execution time: 7_276_000 picoseconds. + Weight::from_parts(7_630_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1817,15 +1999,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 331_000 picoseconds. - Weight::from_parts(363_000, 0) + // Minimum execution time: 330_000 picoseconds. + Weight::from_parts(373_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 349_000 picoseconds. - Weight::from_parts(365_000, 0) + // Minimum execution time: 381_000 picoseconds. + Weight::from_parts(418_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1833,8 +2015,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_814_000 picoseconds. - Weight::from_parts(3_038_000, 1704) + // Minimum execution time: 2_711_000 picoseconds. + Weight::from_parts(2_941_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1842,9 +2024,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 693_000 picoseconds. - Weight::from_parts(665_431, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(7_030, 0).saturating_mul(r.into())) + // Minimum execution time: 720_000 picoseconds. + Weight::from_parts(389_111, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(7_278, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index e19caa460419e..cb559ec88d281 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -12,13 +12,13 @@ description = "Exposes all the host functions that a contract can import." workspace = true [dependencies] -paste = { version = "1.0", default-features = false } -bitflags = "1.0" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +paste = { workspace = true } +bitflags = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } +codec = { features = [ "derive", "max-encoded-len", -], optional = true } +], optional = true, workspace = true } [target.'cfg(target_arch = "riscv32")'.dependencies] polkavm-derive = { workspace = true } diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 92065eda5d635..51f0cd7eb2dc0 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -67,7 +67,7 @@ fn ptr_or_sentinel(data: &Option<&[u8]>) -> *const u8 { pub enum HostFnImpl {} /// Defines all the host apis implemented by both wasm and RISC-V vms. 
-pub trait HostFn { +pub trait HostFn: private::Sealed { /// Returns the number of times specified contract exists on the call stack. Delegated calls are /// not counted as separate calls. /// @@ -292,6 +292,20 @@ pub trait HostFn { /// Returns the size of the pre-existing value at the specified key if any. fn clear_storage_v1(key: &[u8]) -> Option; + /// Clear the value at the given key in the contract transient storage. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn clear_transient_storage(key: &[u8]) -> Option; + /// Retrieve the code hash for a specified contract address. /// /// # Parameters @@ -324,6 +338,21 @@ pub trait HostFn { /// Returns the size of the pre-existing value at the specified key if any. fn contains_storage_v1(key: &[u8]) -> Option; + /// Checks whether there is a value stored under the given key in transient storage. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn contains_transient_storage(key: &[u8]) -> Option; + /// Emit a custom debug message. /// /// No newlines are added to the supplied message. @@ -453,6 +482,22 @@ pub trait HostFn { /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] fn get_storage_v1(key: &[u8], output: &mut &mut [u8]) -> Result; + /// Retrieve the value under the given key from transient storage. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn get_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + hash_fn!(sha2_256, 32); hash_fn!(keccak_256, 32); hash_fn!(blake2_256, 32); @@ -673,6 +718,24 @@ pub trait HostFn { /// Returns the size of the pre-existing value at the specified key if any. fn set_storage_v2(key: &[u8], value: &[u8]) -> Option; + /// Set the value at the given key in the contract transient storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn set_transient_storage(key: &[u8], value: &[u8]) -> Option; + /// Verify a sr25519 signature /// /// # Parameters @@ -696,6 +759,20 @@ pub trait HostFn { /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + /// Retrieve and remove the value under the given key from transient storage. + /// + /// # Parameters + /// - `key`: The storage key. 
+ /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn take_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + /// Transfer some amount of funds into the specified account. /// /// # Parameters @@ -804,3 +881,8 @@ pub trait HostFn { )] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result; } + +mod private { + pub trait Sealed {} + impl Sealed for super::HostFnImpl {} +} diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs index 561ab28747df9..3555202332121 100644 --- a/substrate/frame/contracts/uapi/src/host/riscv32.rs +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -172,6 +172,10 @@ impl HostFn for HostFnImpl { todo!() } + fn set_transient_storage(key: &[u8], encoded_value: &[u8]) -> Option { + todo!() + } + fn clear_storage(key: &[u8]) { todo!() } @@ -180,13 +184,25 @@ impl HostFn for HostFnImpl { todo!() } + fn clear_transient_storage(key: &[u8]) -> Option { + todo!() + } + impl_get_storage!(get_storage, sys::get_storage); impl_get_storage!(get_storage_v1, sys::v1::get_storage); + fn get_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + todo!() + } + fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result { todo!() } + fn take_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + todo!() + } + fn contains_storage(key: &[u8]) -> Option { todo!() } @@ -195,6 +211,10 @@ impl HostFn for HostFnImpl { todo!() } + fn contains_transient_storage(key: &[u8]) -> Option { + todo!() + } + fn terminate(beneficiary: &[u8]) -> ! 
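For context, here is a minimal contract-side sketch of the transient storage methods declared on `HostFn` above. Assumptions: the uapi crate is imported as `uapi` (as the pallet's test fixtures do) and the return types elided in this diff view are the crate's usual `Option<u32>`/`Result` aliases; the functions are marked unstable, hence the `allow(deprecated)`.

```rust
#![allow(deprecated)] // the transient storage API is flagged as unstable

use uapi::{HostFn, HostFnImpl as api};

fn transient_roundtrip(key: &[u8], value: &[u8]) {
    // Write a value that only lives for the duration of the call stack.
    let _prior_len = api::set_transient_storage(key, value);
    debug_assert!(api::contains_transient_storage(key).is_some());

    // Read it back; `out` is shrunk to the length of the stored value.
    let mut buf = [0u8; 512];
    let mut out: &mut [u8] = &mut buf;
    if api::get_transient_storage(key, &mut out).is_ok() {
        debug_assert_eq!(out, value);
    }

    // Drop it again; returns the size of the removed value, if any.
    let _removed_len = api::clear_transient_storage(key);
}
```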
{ todo!() } diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index cb5435bfc014d..55600bc3201f0 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -61,6 +61,8 @@ mod sys { pub fn clear_storage(key_ptr: *const u8); + pub fn clear_transient_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + pub fn code_hash( account_id_ptr: *const u8, output_ptr: *mut u8, @@ -69,6 +71,8 @@ mod sys { pub fn contains_storage(key_ptr: *const u8) -> ReturnCode; + pub fn contains_transient_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + pub fn debug_message(str_ptr: *const u8, str_len: u32) -> ReturnCode; pub fn delegate_call( @@ -103,6 +107,13 @@ mod sys { out_len_ptr: *mut u32, ) -> ReturnCode; + pub fn get_transient_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + pub fn hash_blake2_128(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); @@ -133,6 +144,13 @@ mod sys { pub fn set_storage(key_ptr: *const u8, value_ptr: *const u8, value_len: u32); + pub fn set_transient_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + pub fn sr25519_verify( signature_ptr: *const u8, public_key_ptr: *const u8, @@ -147,6 +165,13 @@ mod sys { out_len_ptr: *mut u32, ) -> ReturnCode; + pub fn take_transient_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + pub fn terminate(beneficiary_ptr: *const u8) -> !; pub fn transfer( @@ -598,6 +623,18 @@ impl HostFn for HostFnImpl { ret_code.into() } + fn set_transient_storage(key: &[u8], encoded_value: &[u8]) -> Option { + let ret_code = unsafe { + sys::set_transient_storage( + key.as_ptr(), + key.len() as u32, + encoded_value.as_ptr(), + encoded_value.len() as u32, + ) + }; + ret_code.into() + } + fn clear_storage(key: &[u8]) { unsafe { sys::clear_storage(key.as_ptr()) }; } @@ -607,6 +644,11 @@ impl HostFn for HostFnImpl { ret_code.into() } + fn clear_transient_storage(key: &[u8]) -> Option { + let ret_code = unsafe { sys::clear_transient_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + #[inline(always)] fn get_storage(key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; @@ -633,6 +675,23 @@ impl HostFn for HostFnImpl { ret_code.into() } + #[inline(always)] + fn get_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::get_transient_storage( + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + #[inline(always)] fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; @@ -650,6 +709,23 @@ impl HostFn for HostFnImpl { ret_code.into() } + #[inline(always)] + fn take_transient_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::take_transient_storage( + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + fn debug_message(str: &[u8]) -> Result { let ret_code = unsafe { 
sys::debug_message(str.as_ptr(), str.len() as u32) }; ret_code.into() @@ -665,6 +741,11 @@ impl HostFn for HostFnImpl { ret_code.into() } + fn contains_transient_storage(key: &[u8]) -> Option { + let ret_code = unsafe { sys::contains_transient_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + fn terminate(beneficiary: &[u8]) -> ! { unsafe { sys::terminate(beneficiary.as_ptr()) } } diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 20de4d858ad62..6184b0ffd6e67 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -16,24 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +assert_matches = { workspace = true } +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-scheduler = { path = "../scheduler" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/conviction-voting/src/benchmarking.rs b/substrate/frame/conviction-voting/src/benchmarking.rs index 8701ed7ebb074..546ad5385355e 100644 --- a/substrate/frame/conviction-voting/src/benchmarking.rs +++ b/substrate/frame/conviction-voting/src/benchmarking.rs @@ -19,6 +19,7 @@ use super::*; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use assert_matches::assert_matches; use frame_benchmarking::v1::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ @@ -30,7 +31,6 @@ use frame_support::{ }, }; use sp_runtime::traits::Bounded; -use sp_std::collections::btree_map::BTreeMap; use crate::Pallet as ConvictionVoting; diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs index 466fc70a619b6..be7f7f448070f 100644 --- a/substrate/frame/conviction-voting/src/lib.rs +++ b/substrate/frame/conviction-voting/src/lib.rs @@ -27,6 +27,8 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use frame_support::{ dispatch::DispatchResult, ensure, @@ -40,7 +42,6 @@ use sp_runtime::{ 
traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, ArithmeticError, DispatchError, Perbill, }; -use sp_std::prelude::*; mod conviction; mod types; @@ -559,7 +560,7 @@ impl, I: 'static> Pallet { ensure!(balance <= T::Currency::total_balance(&who), Error::::InsufficientFunds); let votes = VotingFor::::try_mutate(&who, &class, |voting| -> Result { - let old = sp_std::mem::replace( + let old = core::mem::replace( voting, Voting::Delegating(Delegating { balance, @@ -596,7 +597,7 @@ impl, I: 'static> Pallet { fn try_undelegate(who: T::AccountId, class: ClassOf) -> Result { let votes = VotingFor::::try_mutate(&who, &class, |voting| -> Result { - match sp_std::mem::replace(voting, Voting::default()) { + match core::mem::replace(voting, Voting::default()) { Voting::Delegating(Delegating { balance, target, diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 0e985e25290fa..78569fb3c9f25 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -54,20 +54,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Clone, PartialEq, Eq, Debug)] diff --git a/substrate/frame/conviction-voting/src/types.rs b/substrate/frame/conviction-voting/src/types.rs index 2c45b54485bd9..d6bbb678a14b3 100644 --- a/substrate/frame/conviction-voting/src/types.rs +++ b/substrate/frame/conviction-voting/src/types.rs @@ -18,6 +18,7 @@ //! Miscellaneous additional datatypes. use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ traits::VoteTally, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; @@ -26,7 +27,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, RuntimeDebug, }; -use sp_std::{fmt::Debug, marker::PhantomData}; use super::*; use crate::{AccountVote, Conviction, Vote}; diff --git a/substrate/frame/conviction-voting/src/vote.rs b/substrate/frame/conviction-voting/src/vote.rs index 5ae08f0de65f2..1c5b742ba12b1 100644 --- a/substrate/frame/conviction-voting/src/vote.rs +++ b/substrate/frame/conviction-voting/src/vote.rs @@ -25,7 +25,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, RuntimeDebug, }; -use sp_std::prelude::*; /// A number of lock periods, plus a vote, one way or the other. 
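The conviction-voting edits above follow the repo-wide `sp_std` retirement in this PR: heap types move to `alloc`, allocation-free items to `core`, and `no_std` crates gain `extern crate alloc;`. A sketch of the resulting import layout (an illustrative file skeleton, not any real pallet):

```rust
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(unused_imports)] // skeleton only

extern crate alloc;

// Heap-allocated types now come from `alloc` instead of `sp_std`:
use alloc::{collections::btree_map::BTreeMap, vec::Vec};
// Allocation-free items come straight from `core`:
use core::{fmt::Debug, marker::PhantomData, mem};

/// Same shape as the `try_undelegate` change above, now via `core::mem`.
fn take_default<T: Default>(slot: &mut T) -> T {
    mem::replace(slot, T::default())
}
```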
#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen)] diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 8773a124cd02a..7ef6f9e11eb11 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -16,18 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-ranked-collective = { path = "../ranked-collective", default-features = false, optional = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index b3ee3ab7d165f..adb8a4a091b8d 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -22,6 +22,7 @@ use super::*; use crate::Pallet as CoreFellowship; +use alloc::{boxed::Box, vec}; use frame_benchmarking::v2::*; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_arithmetic::traits::Bounded; @@ -85,6 +86,45 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn set_partial_params() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); + + // Set up the initial default state for the Params storage + let params = ParamsType { + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + }; + CoreFellowship::::set_params(RawOrigin::Root.into(), Box::new(params))?; + + let default_params = Params::::get(); + let expected_params = ParamsType { + active_salary: default_params.active_salary, + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: default_params.demotion_period, + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + offboard_timeout: 1u32.into(), + 
}; + + let params_payload = ParamsType { + active_salary: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![Some(10u32.into()); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![None; max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![Some(100u32.into()); max_rank]) + .unwrap(), + offboard_timeout: None, + }; + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(params_payload.clone())); + + assert_eq!(Params::::get(), expected_params); + Ok(()) + } + #[benchmark] fn bump_offboard() -> Result<(), BenchmarkError> { set_benchmark_params::()?; @@ -171,6 +211,22 @@ mod benchmarks { Ok(()) } + /// Benchmark the `promote_fast` extrinsic to promote someone up to `r`. + #[benchmark] + fn promote_fast(r: Linear<1, { T::MaxRank::get() as u32 }>) -> Result<(), BenchmarkError> { + let r = r.try_into().expect("r is too large"); + let member = make_member::(0)?; + + ensure_evidence::(&member)?; + + #[extrinsic_call] + _(RawOrigin::Root, member.clone(), r); + + assert_eq!(T::Members::rank_of(&member), Some(r)); + assert!(!MemberEvidence::::contains_key(&member)); + Ok(()) + } + #[benchmark] fn offboard() -> Result<(), BenchmarkError> { let member = make_member::(0)?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index 94339b85d0524..c61447e36280a 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -57,11 +57,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::boxed::Box; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{fmt::Debug, marker::PhantomData}; use scale_info::TypeInfo; use sp_arithmetic::traits::{Saturating, Zero}; use sp_runtime::RuntimeDebug; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use frame_support::{ defensive, @@ -209,6 +212,10 @@ pub mod pallet { /// rank to which it can promote. type PromoteOrigin: EnsureOrigin>; + /// The origin that has permission to "fast" promote a member by ignoring promotion periods + /// and skipping ranks. The `Success` value is the maximum rank to which it can promote. + type FastPromoteOrigin: EnsureOrigin>; + /// The maximum size in bytes submitted evidence is allowed to be. #[pallet::constant] type EvidenceSize: Get; @@ -222,6 +229,11 @@ pub mod pallet { pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, >::MaxRank>; + pub type PartialParamsOf = ParamsType< + Option<>::Balance>, + Option>, + >::MaxRank, + >; pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; @@ -493,6 +505,44 @@ pub mod pallet { Ok(()) } + /// Fast promotions can skip ranks and ignore the `min_promotion_period`. + /// + /// This is useful for out-of-band promotions, hence it has its own `FastPromoteOrigin` to + /// be (possibly) more restrictive than `PromoteOrigin`. Note that the member must already + /// be inducted. 
+ #[pallet::weight(T::WeightInfo::promote_fast(*to_rank as u32))] + #[pallet::call_index(10)] + pub fn promote_fast( + origin: OriginFor, + who: T::AccountId, + to_rank: RankOf, + ) -> DispatchResult { + match T::FastPromoteOrigin::try_origin(origin) { + Ok(allow_rank) => ensure!(allow_rank >= to_rank, Error::::NoPermission), + Err(origin) => ensure_root(origin)?, + } + ensure!(to_rank as u32 <= T::MaxRank::get(), Error::::InvalidRank); + let curr_rank = T::Members::rank_of(&who).ok_or(Error::::Unranked)?; + ensure!(to_rank > curr_rank, Error::::UnexpectedRank); + + let mut member = Member::::get(&who).ok_or(Error::::NotTracked)?; + let now = frame_system::Pallet::::block_number(); + member.last_promotion = now; + member.last_proof = now; + + for rank in (curr_rank + 1)..=to_rank { + T::Members::promote(&who)?; + + // NOTE: We could factor this out, but it would destroy our invariants: + Member::::insert(&who, &member); + + Self::dispose_evidence(who.clone(), rank.saturating_sub(1), Some(rank)); + Self::deposit_event(Event::::Promoted { who: who.clone(), to_rank: rank }); + } + + Ok(()) + } + /// Stop tracking a prior member who is now not a ranked member of the collective. /// /// - `origin`: A `Signed` origin of an account. @@ -558,9 +608,59 @@ pub mod pallet { Ok(Pays::No.into()) } + + /// Set the parameters partially. + /// + /// - `origin`: An origin complying with `ParamsOrigin` or root. + /// - `partial_params`: The new parameters for the pallet. + /// + /// This update config with multiple arguments without duplicating + /// the fields that does not need to update (set to None). + #[pallet::weight(T::WeightInfo::set_partial_params())] + #[pallet::call_index(9)] + pub fn set_partial_params( + origin: OriginFor, + partial_params: Box>, + ) -> DispatchResult { + T::ParamsOrigin::ensure_origin_or_root(origin)?; + let params = Params::::mutate(|p| { + Self::set_partial_params_slice(&mut p.active_salary, partial_params.active_salary); + Self::set_partial_params_slice( + &mut p.passive_salary, + partial_params.passive_salary, + ); + Self::set_partial_params_slice( + &mut p.demotion_period, + partial_params.demotion_period, + ); + Self::set_partial_params_slice( + &mut p.min_promotion_period, + partial_params.min_promotion_period, + ); + if let Some(new_offboard_timeout) = partial_params.offboard_timeout { + p.offboard_timeout = new_offboard_timeout; + } + p.clone() + }); + Self::deposit_event(Event::::ParamsChanged { params }); + Ok(()) + } } impl, I: 'static> Pallet { + /// Partially update the base slice with a new slice + /// + /// Only elements in the base slice which has a new value in the new slice will be updated. + pub(crate) fn set_partial_params_slice( + base_slice: &mut BoundedVec>::MaxRank>, + new_slice: BoundedVec, >::MaxRank>, + ) { + for (base_element, new_element) in base_slice.iter_mut().zip(new_slice) { + if let Some(element) = new_element { + *base_element = element; + } + } + } /// Convert a rank into a `0..RANK_COUNT` index suitable for the arrays in Params. /// /// Rank 1 becomes index 0, rank `RANK_COUNT` becomes index `RANK_COUNT - 1`. 
Any rank not diff --git a/substrate/frame/core-fellowship/src/migration.rs b/substrate/frame/core-fellowship/src/migration.rs index b8b5540a4b475..b1e27d1e79363 100644 --- a/substrate/frame/core-fellowship/src/migration.rs +++ b/substrate/frame/core-fellowship/src/migration.rs @@ -24,6 +24,8 @@ use frame_support::{ BoundedVec, }; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index f313731665857..bcf70c7beb102 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -78,6 +78,7 @@ impl Config for Test { type InductOrigin = EnsureInducted; type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = EvidenceSize; type MaxRank = ConstU32<9>; } @@ -157,6 +158,7 @@ impl pallet_ranked_collective::Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = CoreFellowship; type VoteWeight = Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = CoreFellowship; } diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs index 9245e5159a901..11d1ea9fe5b75 100644 --- a/substrate/frame/core-fellowship/src/tests/unit.rs +++ b/substrate/frame/core-fellowship/src/tests/unit.rs @@ -21,7 +21,7 @@ use std::collections::BTreeMap; use core::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, derive_impl, ord_parameter_types, + assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types, pallet_prelude::Weight, parameter_types, traits::{tokens::GetSalary, ConstU32, IsInVec, TryMapSuccess}, @@ -115,6 +115,7 @@ impl Config for Test { type InductOrigin = EnsureInducted; type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; + type FastPromoteOrigin = Self::PromoteOrigin; type EvidenceSize = ConstU32<1024>; type MaxRank = ConstU32<9>; } @@ -187,6 +188,40 @@ fn set_params_works() { }); } +#[test] +fn set_partial_params_works() { + new_test_ext().execute_with(|| { + let params = ParamsType { + active_salary: bounded_vec![None; 9], + passive_salary: bounded_vec![None; 9], + demotion_period: bounded_vec![None, Some(10), None, None, None, None, None, None, None], + min_promotion_period: bounded_vec![None; 9], + offboard_timeout: Some(2), + }; + assert_noop!( + CoreFellowship::set_partial_params(signed(2), Box::new(params.clone())), + DispatchError::BadOrigin + ); + assert_ok!(CoreFellowship::set_partial_params(signed(1), Box::new(params))); + + // Update params from the base params value declared in `new_test_ext` + let raw_updated_params = ParamsType { + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 10, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], + offboard_timeout: 2, + }; + // Updated params stored in Params storage value + let updated_params = Params::::get(); + assert_eq!(raw_updated_params, updated_params); + + System::assert_last_event( + Event::::ParamsChanged { params: updated_params }.into(), + ); + }); +} + #[test] fn induct_works() { 
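To make the `set_partial_params` behaviour above concrete: each `Some(_)` entry of the payload overwrites the matching element of the stored vector, while `None` entries leave it alone. A standalone sketch of that element-wise merge, using plain slices instead of `BoundedVec` and values consistent with the `demotion_period` update asserted in `set_partial_params_works`:

```rust
// Element-wise merge in the spirit of `set_partial_params_slice`.
fn merge_partial(base: &mut [u32], update: &[Option<u32>]) {
    for (base_element, new_element) in base.iter_mut().zip(update) {
        if let Some(value) = new_element {
            *base_element = *value;
        }
    }
}

fn main() {
    let mut demotion_period: [u32; 9] = [2, 4, 6, 8, 10, 12, 14, 16, 18];
    let update: [Option<u32>; 9] =
        [None, Some(10), None, None, None, None, None, None, None];
    merge_partial(&mut demotion_period, &update);
    // Only index 1 had a new value, so only that element changed.
    assert_eq!(demotion_period, [2, 10, 6, 8, 10, 12, 14, 16, 18]);
}
```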
new_test_ext().execute_with(|| { @@ -222,6 +257,99 @@ fn promote_works() { }); } +#[test] +fn promote_fast_works() { + let alice = 1; + + new_test_ext().execute_with(|| { + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::Unranked + ); + set_rank(alice, 0); + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::NotTracked + ); + assert_ok!(CoreFellowship::import(signed(alice))); + + // Cannot fast promote to the same rank: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 0), + Error::::UnexpectedRank + ); + assert_ok!(CoreFellowship::promote_fast(signed(alice), alice, 1)); + assert_eq!(TestClub::rank_of(&alice), Some(1)); + + // Cannot promote normally because of the period: + assert_noop!(CoreFellowship::promote(signed(2), alice, 2), Error::::TooSoon); + // But can fast promote: + assert_ok!(CoreFellowship::promote_fast(signed(2), alice, 2)); + assert_eq!(TestClub::rank_of(&alice), Some(2)); + + // Cannot promote to lower rank: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 0), + Error::::UnexpectedRank + ); + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 1), + Error::::UnexpectedRank + ); + // Permission is checked: + assert_noop!( + CoreFellowship::promote_fast(signed(alice), alice, 2), + Error::::NoPermission + ); + + // Can fast promote up to the maximum: + assert_ok!(CoreFellowship::promote_fast(signed(9), alice, 9)); + // But not past the maximum: + assert_noop!( + CoreFellowship::promote_fast(RuntimeOrigin::root(), alice, 10), + Error::::InvalidRank + ); + }); +} + +/// Compare the storage root hashes of a normal promote and a fast promote. +#[test] +fn promote_fast_identical_to_promote() { + let alice = 1; + + new_test_ext().execute_with(|| { + set_rank(alice, 0); + assert_eq!(TestClub::rank_of(&alice), Some(0)); + assert_ok!(CoreFellowship::import(signed(alice))); + run_to(3); + assert_eq!(TestClub::rank_of(&alice), Some(0)); + assert_ok!(CoreFellowship::submit_evidence( + signed(alice), + Wish::Promotion, + bounded_vec![0; 1024] + )); + + let root_promote = hypothetically!({ + assert_ok!(CoreFellowship::promote(signed(alice), alice, 1)); + // Don't clean the events since they should emit the same events: + sp_io::storage::root(sp_runtime::StateVersion::V1) + }); + + // This is using thread locals instead of storage... + TestClub::demote(&alice).unwrap(); + + let root_promote_fast = hypothetically!({ + assert_ok!(CoreFellowship::promote_fast(signed(alice), alice, 1)); + + sp_io::storage::root(sp_runtime::StateVersion::V1) + }); + + assert_eq!(root_promote, root_promote_fast); + // Ensure that we don't compare trivial stuff like `()` from a type error above. + assert_eq!(root_promote.len(), 32); + }); +} + #[test] fn sync_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 8fad6f585c112..5e64600b662b9 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-06-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -50,11 +50,13 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_core_fellowship`. pub trait WeightInfo { fn set_params() -> Weight; + fn set_partial_params() -> Weight; fn bump_offboard() -> Weight; fn bump_demote() -> Weight; fn set_active() -> Weight; fn induct() -> Weight; fn promote() -> Weight; + fn promote_fast(r: u32, ) -> Weight; fn offboard() -> Weight; fn import() -> Weight; fn approve() -> Weight; @@ -70,8 +72,19 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_633_000 picoseconds. - Weight::from_parts(8_018_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `CoreFellowship::Params` (r:1 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `1853` + // Minimum execution time: 10_050_000 picoseconds. + Weight::from_parts(10_244_000, 1853) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -92,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 57_597_000 picoseconds. - Weight::from_parts(58_825_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. + Weight::from_parts(55_650_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -115,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 61_387_000 picoseconds. - Weight::from_parts(63_408_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -128,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 15_941_000 picoseconds. - Weight::from_parts(16_547_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -147,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_963_000 picoseconds. - Weight::from_parts(25_873_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -170,11 +183,38 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 55_062_000 picoseconds. - Weight::from_parts(58_422_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. 
+ Weight::from_parts(52_810_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:9 w:9) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:9) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:9) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 9]`. + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `RankedCollective::Members` (r:1 w:0) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -185,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 15_901_000 picoseconds. - Weight::from_parts(16_746_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -198,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_421_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -213,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 36_925_000 picoseconds. - Weight::from_parts(38_330_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. 
+ Weight::from_parts(35_162_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -226,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_210_000 picoseconds. - Weight::from_parts(26_247_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,8 +281,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_633_000 picoseconds. - Weight::from_parts(8_018_000, 0) + // Minimum execution time: 5_772_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `CoreFellowship::Params` (r:1 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) + fn set_partial_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `1853` + // Minimum execution time: 10_050_000 picoseconds. + Weight::from_parts(10_244_000, 1853) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -263,8 +314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 57_597_000 picoseconds. - Weight::from_parts(58_825_000, 19894) + // Minimum execution time: 54_433_000 picoseconds. + Weight::from_parts(55_650_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -286,8 +337,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 61_387_000 picoseconds. - Weight::from_parts(63_408_000, 19894) + // Minimum execution time: 57_634_000 picoseconds. + Weight::from_parts(58_816_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -299,8 +350,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 15_941_000 picoseconds. - Weight::from_parts(16_547_000, 3514) + // Minimum execution time: 14_527_000 picoseconds. + Weight::from_parts(14_948_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -318,8 +369,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 24_963_000 picoseconds. - Weight::from_parts(25_873_000, 3514) + // Minimum execution time: 22_137_000 picoseconds. + Weight::from_parts(22_925_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -341,11 +392,38 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 55_062_000 picoseconds. - Weight::from_parts(58_422_000, 19894) + // Minimum execution time: 51_837_000 picoseconds. 
+ Weight::from_parts(52_810_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:9 w:9) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:9) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:9) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 9]`. + fn promote_fast(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `16844` + // Estimated: `19894 + r * (2489 ยฑ0)` + // Minimum execution time: 45_065_000 picoseconds. + Weight::from_parts(34_090_392, 19894) + // Standard Error: 18_620 + .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into())) + } /// Storage: `RankedCollective::Members` (r:1 w:0) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -356,8 +434,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 15_901_000 picoseconds. - Weight::from_parts(16_746_000, 3514) + // Minimum execution time: 14_321_000 picoseconds. + Weight::from_parts(14_747_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -369,8 +447,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_421_000, 3514) + // Minimum execution time: 13_525_000 picoseconds. + Weight::from_parts(13_843_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -384,8 +462,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 36_925_000 picoseconds. - Weight::from_parts(38_330_000, 19894) + // Minimum execution time: 34_719_000 picoseconds. + Weight::from_parts(35_162_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -397,8 +475,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 25_210_000 picoseconds. 
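Likewise, illustration only for the new `promote_fast(r)` entry above: per rank stepped through there is one extra `MemberCount` read, three extra writes (`MemberCount`, `IndexToId`, `IdToIndex`) and 2489 bytes of proof size on top of the base cost. Constants copied from the `impl WeightInfo for ()` (RocksDbWeight) variant:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

// Evaluates the generated promote_fast(r) formula for a given rank delta.
fn promote_fast_weight(r: u32) -> Weight {
    Weight::from_parts(34_090_392, 19894)
        .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into()))
        .saturating_add(RocksDbWeight::get().reads(3_u64))
        .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
        .saturating_add(RocksDbWeight::get().writes(3_u64))
        .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into())))
        .saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
}
```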
- Weight::from_parts(26_247_000, 19894) + // Minimum execution time: 23_477_000 picoseconds. + Weight::from_parts(23_897_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml index 3b122dc2e26c3..a7751e1d6c1ca 100644 --- a/substrate/frame/delegated-staking/Cargo.toml +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -12,25 +12,24 @@ description = "FRAME delegated staking pallet" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -substrate-test-utils = { path = "../../test-utils" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-staking = { path = "../staking" } -pallet-nomination-pools = { path = "../nomination-pools" } -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-election-provider-support/runtime-benchmarks", diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs index 9f5649672d70e..f8df9dfe7b46c 100644 --- a/substrate/frame/delegated-staking/src/impls.rs +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -139,7 +139,7 @@ impl OnStakingUpdate> for Pallet { fn on_slash( who: &T::AccountId, _slashed_active: BalanceOf, - _slashed_unlocking: &sp_std::collections::btree_map::BTreeMap>, + _slashed_unlocking: &alloc::collections::btree_map::BTreeMap>, slashed_total: BalanceOf, ) { >::mutate(who, |maybe_register| match maybe_register { diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 4b924bce3a579..61809dcb54eea 100644 --- 
a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -132,10 +132,13 @@ mod mock; mod tests; mod types; +extern crate alloc; + pub use pallet::*; use types::*; +use core::convert::TryInto; use frame_support::{ pallet_prelude::*, traits::{ @@ -154,7 +157,6 @@ use sp_runtime::{ ArithmeticError, DispatchResult, Perbill, RuntimeDebug, Saturating, }; use sp_staking::{Agent, Delegator, EraIndex, StakingInterface, StakingUnchecked}; -use sp_std::{convert::TryInto, prelude::*}; pub type BalanceOf = <::Currency as FunInspect<::AccountId>>::Balance; @@ -779,7 +781,7 @@ impl Pallet { } #[cfg(any(test, feature = "try-runtime"))] -use sp_std::collections::btree_map::BTreeMap; +use alloc::collections::btree_map::BTreeMap; #[cfg(any(test, feature = "try-runtime"))] impl Pallet { @@ -823,10 +825,6 @@ impl Pallet { ) -> Result<(), sp_runtime::TryRuntimeError> { let mut delegation_aggregation = BTreeMap::>::new(); for (delegator, delegation) in delegations.iter() { - ensure!( - T::CoreStaking::status(delegator).is_err(), - "delegator should not be directly staked" - ); ensure!(!Self::is_agent(delegator), "delegator cannot be an agent"); delegation_aggregation diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index c1875055f2fec..811d5739f4e98 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, Currency}, + traits::{ConstU64, Currency, VariantCountOf}, PalletId, }; @@ -44,7 +44,7 @@ pub const GENESIS_VALIDATOR: AccountId = 1; pub const GENESIS_NOMINATOR_ONE: AccountId = 101; pub const GENESIS_NOMINATOR_TWO: AccountId = 102; -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; type AccountData = pallet_balances::AccountData; @@ -64,19 +64,14 @@ pub type Balance = u128; parameter_types! { pub static ExistentialDeposit: Balance = 1; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; + type MaxFreezes = VariantCountOf; type RuntimeFreezeReason = RuntimeFreezeReason; } @@ -93,7 +88,6 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; - pub static BondingDuration: u32 = 3; pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -106,35 +100,17 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = ConstU32<1>; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<10>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index d40539d40ddda..385bb17ddadbd 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -21,7 +21,7 @@ use super::*; use crate::mock::*; use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold}; use pallet_nomination_pools::{Error as PoolsError, Event as PoolsEvent}; -use pallet_staking::Error as StakingError; +use pallet_staking::{Error as StakingError, RewardDestination}; use sp_staking::{Agent, DelegationInterface, Delegator, StakerStatus}; #[test] @@ -337,7 +337,6 @@ fn apply_pending_slash() { /// Integration tests with pallet-staking. mod staking_integration { use super::*; - use pallet_staking::RewardDestination; use sp_staking::Stake; #[test] @@ -501,17 +500,17 @@ mod staking_integration { ExtBuilder::default().build_and_execute(|| { start_era(1); let agent = 200; - setup_delegation_stake(agent, 201, (300..350).collect(), 100, 0); + setup_delegation_stake(agent, 201, (300..350).collect(), 320, 0); // verify withdraw not possible yet assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 100, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 320, 0), Error::::NotEnoughFunds ); // fill up unlocking chunks in core staking. - // 10 is the max chunks - for i in 2..=11 { + // 32 is the max chunks + for i in 2..=33 { start_era(i); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); // no withdrawals from core staking yet. @@ -519,35 +518,35 @@ mod staking_integration { } // another unbond would trigger withdrawal - start_era(12); + start_era(34); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); - // 8 previous unbonds would be withdrawn as they were already unlocked. 
Unlocking period - // is 3 eras. - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 8 * 10); + // 30 previous unbonds would be withdrawn as they were already unlocked. Unlocking + // period is 3 eras. + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 30 * 10); // release some delegation now. assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 160, 0 )); - assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 80 - 40); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 300 - 160); // cannot release more than available assert_noop!( - DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 50, 0), + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 141, 0), Error::::NotEnoughFunds ); assert_ok!(DelegatedStaking::release_delegation( RawOrigin::Signed(agent).into(), 300, - 40, + 140, 0 )); - assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 100 - 80); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 320 - 300); }); } @@ -1217,6 +1216,46 @@ mod pool_integration { }); } + #[test] + fn existing_pool_member_can_stake() { + // A pool member is able to stake directly since staking only uses free funds but once a + // staker, they cannot join/add extra bond to the pool. They can still withdraw funds. + ExtBuilder::default().build_and_execute(|| { + start_era(1); + // GIVEN: a pool. + fund(&200, 1000); + let pool_id = create_pool(200, 800); + + // WHEN: delegator joins a pool + let delegator = 100; + fund(&delegator, 1000); + assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), 200, pool_id)); + + // THEN: they can still stake directly. + assert_ok!(Staking::bond( + RuntimeOrigin::signed(delegator), + 500, + RewardDestination::Account(101) + )); + assert_ok!(Staking::nominate( + RuntimeOrigin::signed(delegator), + vec![GENESIS_VALIDATOR] + )); + + // The delegator cannot add any extra bond to the pool anymore. 
+ assert_noop!( + Pools::bond_extra(RawOrigin::Signed(delegator).into(), BondExtra::FreeBalance(100)), + Error::::AlreadyStaking + ); + + // But they can unbond + assert_ok!(Pools::unbond(RawOrigin::Signed(delegator).into(), delegator, 50)); + // and withdraw + start_era(4); + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(delegator).into(), delegator, 0)); + }); + } + fn create_pool(creator: AccountId, amount: Balance) -> u32 { fund(&creator, amount * 2); assert_ok!(Pools::create( diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 7f182447ead61..ce5ffa57d5798 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -16,24 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-scheduler = { path = "../scheduler" } -pallet-preimage = { path = "../preimage" } +pallet-balances = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index 799d614c37f4a..ee36e9212f52b 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -85,8 +85,8 @@ fn assert_has_event(generic_event: ::RuntimeEvent) { // note a new preimage. fn note_preimage() -> T::Hash { + use alloc::borrow::Cow; use core::sync::atomic::{AtomicU8, Ordering}; - use sp_std::borrow::Cow; // note a new preimage on every function invoke. 
static COUNTER: AtomicU8 = AtomicU8::new(0); let data = Cow::from(vec![COUNTER.fetch_add(1, Ordering::Relaxed)]); diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 19cdc754659d3..27bc36a756e4b 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -152,10 +152,12 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ ensure, - error::BadOrigin, traits::{ defensive_prelude::*, schedule::{v3::Named as ScheduleNamed, DispatchTime}, @@ -166,10 +168,9 @@ use frame_support::{ }; use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; use sp_runtime::{ - traits::{Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, + traits::{BadOrigin, Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, ArithmeticError, DispatchError, DispatchResult, }; -use sp_std::prelude::*; mod conviction; mod types; @@ -439,7 +440,7 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - _config: sp_std::marker::PhantomData, + _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -1434,7 +1435,7 @@ impl Pallet { delegations: Default::default(), prior: Default::default(), }; - sp_std::mem::swap(&mut old, voting); + core::mem::swap(&mut old, voting); match old { Voting::Delegating { balance, target, conviction, delegations, mut prior, .. @@ -1475,7 +1476,7 @@ impl Pallet { fn try_undelegate(who: T::AccountId) -> Result { let votes = VotingOf::::try_mutate(&who, |voting| -> Result { let mut old = Voting::default(); - sp_std::mem::swap(&mut old, voting); + core::mem::swap(&mut old, voting); match old { Voting::Delegating { balance, target, conviction, delegations, mut prior } => { // remove any delegation votes to our current target. diff --git a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs index 1cb50a157b12b..ca0e0f7a091aa 100644 --- a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs +++ b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs @@ -19,6 +19,7 @@ //! pallet. use crate::{PropIndex, Voting, DEMOCRACY_ID}; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use core::iter::Sum; use frame_support::{ pallet_prelude::ValueQuery, @@ -29,7 +30,6 @@ use frame_support::{ }; use sp_core::Get; use sp_runtime::{traits::Zero, BoundedVec, Saturating}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; const LOG_TARGET: &str = "runtime::democracy::migrations::unlock_and_unreserve_all_funds"; @@ -87,7 +87,7 @@ type VotingOf = StorageMap< /// The pallet should be made inoperable before this migration is run. /// /// (See also [`RemovePallet`][frame_support::migrations::RemovePallet]) -pub struct UnlockAndUnreserveAllFunds(sp_std::marker::PhantomData); +pub struct UnlockAndUnreserveAllFunds(core::marker::PhantomData); impl UnlockAndUnreserveAllFunds { /// Calculates and returns the total amounts reserved by each account by this pallet, and all @@ -170,8 +170,8 @@ where /// the actual total reserved amount for any accounts. 
#[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use alloc::collections::btree_set::BTreeSet; use codec::Encode; - use sp_std::collections::btree_set::BTreeSet; // Get staked and deposited balances as reported by this pallet. let (account_deposits, account_locks, _) = Self::get_account_deposits_and_locks(); diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 9303c0da504f3..7d7066c8af691 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -108,20 +108,9 @@ impl pallet_scheduler::Config for Test { type Preimages = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static PreimageByteDeposit: u64 = 0; diff --git a/substrate/frame/democracy/src/vote.rs b/substrate/frame/democracy/src/vote.rs index b3fe9aa28e1ac..779f7ecd570f0 100644 --- a/substrate/frame/democracy/src/vote.rs +++ b/substrate/frame/democracy/src/vote.rs @@ -25,7 +25,6 @@ use sp_runtime::{ traits::{Saturating, Zero}, BoundedVec, RuntimeDebug, }; -use sp_std::prelude::*; /// A number of lock periods, plus a vote, one way or the other. #[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 43e3e7079d2fa..4d1f521d726e2 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -15,40 +15,39 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-io = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-npos-elections = { workspace = true } +sp-arithmetic = { workspace = true } +frame-election-provider-support = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } 
-pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } -rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.26.2", default-features = false, features = ["derive"], optional = true } +frame-benchmarking = { optional = true, workspace = true } +pallet-election-provider-support-benchmarking = { optional = true, workspace = true } +rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } +strum = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] -parking_lot = "0.12.1" -rand = "0.8.5" -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-tracing = { path = "../../primitives/tracing" } -pallet-balances = { path = "../balances" } -frame-benchmarking = { path = "../benchmarking" } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] @@ -68,7 +67,6 @@ std = [ "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", - "sp-std/std", "sp-tracing/std", "strum/std", ] diff --git a/substrate/frame/election-provider-multi-phase/src/helpers.rs b/substrate/frame/election-provider-multi-phase/src/helpers.rs index a3f27fc18f077..8269b2cb73be2 100644 --- a/substrate/frame/election-provider-multi-phase/src/helpers.rs +++ b/substrate/frame/election-provider-multi-phase/src/helpers.rs @@ -21,7 +21,7 @@ use crate::{ unsigned::{MinerConfig, MinerVoterOf}, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight, }; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; #[macro_export] macro_rules! log { diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 63b4c49cdfe47..9ce8b3890a624 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -229,6 +229,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode}; use frame_election_provider_support::{ bounds::{CountBound, ElectionBounds, ElectionBoundsBuilder, SizeBound}, @@ -256,7 +259,6 @@ use sp_runtime::{ }, DispatchError, ModuleError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, }; -use sp_std::prelude::*; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -837,7 +839,7 @@ pub mod pallet { } fn integrity_test() { - use sp_std::mem::size_of; + use core::mem::size_of; // The index type of both voters and targets need to be smaller than that of usize (very // unlikely to be the case, but anyhow).. assert!(size_of::>() <= size_of::()); @@ -1354,7 +1356,7 @@ pub mod pallet { /// This wrapper is created for handling the synchronization of [`Snapshot`], [`SnapshotMetadata`] /// and [`DesiredTargets`] storage items. 
-pub struct SnapshotWrapper(sp_std::marker::PhantomData); +pub struct SnapshotWrapper(core::marker::PhantomData); impl SnapshotWrapper { /// Kill all snapshot related storage items at the same time. diff --git a/substrate/frame/election-provider-multi-phase/src/migrations.rs b/substrate/frame/election-provider-multi-phase/src/migrations.rs index 156f1c02e27cd..73a2d878f150f 100644 --- a/substrate/frame/election-provider-multi-phase/src/migrations.rs +++ b/substrate/frame/election-provider-multi-phase/src/migrations.rs @@ -16,15 +16,15 @@ // limitations under the License. pub mod v1 { + use alloc::collections::btree_map::BTreeMap; use frame_support::{ storage::unhashed, traits::{Defensive, GetStorageVersion, OnRuntimeUpgrade}, BoundedVec, }; - use sp_std::collections::btree_map::BTreeMap; use crate::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { let current = Pallet::::in_code_storage_version(); diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 92b87d92e99b1..4532185b959c0 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -237,7 +237,6 @@ impl frame_system::Config for Runtime { const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { - pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), @@ -245,20 +244,9 @@ parameter_types! { ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } #[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index ae830ed0382d8..fe07e477e1d5d 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -24,7 +24,12 @@ use crate::{ ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, SolutionOf, SolutionOrSnapshotSize, Weight, WeightInfo, }; +use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, +}; use codec::{Decode, Encode, HasCompact}; +use core::cmp::Ordering; use frame_election_provider_support::NposSolution; use frame_support::traits::{ defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency, @@ -37,11 +42,6 @@ use sp_runtime::{ traits::{Convert, Saturating, Zero}, FixedPointNumber, FixedPointOperand, FixedU128, Percent, RuntimeDebug, }; -use sp_std::{ - cmp::Ordering, - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - vec::Vec, -}; /// A raw, unchecked signed submission. 
/// diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 8b25815eca13e..728ab93023895 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -21,6 +21,7 @@ use crate::{ helpers, Call, Config, ElectionCompute, Error, FeasibilityError, Pallet, RawSolution, ReadySolution, RoundSnapshot, SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, }; +use alloc::{boxed::Box, vec::Vec}; use codec::Encode; use frame_election_provider_support::{NposSolution, NposSolver, PerThing128, VoteWeight}; use frame_support::{ @@ -39,7 +40,6 @@ use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, DispatchError, SaturatedConversion, }; -use sp_std::prelude::*; /// Storage key used to store the last block number at which offchain worker ran. pub(crate) const OFFCHAIN_LAST_BLOCK: &[u8] = b"parity/multi-phase-unsigned-election"; @@ -396,14 +396,14 @@ impl Pallet { /// Configurations for a miner that comes with this pallet. pub trait MinerConfig { /// The account id type. - type AccountId: Ord + Clone + codec::Codec + sp_std::fmt::Debug; + type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug; /// The solution that the miner is mining. type Solution: codec::Codec + Default + PartialEq + Eq + Clone - + sp_std::fmt::Debug + + core::fmt::Debug + Ord + NposSolution + TypeInfo; @@ -428,7 +428,7 @@ pub trait MinerConfig { } /// A base miner, suitable to be used for both signed and unsigned submissions. -pub struct Miner(sp_std::marker::PhantomData); +pub struct Miner(core::marker::PhantomData); impl Miner { /// Same as [`Pallet::mine_solution`], but the input snapshot data must be given. 
pub fn mine_solution_with_snapshot( @@ -505,7 +505,7 @@ impl Miner { stake }) .unwrap_or_default(); - sp_std::cmp::Reverse(stake) + core::cmp::Reverse(stake) }, ); @@ -1016,6 +1016,7 @@ mod tests { Event, InvalidTransaction, Phase, QueuedSolution, TransactionSource, TransactionValidityError, }; + use alloc::vec; use codec::Decode; use frame_election_provider_support::IndexAssignment; use frame_support::{assert_noop, assert_ok, traits::OffchainWorker}; diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index fc696e04d689f..77ecbb1af98f1 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -16,30 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +parking_lot = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } -sp-tracing = { path = "../../../primitives/tracing" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true } +sp-tracing = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-election-provider-multi-phase = { path = ".." 
} -pallet-staking = { path = "../../staking" } -pallet-nomination-pools = { path = "../../nomination-pools" } -pallet-bags-list = { path = "../../bags-list" } -pallet-balances = { path = "../../balances" } -pallet-timestamp = { path = "../../timestamp" } -pallet-session = { path = "../../session" } +pallet-election-provider-multi-phase = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-session = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 2b1f1335c6fe8..aaffbb6681cd2 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -22,6 +22,7 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; +use pallet_timestamp::Now; use sp_core::Get; use sp_runtime::Perbill; @@ -46,7 +47,7 @@ fn log_current_time() { Session::current_index(), Staking::current_era(), ElectionProviderMultiPhase::current_phase(), - Timestamp::now() + Now::::get() ); } @@ -209,7 +210,7 @@ fn continuous_slashes_below_offending_threshold() { // failed due to election minimum score. if start_next_active_era(pool_state.clone()).is_err() { assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - break + break; } active_validator_set = Session::validators(); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index e5987ec33f06c..5c64f2a0bc20f 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -19,7 +19,7 @@ use frame_support::{ assert_ok, parameter_types, traits, - traits::{Hooks, UnfilteredDispatchable}, + traits::{Hooks, UnfilteredDispatchable, VariantCountOf}, weights::constants, }; use frame_system::EnsureRoot; @@ -38,7 +38,6 @@ use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, }; -use sp_std::prelude::*; use std::collections::BTreeMap; use codec::Decode; @@ -102,20 +101,14 @@ parameter_types! { ); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type MaxFreezes = traits::ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; - type WeightInfo = (); } impl pallet_timestamp::Config for Runtime { @@ -235,7 +228,6 @@ parameter_types! 
{ pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. - pub HistoryDepth: u32 = 84; } impl pallet_bags_list::Config for Runtime { @@ -291,15 +283,11 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16; /// Disabling factor set explicitly to byzantine threshold pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); // burn slashes - type Reward = (); // rewards are minted from the void type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; @@ -314,12 +302,10 @@ impl pallet_staking::Config for Runtime { type NominationsQuota = pallet_staking::FixedNominationsQuota; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = MaxUnlockingChunks; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = HistoryDepth; type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } impl frame_system::offchain::SendTransactionTypes for Runtime diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 1c63f90720f7b..80fea6c5ca972 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -15,21 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-election-provider-solution-type = { path = "solution-type" } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-election-provider-solution-type = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } [dev-dependencies] -rand = { version = "0.8.5", features = ["small_rng"] } -sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections" } +rand = { features = ["small_rng"], workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-npos-elections = 
{ workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml index c2e644cfefab9..dcfcb9c35cda0 100644 --- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml +++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml @@ -15,15 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -frame-election-provider-support = { path = "..", default-features = false } -frame-system = { path = "../../system", default-features = false } -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-election-provider-support = { workspace = true } +frame-system = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -34,7 +33,6 @@ std = [ "frame-system/std", "sp-npos-elections/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs index 4722680cfcc1c..8cca0d459eac3 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/inner.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -18,10 +18,10 @@ //! Election provider support pallet benchmarking. //! This is separated into its own crate to avoid bloating the size of the runtime. 
+use alloc::vec::Vec; use codec::Decode; use frame_benchmarking::v1::benchmarks; use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen}; -use sp_std::vec::Vec; pub struct Pallet<T: Config>(frame_system::Pallet<T>); pub trait Config: frame_system::Config {} diff --git a/substrate/frame/election-provider-support/benchmarking/src/lib.rs b/substrate/frame/election-provider-support/benchmarking/src/lib.rs index 78b226e52af6c..d092483f93839 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/lib.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 0b631bd7bb035..a254f6c9b5b72 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -20,14 +20,14 @@ proc-macro = true [dependencies] syn = { features = ["full", "visit"], workspace = true } quote = { workspace = true } -proc-macro2 = "1.0.56" -proc-macro-crate = "3.0.0" +proc-macro2 = { workspace = true } +proc-macro-crate = { workspace = true } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -scale-info = "2.11.1" -sp-arithmetic = { path = "../../../primitives/arithmetic" } +codec = { workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } # used by generate_solution_type: -frame-election-provider-support = { path = ".." } -frame-support = { path = "../../support" } -trybuild = "1.0.88" +frame-election-provider-support = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +trybuild = { workspace = true } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 98da507384fd9..2c7a7aea1ca2b 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -16,19 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -honggfuzz = "0.5" -rand = { version = "0.8", features = ["small_rng", "std"] } +clap = { features = ["derive"], workspace = true } +honggfuzz = { workspace = true } +rand = { features = ["small_rng", "std"], workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-election-provider-solution-type = { path = ".." } -frame-election-provider-support = { path = "../.."
} -sp-arithmetic = { path = "../../../../primitives/arithmetic" } -sp-runtime = { path = "../../../../primitives/runtime" } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-election-provider-solution-type = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } # used by generate_solution_type: -sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false } -frame-support = { path = "../../../support" } +sp-npos-elections = { workspace = true } +frame-support = { workspace = true, default-features = true } [[bin]] name = "compact" diff --git a/substrate/frame/election-provider-support/solution-type/src/codec.rs b/substrate/frame/election-provider-support/solution-type/src/codec.rs index 17a256c228e28..16d5f17469b7e 100644 --- a/substrate/frame/election-provider-support/solution-type/src/codec.rs +++ b/substrate/frame/election-provider-support/solution-type/src/codec.rs @@ -51,14 +51,14 @@ fn decode_impl( quote! { let #name = < - _fepsp::sp_std::prelude::Vec<(_fepsp::codec::Compact<#voter_type>, _fepsp::codec::Compact<#target_type>)> + _fepsp::Vec<(_fepsp::codec::Compact<#voter_type>, _fepsp::codec::Compact<#target_type>)> as _fepsp::codec::Decode >::decode(value)?; let #name = #name .into_iter() .map(|(v, t)| (v.0, t.0)) - .collect::<_fepsp::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::Vec<_>>(); } }; @@ -73,7 +73,7 @@ fn decode_impl( quote! { let #name = < - _fepsp::sp_std::prelude::Vec<( + _fepsp::Vec<( _fepsp::codec::Compact<#voter_type>, [(_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>); #c-1], _fepsp::codec::Compact<#target_type>, @@ -87,7 +87,7 @@ fn decode_impl( [ #inner_impl ], t_last.0, )) - .collect::<_fepsp::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::Vec<_>>(); } }) .collect::(); @@ -126,7 +126,7 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { _fepsp::codec::Compact(v.clone()), _fepsp::codec::Compact(t.clone()), )) - .collect::<_fepsp::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::Vec<_>>(); #name.encode_to(&mut r); } }; @@ -153,7 +153,7 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { [ #inners_solution_array ], _fepsp::codec::Compact(t_last.clone()), )) - .collect::<_fepsp::sp_std::prelude::Vec<_>>(); + .collect::<_fepsp::Vec<_>>(); #name.encode_to(&mut r); } }) @@ -161,7 +161,7 @@ fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { quote!( impl _fepsp::codec::Encode for #ident { - fn encode(&self) -> _fepsp::sp_std::prelude::Vec { + fn encode(&self) -> _fepsp::Vec { let mut r = vec![]; #encode_impl_single #encode_impl_rest @@ -182,7 +182,7 @@ fn scale_info_impl( let name = format!("{}", vote_field(1)); quote! { .field(|f| - f.ty::<_fepsp::sp_std::prelude::Vec< + f.ty::<_fepsp::Vec< (_fepsp::codec::Compact<#voter_type>, _fepsp::codec::Compact<#target_type>) >>() .name(#name) @@ -194,7 +194,7 @@ fn scale_info_impl( let name = format!("{}", vote_field(2)); quote! { .field(|f| - f.ty::<_fepsp::sp_std::prelude::Vec<( + f.ty::<_fepsp::Vec<( _fepsp::codec::Compact<#voter_type>, (_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>), _fepsp::codec::Compact<#target_type> @@ -209,7 +209,7 @@ fn scale_info_impl( let name = format!("{}", vote_field(c)); quote! 
{ .field(|f| - f.ty::<_fepsp::sp_std::prelude::Vec<( + f.ty::<_fepsp::Vec<( _fepsp::codec::Compact<#voter_type>, [ (_fepsp::codec::Compact<#target_type>, _fepsp::codec::Compact<#weight_type>); diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs index 161631ee83fa6..de59df162c8ad 100644 --- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs +++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs @@ -40,7 +40,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let name = vote_field(1); // NOTE: we use the visibility of the struct for the fields as well.. could be made better. quote!( - #vis #name: _fepsp::sp_std::prelude::Vec<(#voter_type, #target_type)>, + #vis #name: _fepsp::Vec<(#voter_type, #target_type)>, ) }; @@ -49,7 +49,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let field_name = vote_field(c); let array_len = c - 1; quote!( - #vis #field_name: _fepsp::sp_std::prelude::Vec<( + #vis #field_name: _fepsp::Vec<( #voter_type, [(#target_type, #weight_type); #array_len], #target_type @@ -147,8 +147,8 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { self, voter_at: impl Fn(Self::VoterIndex) -> Option, target_at: impl Fn(Self::TargetIndex) -> Option, - ) -> Result<_fepsp::sp_std::prelude::Vec<_feps::Assignment>, _feps::Error> { - let mut #assignment_name: _fepsp::sp_std::prelude::Vec<_feps::Assignment> = Default::default(); + ) -> Result<_fepsp::Vec<_feps::Assignment>, _feps::Error> { + let mut #assignment_name: _fepsp::Vec<_feps::Assignment> = Default::default(); #into_impl Ok(#assignment_name) } @@ -165,10 +165,10 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { all_edges } - fn unique_targets(&self) -> _fepsp::sp_std::prelude::Vec { + fn unique_targets(&self) -> _fepsp::Vec { // NOTE: this implementation returns the targets sorted, but we don't use it yet per // se, nor is the API enforcing it. 
- use _fepsp::sp_std::collections::btree_set::BTreeSet; + use _fepsp::BTreeSet; let mut all_targets: BTreeSet = BTreeSet::new(); let mut maybe_insert_target = |t: Self::TargetIndex| { all_targets.insert(t); @@ -206,7 +206,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { .saturating_add((s as usize).saturating_mul(max_element_size)) } } - impl<'a> _fepsp::sp_std::convert::TryFrom<&'a [__IndexAssignment]> for #ident { + impl<'a> core::convert::TryFrom<&'a [__IndexAssignment]> for #ident { type Error = _feps::Error; fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result { let mut #struct_name = #ident::default(); @@ -361,7 +361,7 @@ pub(crate) fn into_impl( let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) - .collect::, _feps::Error>>()?; + .collect::, _feps::Error>>()?; if sum >= #per_thing::one() { return Err(_feps::Error::SolutionWeightOverflow); diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 2e9ee3b8a48b5..394f58a384425 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -177,11 +177,14 @@ pub mod bounds; pub mod onchain; pub mod traits; +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; +use core::fmt::Debug; use sp_runtime::{ traits::{Bounded, Saturating, Zero}, RuntimeDebug, }; -use sp_std::{fmt::Debug, prelude::*}; pub use bounds::DataProviderBounds; pub use codec::{Decode, Encode}; @@ -202,10 +205,10 @@ use sp_runtime::TryRuntimeError; // re-export for the solution macro, with the dependencies of the macro. #[doc(hidden)] pub mod private { + pub use alloc::{collections::btree_set::BTreeSet, vec::Vec}; pub use codec; pub use scale_info; pub use sp_arithmetic; - pub use sp_std; // Simple Extension trait to easily convert `None` from index closures to `Err`. // @@ -276,7 +279,7 @@ pub type IndexAssignmentOf = IndexAssignment< /// Types that are used by the data provider trait. pub mod data_provider { /// Alias for the result type of the election data provider. - pub type Result = sp_std::result::Result; + pub type Result = core::result::Result; } /// Something that can provide the data to an [`ElectionProvider`]. @@ -431,7 +434,7 @@ pub trait InstantElectionProvider: ElectionProviderBase { } /// An election provider that does nothing whatsoever. -pub struct NoElection(sp_std::marker::PhantomData); +pub struct NoElection(core::marker::PhantomData); impl ElectionProviderBase for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> @@ -486,7 +489,7 @@ where /// used on the implementing side of [`ElectionDataProvider`]. pub trait SortedListProvider { /// The list's error type. - type Error: sp_std::fmt::Debug; + type Error: core::fmt::Debug; /// The type used by the list to compare nodes for ordering. type Score: Bounded + Saturating + Zero; @@ -597,7 +600,7 @@ pub trait NposSolver { /// The accuracy of this solver. This will affect the accuracy of the output. type Accuracy: PerThing128; /// The error type of this implementation. - type Error: sp_std::fmt::Debug + sp_std::cmp::PartialEq; + type Error: core::fmt::Debug + core::cmp::PartialEq; /// Solve an NPoS solution with the given `voters`, `targets`, and select `to_elect` count /// of `targets`. @@ -617,7 +620,7 @@ pub trait NposSolver { /// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`NposSolver`]. See the /// documentation of [`sp_npos_elections::seq_phragmen`] for more info. 
pub struct SequentialPhragmen( - sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, + core::marker::PhantomData<(AccountId, Accuracy, Balancing)>, ); impl>> @@ -642,7 +645,7 @@ impl( - sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, + core::marker::PhantomData<(AccountId, Accuracy, Balancing)>, ); impl>> diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index ee4f6992a085e..1063d5d35aee7 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -24,11 +24,12 @@ use crate::{ BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, NposSolver, WeightInfo, }; +use alloc::collections::btree_map::BTreeMap; +use core::marker::PhantomData; use frame_support::{dispatch::DispatchClass, traits::Get}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight, }; -use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; /// Errors of the on-chain election. #[derive(Eq, PartialEq, Debug)] diff --git a/substrate/frame/election-provider-support/src/traits.rs b/substrate/frame/election-provider-support/src/traits.rs index 43d183b338e29..84fd57992d343 100644 --- a/substrate/frame/election-provider-support/src/traits.rs +++ b/substrate/frame/election-provider-support/src/traits.rs @@ -18,11 +18,12 @@ //! Traits for the election operations. use crate::{Assignment, IdentifierT, IndexAssignmentOf, PerThing128, VoteWeight}; +use alloc::vec::Vec; use codec::Encode; +use core::fmt::Debug; use scale_info::TypeInfo; use sp_arithmetic::traits::{Bounded, UniqueSaturatedInto}; use sp_npos_elections::{ElectionScore, Error, EvaluateSupport}; -use sp_std::{fmt::Debug, prelude::*}; /// An opaque index-based, NPoS solution type. 
pub trait NposSolution diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index dbcb740518b13..6d1ecbd07350e 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -16,26 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-tracing = { path = "../../primitives/tracing" } -substrate-test-utils = { path = "../../test-utils" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] @@ -52,7 +51,6 @@ std = [ "sp-npos-elections/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", "sp-tracing/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 55bb1b968fa1b..8e762f667b2a6 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -56,7 +56,7 @@ fn default_stake(num_votes: u32) -> BalanceOf { /// Get the current number of candidates. fn candidate_count() -> u32 { - >::decode_len().unwrap_or(0usize) as u32 + Candidates::::decode_len().unwrap_or(0usize) as u32 } /// Add `c` new candidates. @@ -67,7 +67,7 @@ fn submit_candidates( (0..c) .map(|i| { let account = endowed_account::(prefix, i); - >::submit_candidacy( + Elections::::submit_candidacy( RawOrigin::Signed(account.clone()).into(), candidate_count::(), ) @@ -96,7 +96,7 @@ fn submit_voter( votes: Vec, stake: BalanceOf, ) -> DispatchResultWithPostInfo { - >::vote(RawOrigin::Signed(caller).into(), votes, stake) + Elections::::vote(RawOrigin::Signed(caller).into(), votes, stake) } /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if @@ -121,28 +121,28 @@ fn distribute_voters( /// members, or members and runners-up. 
fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; - assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); - >::do_phragmen(); - assert_eq!(>::candidates().len(), 0, "some candidates remaining."); + assert_eq!(Candidates::::get().len() as u32, m, "wrong number of candidates."); + Elections::::do_phragmen(); + assert_eq!(Candidates::::get().len(), 0, "some candidates remaining."); assert_eq!( - >::members().len() + >::runners_up().len(), + Members::::get().len() + RunnersUp::::get().len(), m as usize, "wrong number of members and runners-up", ); - Ok(>::members() + Ok(Members::::get() .into_iter() .map(|m| m.who) - .chain(>::runners_up().into_iter().map(|r| r.who)) + .chain(RunnersUp::::get().into_iter().map(|r| r.who)) .collect()) } /// removes all the storage items to reverse any genesis state. fn clean() { - >::kill(); - >::kill(); - >::kill(); + Members::::kill(); + Candidates::::kill(); + RunnersUp::::kill(); #[allow(deprecated)] - >::remove_all(None); + Voting::::remove_all(None); } benchmarks! { @@ -180,14 +180,14 @@ benchmarks! { // original votes. let mut votes = all_candidates.iter().skip(1).cloned().collect::>(); - submit_voter::(caller.clone(), votes.clone(), stake / >::from(10u32))?; + submit_voter::(caller.clone(), votes.clone(), stake / BalanceOf::::from(10u32))?; // new votes. votes = all_candidates; - assert!(votes.len() > >::get(caller.clone()).votes.len()); + assert!(votes.len() > Voting::::get(caller.clone()).votes.len()); whitelist!(caller); - }: vote(RawOrigin::Signed(caller), votes, stake / >::from(10u32)) + }: vote(RawOrigin::Signed(caller), votes, stake / BalanceOf::::from(10u32)) vote_less { let v in 2 .. T::MaxVotesPerVoter::get(); @@ -205,7 +205,7 @@ benchmarks! { // new votes. votes = votes.into_iter().skip(1).collect::>(); - assert!(votes.len() < >::get(caller.clone()).votes.len()); + assert!(votes.len() < Voting::::get(caller.clone()).votes.len()); whitelist!(caller); }: vote(RawOrigin::Signed(caller), votes, stake) @@ -294,7 +294,7 @@ benchmarks! { let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[0].clone(); - assert!(>::is_member(&bailing)); + assert!(Elections::::is_member(&bailing)); whitelist!(bailing); }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::Member) @@ -318,7 +318,7 @@ benchmarks! { let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[T::DesiredMembers::get() as usize + 1].clone(); - assert!(>::is_runner_up(&bailing)); + assert!(Elections::::is_runner_up(&bailing)); whitelist!(bailing); }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::RunnerUp) @@ -345,11 +345,11 @@ benchmarks! { clean::(); let _ = fill_seats_up_to::(m)?; - let removing = as_lookup::(>::members_ids()[0].clone()); + let removing = as_lookup::(Elections::::members_ids()[0].clone()); }: remove_member(RawOrigin::Root, removing, true, false) verify { // must still have enough members. - assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); + assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get()); #[cfg(test)] { // reset members in between benchmark tests. @@ -371,15 +371,15 @@ benchmarks! { distribute_voters::(all_candidates, v, T::MaxVotesPerVoter::get() as usize)?; // all candidates leave. 
- >::kill(); + Candidates::::kill(); // now everyone is defunct - assert!(>::iter().all(|(_, v)| >::is_defunct_voter(&v.votes))); - assert_eq!(>::iter().count() as u32, v); + assert!(Voting::::iter().all(|(_, v)| Elections::::is_defunct_voter(&v.votes))); + assert_eq!(Voting::::iter().count() as u32, v); let root = RawOrigin::Root; }: _(root, v, d) verify { - assert_eq!(>::iter().count() as u32, v - d); + assert_eq!(Voting::::iter().count() as u32, v - d); } election_phragmen { @@ -404,12 +404,12 @@ benchmarks! { let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; }: { - >::on_initialize(T::TermDuration::get()); + Elections::::on_initialize(T::TermDuration::get()); } verify { - assert_eq!(>::members().len() as u32, T::DesiredMembers::get().min(c)); + assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get().min(c)); assert_eq!( - >::runners_up().len() as u32, + RunnersUp::::get().len() as u32, T::DesiredRunnersUp::get().min(c.saturating_sub(T::DesiredMembers::get())), ); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index b4be07030efbb..565ead4059d5f 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -98,7 +98,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; +use core::cmp::Ordering; use frame_support::{ traits::{ defensive_prelude::*, ChangeMembers, Contains, ContainsLengthBound, Currency, Get, @@ -115,7 +119,6 @@ use sp_runtime::{ DispatchError, Perbill, RuntimeDebug, }; use sp_staking::currency_to_vote::CurrencyToVote; -use sp_std::{cmp::Ordering, prelude::*}; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; @@ -377,9 +380,9 @@ pub mod pallet { ); ensure!(!votes.is_empty(), Error::::NoVotes); - let candidates_count = >::decode_len().unwrap_or(0); - let members_count = >::decode_len().unwrap_or(0); - let runners_up_count = >::decode_len().unwrap_or(0); + let candidates_count = Candidates::::decode_len().unwrap_or(0); + let members_count = Members::::decode_len().unwrap_or(0); + let runners_up_count = RunnersUp::::decode_len().unwrap_or(0); // can never submit a vote of there are no members, and cannot submit more votes than // all potential vote targets. @@ -393,7 +396,7 @@ pub mod pallet { // Reserve bond. let new_deposit = Self::deposit_of(votes.len()); - let Voter { deposit: old_deposit, .. } = >::get(&who); + let Voter { deposit: old_deposit, .. } = Voting::::get(&who); match new_deposit.cmp(&old_deposit) { Ordering::Greater => { // Must reserve a bit more. 
@@ -455,7 +458,7 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let actual_count = >::decode_len().unwrap_or(0) as u32; + let actual_count = Candidates::::decode_len().unwrap_or(0) as u32; ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); ensure!( actual_count <= ::MaxCandidates::get(), @@ -470,7 +473,7 @@ pub mod pallet { T::Currency::reserve(&who, T::CandidacyBond::get()) .map_err(|_| Error::::InsufficientCandidateFunds)?; - >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); + Candidates::::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); Ok(()) } @@ -509,7 +512,7 @@ pub mod pallet { Self::deposit_event(Event::Renounced { candidate: who }); }, Renouncing::RunnerUp => { - >::try_mutate::<_, Error, _>(|runners_up| { + RunnersUp::::try_mutate::<_, Error, _>(|runners_up| { let index = runners_up .iter() .position(|SeatHolder { who: r, .. }| r == &who) @@ -523,7 +526,7 @@ pub mod pallet { })?; }, Renouncing::Candidate(count) => { - >::try_mutate::<_, Error, _>(|candidates| { + Candidates::::try_mutate::<_, Error, _>(|candidates| { ensure!(count >= candidates.len() as u32, Error::::InvalidWitnessData); let index = candidates .binary_search_by(|(c, _)| c.cmp(&who)) @@ -599,7 +602,7 @@ pub mod pallet { ) -> DispatchResult { let _ = ensure_root(origin)?; - >::iter() + Voting::::iter() .take(num_voters as usize) .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) .take(num_defunct as usize) @@ -682,7 +685,6 @@ pub mod pallet { /// /// Invariant: Always sorted based on account id. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members = StorageValue<_, Vec>>, ValueQuery>; @@ -691,7 +693,6 @@ pub mod pallet { /// Invariant: Always sorted based on rank (worse to best). Upon removal of a member, the /// last (i.e. _best_) runner-up will be replaced. #[pallet::storage] - #[pallet::getter(fn runners_up)] pub type RunnersUp = StorageValue<_, Vec>>, ValueQuery>; @@ -702,19 +703,16 @@ pub mod pallet { /// /// Invariant: Always sorted based on account id. #[pallet::storage] - #[pallet::getter(fn candidates)] pub type Candidates = StorageValue<_, Vec<(T::AccountId, BalanceOf)>, ValueQuery>; /// The total number of vote rounds that have happened, excluding the upcoming one. #[pallet::storage] - #[pallet::getter(fn election_rounds)] pub type ElectionRounds = StorageValue<_, u32, ValueQuery>; /// Votes and locked stake of a particular voter. /// /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. #[pallet::storage] - #[pallet::getter(fn voting)] pub type Voting = StorageMap<_, Twox64Concat, T::AccountId, Voter>, ValueQuery>; @@ -768,7 +766,7 @@ pub mod pallet { // they have any lock. NOTE: this means that we will still try to remove a lock // once this genesis voter is removed, and for now it is okay because // remove_lock is noop if lock is not there. - >::insert( + Voting::::insert( &member, Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, ); @@ -811,7 +809,7 @@ impl Pallet { // - `Ok(Option(replacement))` if member was removed and replacement was replaced. // - `Ok(None)` if member was removed but no replacement was found // - `Err(_)` if who is not a member. 
- let maybe_replacement = >::try_mutate::<_, Error, _>(|members| { + let maybe_replacement = Members::::try_mutate::<_, Error, _>(|members| { let remove_index = members .binary_search_by(|m| m.who.cmp(who)) .map_err(|_| Error::::NotMember)?; @@ -831,7 +829,7 @@ impl Pallet { T::Currency::unreserve(who, removed.deposit); } - let maybe_next_best = >::mutate(|r| r.pop()).map(|next_best| { + let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).map(|next_best| { // defensive-only: Members and runners-up are disjoint. This will always be err and // give us an index to insert. if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) { @@ -847,7 +845,7 @@ impl Pallet { })?; let remaining_member_ids_sorted = - Self::members().into_iter().map(|x| x.who).collect::>(); + Members::::get().into_iter().map(|x| x.who).collect::>(); let outgoing = &[who.clone()]; let maybe_current_prime = T::ChangeMembers::get_prime(); let return_value = match maybe_replacement { @@ -884,7 +882,7 @@ impl Pallet { /// Check if `who` is a candidate. It returns the insert index if the element does not exists as /// an error. fn is_candidate(who: &T::AccountId) -> Result<(), usize> { - Self::candidates().binary_search_by(|c| c.0.cmp(who)).map(|_| ()) + Candidates::::get().binary_search_by(|c| c.0.cmp(who)).map(|_| ()) } /// Check if `who` is a voter. It may or may not be a _current_ one. @@ -894,17 +892,17 @@ impl Pallet { /// Check if `who` is currently an active member. fn is_member(who: &T::AccountId) -> bool { - Self::members().binary_search_by(|m| m.who.cmp(who)).is_ok() + Members::::get().binary_search_by(|m| m.who.cmp(who)).is_ok() } /// Check if `who` is currently an active runner-up. fn is_runner_up(who: &T::AccountId) -> bool { - Self::runners_up().iter().any(|r| &r.who == who) + RunnersUp::::get().iter().any(|r| &r.who == who) } /// Get the members' account ids. pub(crate) fn members_ids() -> Vec { - Self::members().into_iter().map(|m| m.who).collect::>() + Members::::get().into_iter().map(|m| m.who).collect::>() } /// Get a concatenation of previous members and runners-up and their deposits. @@ -912,10 +910,10 @@ impl Pallet { /// These accounts are essentially treated as candidates. fn implicit_candidates_with_deposit() -> Vec<(T::AccountId, BalanceOf)> { // invariant: these two are always without duplicates. - Self::members() + Members::::get() .into_iter() .map(|m| (m.who, m.deposit)) - .chain(Self::runners_up().into_iter().map(|r| (r.who, r.deposit))) + .chain(RunnersUp::::get().into_iter().map(|r| (r.who, r.deposit))) .collect::>() } @@ -932,7 +930,7 @@ impl Pallet { /// Remove a certain someone as a voter. fn do_remove_voter(who: &T::AccountId) { - let Voter { deposit, .. } = >::take(who); + let Voter { deposit, .. } = Voting::::take(who); // remove storage, lock and unreserve. T::Currency::remove_lock(T::PalletId::get(), who); @@ -952,7 +950,7 @@ impl Pallet { let desired_runners_up = T::DesiredRunnersUp::get() as usize; let num_to_elect = desired_runners_up + desired_seats; - let mut candidates_and_deposit = Self::candidates(); + let mut candidates_and_deposit = Candidates::::get(); // add all the previous members and runners-up as candidates as well. candidates_and_deposit.append(&mut Self::implicit_candidates_with_deposit()); @@ -1011,12 +1009,12 @@ impl Pallet { sp_npos_elections::seq_phragmen(num_to_elect, candidate_ids, voters_and_votes, None) .map(|ElectionResult:: { winners, assignments: _ }| { // this is already sorted by id. 
- let old_members_ids_sorted = >::take() + let old_members_ids_sorted = Members::::take() .into_iter() .map(|m| m.who) .collect::>(); // this one needs a sort by id. - let mut old_runners_up_ids_sorted = >::take() + let mut old_runners_up_ids_sorted = RunnersUp::::take() .into_iter() .map(|r| r.who) .collect::>(); @@ -1122,7 +1120,7 @@ impl Pallet { // fetch deposits from the one recorded one. This will make sure that a // candidate who submitted candidacy before a change to candidacy deposit will // have the correct amount recorded. - >::put( + Members::::put( new_members_sorted_by_id .iter() .map(|(who, stake)| SeatHolder { @@ -1132,7 +1130,7 @@ impl Pallet { }) .collect::>(), ); - >::put( + RunnersUp::::put( new_runners_up_sorted_by_rank .into_iter() .map(|(who, stake)| SeatHolder { @@ -1144,10 +1142,10 @@ impl Pallet { ); // clean candidates. - >::kill(); + Candidates::::kill(); Self::deposit_event(Event::NewTerm { new_members: new_members_sorted_by_id }); - >::mutate(|v| *v += 1); + ElectionRounds::::mutate(|v| *v += 1); }) .map_err(|e| { log::error!(target: LOG_TARGET, "Failed to run election [{:?}].", e,); @@ -1294,11 +1292,11 @@ impl Pallet { } fn candidates_ids() -> Vec { - Pallet::::candidates().iter().map(|(x, _)| x).cloned().collect::>() + Candidates::::get().iter().map(|(x, _)| x).cloned().collect::>() } fn runners_up_ids() -> Vec { - Pallet::::runners_up().into_iter().map(|r| r.who).collect::>() + RunnersUp::::get().into_iter().map(|r| r.who).collect::>() } } @@ -1310,7 +1308,7 @@ mod tests { assert_noop, assert_ok, derive_impl, dispatch::DispatchResultWithPostInfo, parameter_types, - traits::{ConstU32, ConstU64, OnInitialize}, + traits::{ConstU32, OnInitialize}, }; use frame_system::ensure_signed; use sp_runtime::{testing::Header, BuildStorage}; @@ -1322,20 +1320,9 @@ mod tests { type AccountData = pallet_balances::AccountData; } + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = frame_system::Pallet; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } frame_support::parameter_types! 
{ @@ -1511,22 +1498,22 @@ mod tests { } fn candidate_ids() -> Vec { - Elections::candidates().into_iter().map(|(c, _)| c).collect::>() + Candidates::::get().into_iter().map(|(c, _)| c).collect::>() } fn candidate_deposit(who: &u64) -> u64 { - Elections::candidates() + Candidates::::get() .into_iter() .find_map(|(c, d)| if c == *who { Some(d) } else { None }) .unwrap_or_default() } fn voter_deposit(who: &u64) -> u64 { - Elections::voting(who).deposit + Voting::::get(who).deposit } fn runners_up_ids() -> Vec { - Elections::runners_up().into_iter().map(|r| r.who).collect::>() + RunnersUp::::get().into_iter().map(|r| r.who).collect::>() } fn members_ids() -> Vec { @@ -1534,11 +1521,14 @@ mod tests { } fn members_and_stake() -> Vec<(u64, u64)> { - Elections::members().into_iter().map(|m| (m.who, m.stake)).collect::>() + elections_phragmen::Members::::get() + .into_iter() + .map(|m| (m.who, m.stake)) + .collect::>() } fn runners_up_and_stake() -> Vec<(u64, u64)> { - Elections::runners_up() + RunnersUp::::get() .into_iter() .map(|r| (r.who, r.stake)) .collect::>() @@ -1573,7 +1563,7 @@ mod tests { } fn submit_candidacy(origin: RuntimeOrigin) -> sp_runtime::DispatchResult { - Elections::submit_candidacy(origin, Elections::candidates().len() as u32) + Elections::submit_candidacy(origin, Candidates::::get().len() as u32) } fn vote(origin: RuntimeOrigin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { @@ -1597,13 +1587,13 @@ mod tests { assert_eq!(::VotingBondFactor::get(), 0); assert_eq!(::CandidacyBond::get(), 3); assert_eq!(::TermDuration::get(), 5); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); - assert!(Elections::members().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(elections_phragmen::Members::::get().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); - assert_eq!(>::decode_len(), None); + assert_eq!(Candidates::::decode_len(), None); assert!(Elections::is_candidate(&1).is_err()); assert!(all_voters().is_empty()); @@ -1618,7 +1608,7 @@ mod tests { .build_and_execute(|| { System::set_block_number(1); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 1, stake: 10, deposit: 0 }, SeatHolder { who: 2, stake: 20, deposit: 0 } @@ -1626,11 +1616,11 @@ mod tests { ); assert_eq!( - Elections::voting(1), + Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); @@ -1650,19 +1640,19 @@ mod tests { System::set_block_number(1); assert_eq!( - Elections::voting(1), + Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(1))); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); - assert_eq!(Elections::voting(1), Default::default()); - assert_eq!(Elections::voting(2), Default::default()); + assert_eq!(Voting::::get(1), Default::default()); + assert_eq!(Voting::::get(2), Default::default()); }) } @@ -1673,7 +1663,7 @@ mod tests { .build_and_execute(|| { System::set_block_number(1); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 1, stake: 10, deposit: 0 }, SeatHolder { who: 2, stake: 20, deposit: 0 }, @@ -1681,11 +1671,11 @@ mod tests { ); assert_eq!( - Elections::voting(1), + 
Voting::::get(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 } ); assert_eq!( - Elections::voting(2), + Voting::::get(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); @@ -1729,17 +1719,17 @@ mod tests { ExtBuilder::default().term_duration(0).build_and_execute(|| { assert_eq!(::TermDuration::get(), 0); assert_eq!(::DesiredMembers::get(), 2); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); assert!(members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert!(members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); }); } @@ -1780,14 +1770,14 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_eq!(Elections::candidates(), vec![(5, 3)]); + assert_eq!(Candidates::::get(), vec![(5, 3)]); // a runtime upgrade changes the bond. CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); - assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); + assert_eq!(Candidates::::get(), vec![(4, 4), (5, 3)]); // once elected, they each hold their candidacy bond, no more. System::set_block_number(5); @@ -1796,7 +1786,7 @@ mod tests { assert_eq!(balances(&4), (34, 6)); assert_eq!(balances(&5), (45, 5)); assert_eq!( - Elections::members(), + elections_phragmen::Members::::get(), vec![ SeatHolder { who: 4, stake: 34, deposit: 4 }, SeatHolder { who: 5, stake: 45, deposit: 3 }, @@ -1845,7 +1835,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![5]); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert!(candidate_ids().is_empty()); assert_noop!(submit_candidacy(RuntimeOrigin::signed(5)), Error::::MemberSubmit); @@ -1971,7 +1961,7 @@ mod tests { // 2 + 1 assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(Voting::::get(&2).deposit, 3); assert_eq!(has_lock(&2), 10); assert_eq!(locked_stake_of(&2), 10); @@ -1979,7 +1969,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); - assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(Voting::::get(&2).deposit, 4); assert_eq!(has_lock(&2), 15); assert_eq!(locked_stake_of(&2), 15); @@ -1987,7 +1977,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 3], 18)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); - assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(Voting::::get(&2).deposit, 4); assert_eq!(has_lock(&2), 16); assert_eq!(locked_stake_of(&2), 16); @@ -1995,7 +1985,7 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 12)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(Voting::::get(&2).deposit, 3); assert_eq!(has_lock(&2), 12); assert_eq!(locked_stake_of(&2), 12); }); @@ -2273,9 +2263,9 @@ mod tests { assert_eq!(votes_of(&4), vec![4]); assert_eq!(candidate_ids(), vec![3, 4, 5]); - assert_eq!(>::decode_len().unwrap(), 3); + assert_eq!(Candidates::::decode_len().unwrap(), 3); - assert_eq!(Elections::election_rounds(), 0); + 
assert_eq!(ElectionRounds::::get(), 0); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2284,13 +2274,13 @@ mod tests { // votes for 5 assert_eq!(balances(&2), (18, 2)); assert_eq!(members_and_stake(), vec![(3, 25), (5, 18)]); - assert!(Elections::runners_up().is_empty()); + assert!(RunnersUp::::get().is_empty()); assert_eq_uvec!(all_voters(), vec![2, 3, 4]); assert!(candidate_ids().is_empty()); - assert_eq!(>::decode_len(), None); + assert_eq!(Candidates::::decode_len(), None); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); }); } @@ -2353,7 +2343,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_and_stake(), vec![(5, 45)]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); // but now it has a valid target. assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); @@ -2363,7 +2353,7 @@ mod tests { // candidate 4 is affected by an old vote. assert_eq!(members_and_stake(), vec![(4, 28), (5, 45)]); - assert_eq!(Elections::election_rounds(), 2); + assert_eq!(ElectionRounds::::get(), 2); assert_eq_uvec!(all_voters(), vec![3, 5]); }); } @@ -2384,7 +2374,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2399,7 +2389,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert!(candidate_ids().is_empty()); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert!(members_ids().is_empty()); System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { @@ -2553,7 +2543,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); @@ -2597,7 +2587,7 @@ mod tests { assert_eq!(runners_up_and_stake(), vec![(2, 15), (3, 25)]); // no new candidates but old members and runners-up are always added. 
assert!(candidate_ids().is_empty()); - assert_eq!(Elections::election_rounds(), b / 5); + assert_eq!(ElectionRounds::::get(), b / 5); assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); }; @@ -2621,7 +2611,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); // a new candidate assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); @@ -2630,7 +2620,7 @@ mod tests { assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed - assert_eq!(Elections::election_rounds(), 2); // new election round + assert_eq!(ElectionRounds::::get(), 2); // new election round assert_eq!(members_ids(), vec![3, 5]); // new members }); } @@ -2647,14 +2637,14 @@ mod tests { assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_eq!(>::decode_len().unwrap(), 3); + assert_eq!(Candidates::::decode_len().unwrap(), 3); - assert_eq!(Elections::election_rounds(), 0); + assert_eq!(ElectionRounds::::get(), 0); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![3, 5]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(3))); @@ -2665,7 +2655,7 @@ mod tests { System::set_block_number(10); Elections::on_initialize(System::block_number()); assert!(members_ids().is_empty()); - assert_eq!(Elections::election_rounds(), 2); + assert_eq!(ElectionRounds::::get(), 2); }); } @@ -2730,7 +2720,7 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq_uvec!(members_ids(), vec![3, 4]); - assert_eq!(Elections::election_rounds(), 1); + assert_eq!(ElectionRounds::::get(), 1); }); } @@ -3175,13 +3165,13 @@ mod tests { .desired_members(0) .desired_runners_up(0) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3193,7 +3183,7 @@ mod tests { assert_eq!(members_ids().len(), 0); assert_eq!(runners_up_ids().len(), 0); assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); // not interested in members @@ -3201,13 +3191,13 @@ mod tests { .desired_members(0) .desired_runners_up(2) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3219,7 +3209,7 @@ mod tests { assert_eq!(members_ids().len(), 0); assert_eq!(runners_up_ids(), vec![3, 4]); assert_eq!(all_voters().len(), 3); - 
assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); // not interested in runners-up @@ -3227,13 +3217,13 @@ mod tests { .desired_members(2) .desired_runners_up(0) .build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Candidates::::get().len(), 3); assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); @@ -3245,7 +3235,7 @@ mod tests { assert_eq!(members_ids(), vec![3, 4]); assert_eq!(runners_up_ids().len(), 0); assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); + assert_eq!(Candidates::::get().len(), 0); }); } diff --git a/substrate/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs b/substrate/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs index 482766ee97f54..ed4569aa25ee6 100644 --- a/substrate/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs +++ b/substrate/frame/elections-phragmen/src/migrations/unlock_and_unreserve_all_funds.rs @@ -18,6 +18,7 @@ //! A migration that unreserves all deposit and unlocks all stake held in the context of this //! pallet. +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use core::iter::Sum; use frame_support::{ pallet_prelude::ValueQuery, @@ -28,7 +29,6 @@ use frame_support::{ }; use sp_core::Get; use sp_runtime::traits::Zero; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; const LOG_TARGET: &str = "elections_phragmen::migrations::unlock_and_unreserve_all_funds"; @@ -93,7 +93,7 @@ type Voting = StorageMap< /// The pallet should be made inoperable before this migration is run. /// /// (See also [`RemovePallet`][frame_support::migrations::RemovePallet]) -pub struct UnlockAndUnreserveAllFunds(sp_std::marker::PhantomData); +pub struct UnlockAndUnreserveAllFunds(core::marker::PhantomData); impl UnlockAndUnreserveAllFunds { /// Calculates and returns the total amounts deposited and staked by each account in the context @@ -187,8 +187,8 @@ where /// reported as staked by the pallet and the amount actually locked in `Balances`. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use alloc::collections::btree_set::BTreeSet; use codec::Encode; - use sp_std::collections::btree_set::BTreeSet; // Get staked and deposited balances as reported by this pallet. let (account_deposited_sums, account_staked_sums, _) = diff --git a/substrate/frame/elections-phragmen/src/migrations/v3.rs b/substrate/frame/elections-phragmen/src/migrations/v3.rs index cdca1138ebbd2..82f82f23e712f 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v3.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v3.rs @@ -19,12 +19,12 @@ use super::super::LOG_TARGET; use crate::{Config, Pallet}; +use alloc::vec::Vec; use codec::{Decode, Encode, FullCodec}; use frame_support::{ pallet_prelude::ValueQuery, traits::StorageVersion, weights::Weight, Twox64Concat, }; use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; #[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] struct SeatHolder { @@ -116,16 +116,16 @@ pub fn apply( /// Migrate from the old legacy voting bond (fixed) to the new one (per-vote dynamic). 
pub fn migrate_voters_to_recorded_deposit(old_deposit: V::Balance) { - >::translate::<(V::Balance, Vec), _>(|_who, (stake, votes)| { + Voting::::translate::<(V::Balance, Vec), _>(|_who, (stake, votes)| { Some(Voter { votes, stake, deposit: old_deposit }) }); - log::info!(target: LOG_TARGET, "migrated {} voter accounts.", >::iter().count()); + log::info!(target: LOG_TARGET, "migrated {} voter accounts.", Voting::::iter().count()); } /// Migrate all candidates to recorded deposit. pub fn migrate_candidates_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>(|maybe_old_candidates| { + let _ = Candidates::::translate::, _>(|maybe_old_candidates| { maybe_old_candidates.map(|old_candidates| { log::info!(target: LOG_TARGET, "migrated {} candidate accounts.", old_candidates.len()); old_candidates.into_iter().map(|c| (c, old_deposit)).collect::>() @@ -135,7 +135,7 @@ pub fn migrate_candidates_to_recorded_deposit(old_deposit: /// Migrate all members to recorded deposit. pub fn migrate_members_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>(|maybe_old_members| { + let _ = Members::::translate::, _>(|maybe_old_members| { maybe_old_members.map(|old_members| { log::info!(target: LOG_TARGET, "migrated {} member accounts.", old_members.len()); old_members @@ -148,7 +148,7 @@ pub fn migrate_members_to_recorded_deposit(old_deposit: V: /// Migrate all runners-up to recorded deposit. pub fn migrate_runners_up_to_recorded_deposit(old_deposit: V::Balance) { - let _ = >::translate::, _>( + let _ = RunnersUp::::translate::, _>( |maybe_old_runners_up| { maybe_old_runners_up.map(|old_runners_up| { log::info!( diff --git a/substrate/frame/elections-phragmen/src/migrations/v5.rs b/substrate/frame/elections-phragmen/src/migrations/v5.rs index 6e360aa8b8c15..150a4c4fa6c2c 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v5.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v5.rs @@ -16,6 +16,7 @@ // limitations under the License. use super::super::*; +use alloc::{boxed::Box, vec::Vec}; /// Migrate the locks and vote stake on accounts (as specified with param `to_migrate`) that have /// more than their free balance locked. 
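
The elections-phragmen hunks above follow two recurring patterns: the `sp-std` dependency is dropped in favour of `extern crate alloc` with imports taken from `alloc`/`core`, and the `#[pallet::getter(..)]` attributes are removed so storage is read directly through the storage types (for example `Members::<T>::get()` instead of `Self::members()`). For context, here is a minimal, dependency-free sketch of the `sp_std` → `alloc`/`core` import move; it is illustrative only and not code from this PR (in the pallet crates these imports sit under `#![cfg_attr(not(feature = "std"), no_std)]`):

```rust
// Illustrative sketch of replacing the `sp_std` facade with `alloc`/`core`.
// Shown as a plain binary so it stands alone; the function names are made up.
extern crate alloc;

use alloc::{vec, vec::Vec};
use core::cmp::Ordering;

/// Compare two vote lists by length; `Ordering` previously came in via `sp_std::cmp`.
fn compare_vote_counts(a: &[u64], b: &[u64]) -> Ordering {
    a.len().cmp(&b.len())
}

/// Build a singleton vote list; `vec!` and `Vec` previously came from `sp_std::prelude`.
fn single_vote(who: u64) -> Vec<u64> {
    vec![who]
}

fn main() {
    assert_eq!(compare_vote_counts(&[1, 2], &single_vote(3)), Ordering::Greater);
    println!("builds against `alloc` and `core` only; no `sp_std` facade needed");
}
```

Because the runtime already provides an allocator, pulling `Vec` and friends from `alloc` instead of `sp_std` is purely an import change and does not alter behaviour.
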
diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 45c7440eb8913..0bb42517eb467 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -16,15 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-default-config-example = { path = "default-config", default-features = false } -pallet-dev-mode = { path = "dev-mode", default-features = false } -pallet-example-basic = { path = "basic", default-features = false } -pallet-example-frame-crate = { path = "frame-crate", default-features = false } -pallet-example-kitchensink = { path = "kitchensink", default-features = false } -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } -pallet-example-split = { path = "split", default-features = false } -pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } -pallet-example-tasks = { path = "tasks", default-features = false } +pallet-default-config-example = { workspace = true } +pallet-dev-mode = { workspace = true } +pallet-example-basic = { workspace = true } +pallet-example-frame-crate = { workspace = true } +pallet-example-kitchensink = { workspace = true } +pallet-example-offchain-worker = { workspace = true } +pallet-example-split = { workspace = true } +pallet-example-single-block-migrations = { workspace = true } +pallet-example-tasks = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index ba9f9eca27d79..af547c7eeb21e 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet" readme = "README.md" +publish = false [lints] workspace = true @@ -16,19 +17,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] @@ -43,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 12cadc969fd74..fea04cb447a07 100644 --- 
a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -53,6 +53,9 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ @@ -69,7 +72,6 @@ use sp_runtime::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, }; -use sp_std::vec::Vec; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index d351b27eecde3..d7095eb3c944f 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -71,20 +71,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { @@ -103,7 +92,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { example: pallet_example_basic::GenesisConfig { dummy: 42, // we configure the map with (key, value) pairs. - bar: vec![(1, 2), (2, 3)], + bar: alloc::vec![(1, 2), (2, 3)], foo: 24, }, } diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 0ad5b56cb6faa..81509c782a3fa 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet demonstrating derive_impl / default_config in action" readme = "README.md" +publish = false [lints] workspace = true @@ -16,15 +17,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -36,7 +36,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index 5b66c78e06283..ccdcd4968598d 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -32,6 +32,8 @@ 
#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[frame_support::pallet] pub mod pallet { use frame_support::pallet_prelude::*; @@ -189,7 +191,7 @@ pub mod tests { } parameter_types! { - pub const SomeCall: RuntimeCall = RuntimeCall::System(frame_system::Call::::remark { remark: vec![] }); + pub const SomeCall: RuntimeCall = RuntimeCall::System(frame_system::Call::::remark { remark: alloc::vec![] }); } #[derive_impl(TestDefaultConfig as pallet::DefaultConfig)] diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index d7570f570946f..c3dd7f26f21d4 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -16,18 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] @@ -41,7 +40,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/examples/dev-mode/src/lib.rs b/substrate/frame/examples/dev-mode/src/lib.rs index 15f1a4b5d6199..eb94c024280c7 100644 --- a/substrate/frame/examples/dev-mode/src/lib.rs +++ b/substrate/frame/examples/dev-mode/src/lib.rs @@ -28,9 +28,11 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{vec, vec::Vec}; use frame_support::dispatch::DispatchResult; use frame_system::ensure_signed; -use sp_std::{vec, vec::Vec}; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index e8a18ec13fe91..637864b87bc43 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-dev-mode. 
use crate::*; -use frame_support::{assert_ok, derive_impl, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } impl Config for Test { diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 29984bab3e0ff..e5137526026e6 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] } +frame = { features = ["experimental", "runtime"], workspace = true } [features] diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index db3e22daa01bd..f1f9fdb492d9c 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -16,23 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false, features = ["experimental"] } -frame-system = { path = "../../system", default-features = false } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } -pallet-balances = { path = "../../balances", default-features = false } +pallet-balances = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/examples/kitchensink/src/lib.rs 
b/substrate/frame/examples/kitchensink/src/lib.rs index b7425b0c0846a..442318565426e 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -42,6 +42,8 @@ use sp_runtime::TryRuntimeError; pub mod weights; pub use weights::*; +extern crate alloc; + #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/substrate/frame/examples/kitchensink/src/tests.rs b/substrate/frame/examples/kitchensink/src/tests.rs index 1205fefc42298..7cf95497bf064 100644 --- a/substrate/frame/examples/kitchensink/src/tests.rs +++ b/substrate/frame/examples/kitchensink/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-example-kitchensink. use crate::*; -use frame_support::{assert_ok, derive_impl, parameter_types, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::VariantCountOf}; use sp_runtime::BuildStorage; // Reexport crate as its pallet name for construct_runtime. use crate as pallet_example_kitchensink; @@ -43,20 +43,14 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; } parameter_types! 
{ diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml index 61bb2bc61b4e3..91d0a71bb3415 100644 --- a/substrate/frame/examples/multi-block-migrations/Cargo.toml +++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml @@ -13,14 +13,14 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -pallet-migrations = { path = "../../migrations", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } +codec = { workspace = true } +pallet-migrations = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } +scale-info = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index 23ce79c34402d..179a658de312d 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet for offchain worker" readme = "README.md" +publish = false [lints] workspace = true @@ -16,17 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -lite-json = { version = "0.2.0", default-features = false } +codec = { workspace = true } +lite-json = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-keystore = { path = "../../../primitives/keystore", optional = true, default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -41,7 +41,6 @@ std = [ "sp-io/std", "sp-keystore/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index 0a90e896188eb..add014f6b34a9 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -45,6 +45,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate 
alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::traits::Get; use frame_system::{ @@ -67,7 +70,6 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, RuntimeDebug, }; -use sp_std::vec::Vec; #[cfg(test)] mod tests; @@ -606,7 +608,7 @@ impl Pallet { let body = response.body().collect::>(); // Create a str slice from the body. - let body_str = sp_std::str::from_utf8(&body).map_err(|_| { + let body_str = alloc::str::from_utf8(&body).map_err(|_| { log::warn!("No UTF8 body"); http::Error::Unknown })?; diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index e2c57a8c1e1ab..b665cbbb62aed 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -375,7 +375,7 @@ fn price_oracle_response(state: &mut testing::OffchainState) { #[test] fn parse_price_works() { - let test_data = vec![ + let test_data = alloc::vec![ ("{\"USD\":6536.92}", Some(653692)), ("{\"USD\":65.92}", Some(6592)), ("{\"USD\":6536.924565}", Some(653692)), diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 080500f629671..2d524f2caa269 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,20 +13,19 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = "0.2.8" -log = { version = "0.4.21", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false } -frame-executive = { path = "../../executive", default-features = false } -frame-system = { path = "../../system", default-features = false } -frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } -pallet-balances = { path = "../../balances", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false } +docify = { workspace = true } +log = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-executive = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-balances = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-version = { workspace = true } [features] default = ["std"] @@ -42,7 +41,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-version/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs index 411537aa8c65f..07c7199b93223 100644 --- a/substrate/frame/examples/single-block-migrations/src/lib.rs +++ 
b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -156,6 +156,9 @@ pub use pallet::*; pub mod migrations; #[doc(hidden)] mod mock; + +extern crate alloc; + use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::StorageVersion; use sp_runtime::RuntimeDebug; diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index 7b543d72c9840..55cf7cef9a7a8 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -21,7 +21,7 @@ use frame_support::{ }; #[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; +use alloc::vec::Vec; /// Collection of storage item formats from the previous storage version. /// @@ -41,7 +41,7 @@ mod v0 { /// /// In this migration, update the on-chain storage for the pallet to reflect the new storage /// layout. -pub struct InnerMigrateV0ToV1(sp_std::marker::PhantomData); +pub struct InnerMigrateV0ToV1(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { /// Return the existing [`crate::Value`] so we can check that it was correctly set in diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs index 68594cc4ad727..f4cf81ea6474f 100644 --- a/substrate/frame/examples/single-block-migrations/src/mock.rs +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -18,7 +18,7 @@ #![cfg(any(all(feature = "try-runtime", test), doc))] use crate::*; -use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; +use frame_support::{derive_impl, weights::constants::ParityDbWeight}; // Re-export crate as its pallet name for construct_runtime. 
use crate as pallet_example_storage_migration; @@ -41,20 +41,9 @@ impl frame_system::Config for MockRuntime { type DbWeight = ParityDbWeight; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for MockRuntime { - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); } impl Config for MockRuntime {} diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index 6cb4d7ddd6c06..9542902bae64b 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME example split pallet" readme = "README.md" +publish = false [lints] workspace = true @@ -16,20 +17,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-io = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] @@ -42,7 +42,6 @@ std = [ "scale-info/std", "sp-core/std", "sp-io/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 95246ef3f6643..00695ceddf197 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true repository.workspace = true description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" +publish = false [lints] workspace = true @@ -14,19 +15,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", 
default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-core = { default-features = false, path = "../../../primitives/core" } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } [features] default = ["std"] @@ -40,7 +40,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 4cce0fa9f9504..78d9ea6fa4992 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -16,29 +16,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -aquamarine = "0.5.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +aquamarine = { workspace = true } +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-tracing = { path = "../../primitives/tracing", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-tracing = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -pallet-balances = { path = "../balances" } -pallet-transaction-payment = { path = "../transaction-payment" } -sp-core = { path = "../../primitives/core" } -sp-inherents = { path = "../../primitives/inherents" } -sp-io = { path = "../../primitives/io" } -sp-version = { path = "../../primitives/version" } +array-bytes = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [features] default = ["std"] @@ -57,7 +56,6 @@ std = [ "sp-inherents/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-tracing/std", "sp-version/std", ] diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 3028eaf318e08..1e7bac64e18fd 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -151,7 +151,10 @@ pub mod block_flowchart {} #[cfg(test)] mod tests; +extern crate alloc; + use codec::{Codec, Encode}; +use core::marker::PhantomData; use frame_support::{ defensive_assert, dispatch::{DispatchClass, 
DispatchInfo, GetDispatchInfo, PostDispatchInfo}, @@ -174,7 +177,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, ExtrinsicInclusionMode, }; -use sp_std::{marker::PhantomData, prelude::*}; #[cfg(feature = "try-runtime")] use ::{ @@ -467,7 +469,7 @@ where /// Logs the result of trying to decode the entire state. fn log_decode_result( - res: Result>, + res: Result>, ) -> Result<(), TryRuntimeError> { match res { Ok(bytes) => { diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index e3721f7b6dcba..69a970a89d930 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -36,7 +36,7 @@ use frame_support::{ migrations::MultiStepMigrator, pallet_prelude::*, parameter_types, - traits::{fungible, ConstU8, Currency, IsInherent}, + traits::{fungible, ConstU8, Currency, IsInherent, VariantCount, VariantCountOf}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, }; use frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; @@ -325,12 +325,24 @@ impl frame_system::Config for Runtime { type MultiBlockMigrator = MockedModeGetter; } +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)] +pub enum FreezeReasonId { + Foo, +} + +impl VariantCount for FreezeReasonId { + const VARIANT_COUNT: u32 = 1; +} + type Balance = u64; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type RuntimeFreezeReason = FreezeReasonId; + type FreezeIdentifier = FreezeReasonId; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -637,8 +649,8 @@ fn block_weight_limit_enforced() { assert!(res.is_ok()); assert_eq!( >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + //--------------------- on_initialize + block_execution + extrinsic_base weight + extrinsic len + Weight::from_parts((encoded_len + 5) * (nonce + 1), (nonce + 1)* encoded_len) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -686,9 +698,10 @@ fn block_weight_and_size_is_stored_per_tx() { ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; + // Check we account for all extrinsic weight and their len. 
assert_eq!( >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, + base_block_weight + 3u64 * extrinsic_weight + 3u64 * Weight::from_parts(0, len as u64), ); assert_eq!(>::all_extrinsics_len(), 3 * len); @@ -743,8 +756,12 @@ fn validate_unsigned() { fn can_not_pay_for_tx_fee_on_full_lock() { let mut t = new_test_ext(1); t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze(&(), &1, 110) - .unwrap(); + as fungible::MutateFreeze>::set_freeze( + &FreezeReasonId::Foo, + &1, + 110, + ) + .unwrap(); let xt = TestXt::new( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 5b7121e2eae37..2b188bad1dfa7 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -15,31 +15,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +frame-election-provider-support = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core", default-features = false } -substrate-test-utils = { path = "../../test-utils" } -sp-tracing = { path = "../../primitives/tracing" } -pallet-staking = { path = "../staking" } -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true } +substrate-test-utils = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [features] default = ["std"] @@ -58,7 +57,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", "sp-tracing/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index 4828dcb9b42cb..d01ff715ca4fc 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -20,6 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use crate::{types::*, Pallet as FastUnstake, 
*}; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{benchmarks, whitelist_account, BenchmarkError}; use frame_support::{ assert_ok, @@ -28,7 +29,6 @@ use frame_support::{ use frame_system::RawOrigin; use sp_runtime::traits::Zero; use sp_staking::{EraIndex, StakingInterface}; -use sp_std::prelude::*; const USER_SEED: u32 = 0; diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs index f31c9c640260b..41920907bd57b 100644 --- a/substrate/frame/fast-unstake/src/lib.rs +++ b/substrate/frame/fast-unstake/src/lib.rs @@ -112,6 +112,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use pallet::*; #[cfg(test)] @@ -150,6 +152,7 @@ macro_rules! log { pub mod pallet { use super::*; use crate::types::*; + use alloc::vec::Vec; use frame_support::{ pallet_prelude::*, traits::{Defensive, ReservableCurrency, StorageVersion}, @@ -157,7 +160,6 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::{traits::Zero, DispatchResult}; use sp_staking::{EraIndex, StakingInterface}; - use sp_std::{prelude::*, vec::Vec}; pub use weights::WeightInfo; #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/fast-unstake/src/migrations.rs b/substrate/frame/fast-unstake/src/migrations.rs index 97ad86bfff42b..1a873534ac819 100644 --- a/substrate/frame/fast-unstake/src/migrations.rs +++ b/substrate/frame/fast-unstake/src/migrations.rs @@ -17,20 +17,20 @@ pub mod v1 { use crate::{types::BalanceOf, *}; + use alloc::vec::Vec; use frame_support::{ storage::unhashed, traits::{Defensive, Get, GetStorageVersion, OnRuntimeUpgrade}, weights::Weight, }; use sp_staking::EraIndex; - use sp_std::prelude::*; #[cfg(feature = "try-runtime")] use frame_support::ensure; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; - pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { let current = Pallet::::in_code_storage_version(); diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 9238a085141df..757052e230a18 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -26,7 +26,6 @@ use frame_support::{ use sp_runtime::{traits::IdentityLookup, BuildStorage}; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; -use sp_std::prelude::*; pub type AccountId = u128; pub type BlockNumber = u64; @@ -60,20 +59,11 @@ parameter_types! { pub static ExistentialDeposit: Balance = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! 
{ @@ -113,35 +103,17 @@ impl frame_election_provider_support::ElectionProvider for MockElection { } } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type HistoryDepth = ConstU32<84>; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/fast-unstake/src/types.rs b/substrate/frame/fast-unstake/src/types.rs index 3fb5720861fa8..2a2319ef61296 100644 --- a/substrate/frame/fast-unstake/src/types.rs +++ b/substrate/frame/fast-unstake/src/types.rs @@ -24,7 +24,6 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_staking::{EraIndex, StakingInterface}; -use sp_std::prelude::*; /// Maximum number of eras that we might check for a single staker. 
/// @@ -32,7 +31,7 @@ use sp_std::prelude::*; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] #[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] -pub struct MaxChecking<T: Config>(sp_std::marker::PhantomData<T>); +pub struct MaxChecking<T: Config>(core::marker::PhantomData<T>); impl<T: Config> frame_support::traits::Get<u32> for MaxChecking<T> { fn get() -> u32 { T::Staking::bonding_duration() + 1 diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 730c4e70935c0..5a73e8caef8ea 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2 = { version = "0.10.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +blake2 = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-inherents = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,9 +43,9 @@ std = [ "pallet-balances/std", "scale-info/std", "sp-core/std", + "sp-inherents/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/glutton/README.md b/substrate/frame/glutton/README.md index 89dbe26ec7a9d..43642df19104a 100644 --- a/substrate/frame/glutton/README.md +++ b/substrate/frame/glutton/README.md @@ -7,6 +7,7 @@ The `Glutton` pallet gets the name from its property to consume vast amounts of resources. It can be used to push para-chains and their relay-chains to the limits. This is good for testing out theoretical limits in a practical way. -The `Glutton` can be set to consume a fraction of the available unused weight of a chain. It accomplishes this by -utilizing the `on_idle` hook and consuming a specific ration of the remaining weight. The rations can be set via -`set_compute` and `set_storage`. Initially the `Glutton` needs to be initialized once with `initialize_pallet`. +The `Glutton` can be set to consume a fraction of the available block length and unused weight of a chain. It +accomplishes this by filling the block length up to a ratio and utilizing the `on_idle` hook to consume a +specific ratio of the remaining weight. The ratios can be set via `set_compute`, `set_storage` and `set_block_length`. +Initially the `Glutton` needs to be initialized once with `initialize_pallet`.
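To make the three knobs from the README hunk above concrete, here is a minimal sketch (not part of this diff) of how a test or tooling crate might set all of them for some runtime `T` that includes the glutton pallet. The `configure_glutton` helper name and the 75% / 50% / 30% ratios are illustrative assumptions; only `set_compute`, `set_storage` and `set_block_length` are actual pallet calls.

```rust
use frame_support::assert_ok;
use frame_system::{pallet_prelude::OriginFor, RawOrigin};
use sp_runtime::FixedU64;

/// Illustrative helper (assumed name): set all three glutton ratios for a runtime `T`.
fn configure_glutton<T: pallet_glutton::Config>() {
    // Root origin, which always passes the pallet's admin-origin check.
    let root = || -> OriginFor<T> { RawOrigin::<T::AccountId>::Root.into() };

    // Burn 75% of the leftover `ref_time` and 50% of the proof size budget in `on_idle`.
    assert_ok!(pallet_glutton::Pallet::<T>::set_compute(root(), FixedU64::from_float(0.75)));
    assert_ok!(pallet_glutton::Pallet::<T>::set_storage(root(), FixedU64::from_float(0.5)));

    // Let the new `bloat` inherent fill 30% of the block length with trash data.
    assert_ok!(pallet_glutton::Pallet::<T>::set_block_length(root(), FixedU64::from_float(0.3)));
}
```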
diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index 344a70becaeb9..c8d2981ebfef1 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -35,12 +35,14 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use blake2::{Blake2b512, Digest}; use frame_support::{pallet_prelude::*, weights::WeightMeter, DefaultNoBound}; use frame_system::pallet_prelude::*; use sp_io::hashing::twox_256; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64}; -use sp_std::{vec, vec::Vec}; pub use pallet::*; pub use weights::WeightInfo; @@ -89,6 +91,11 @@ pub mod pallet { /// The storage limit. storage: FixedU64, }, + /// The block length limit has been updated. + BlockLengthLimitSet { + /// The block length limit. + block_length: FixedU64, + }, } #[pallet::error] @@ -116,6 +123,13 @@ pub mod pallet { #[pallet::storage] pub(crate) type Storage = StorageValue<_, FixedU64, ValueQuery>; + /// The proportion of the `block length` to consume on each block. + /// + /// `1.0` is mapped to `100%`. Must be at most [`crate::RESOURCE_HARD_LIMIT`]. Setting this to + /// over `1.0` could stall the chain. + #[pallet::storage] + pub(crate) type Length = StorageValue<_, FixedU64, ValueQuery>; + /// Storage map used for wasting proof size. /// /// It contains no meaningful data - hence the name "Trash". The maximal number of entries is @@ -146,9 +160,11 @@ pub mod pallet { pub storage: FixedU64, /// The amount of trash data for wasting proof size. pub trash_data_count: u32, + /// The block length limit. + pub block_length: FixedU64, #[serde(skip)] /// The required configuration field. - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -170,6 +186,9 @@ pub mod pallet { assert!(self.storage <= RESOURCE_HARD_LIMIT, "Storage limit is insane"); >::put(self.storage); + + assert!(self.block_length <= RESOURCE_HARD_LIMIT, "Block length limit is insane"); + >::put(self.block_length); } } @@ -208,6 +227,40 @@ pub mod pallet { } } + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: InherentIdentifier = *b"bloated0"; + + fn create_inherent(_data: &InherentData) -> Option { + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Mandatory); + let bloat_size = Length::::get().saturating_mul_int(max_block_length) as usize; + let amount_trash = bloat_size / VALUE_SIZE; + let garbage = TrashData::::iter() + .map(|(_k, v)| v) + .collect::>() + .into_iter() + .cycle() + .take(amount_trash) + .collect::>(); + + Some(Call::bloat { garbage }) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::bloat { .. }) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + Call::bloat { .. } => Ok(()), + _ => unreachable!("other calls are not inherents"), + } + } + } + #[pallet::call(weight = T::WeightInfo)] impl Pallet { /// Initialize the pallet. Should be called once, if no genesis state was provided. @@ -277,6 +330,31 @@ pub mod pallet { Self::deposit_event(Event::StorageLimitSet { storage }); Ok(()) } + + /// Increase the block size by including the specified garbage bytes. 
+ #[pallet::call_index(3)] + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn bloat(_origin: OriginFor<T>, _garbage: Vec<[u8; VALUE_SIZE]>) -> DispatchResult { + Ok(()) + } + + /// Set how much of the block length should be filled with trash data on each block. + /// + /// `1.0` means that the whole block length should be filled. If set to `1.0`, storage proof size will + /// be close to zero. + /// + /// Only callable by Root or `AdminOrigin`. + #[pallet::call_index(4)] + #[pallet::weight({1})] + pub fn set_block_length(origin: OriginFor<T>, block_length: FixedU64) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + + ensure!(block_length <= RESOURCE_HARD_LIMIT, Error::<T>::InsaneLimit); + Length::<T>::set(block_length); + + Self::deposit_event(Event::BlockLengthLimitSet { block_length }); + Ok(()) + } } impl<T: Config> Pallet<T> { diff --git a/substrate/frame/glutton/src/mock.rs b/substrate/frame/glutton/src/mock.rs index 132ef5cfbcbba..7163d7c46781f 100644 --- a/substrate/frame/glutton/src/mock.rs +++ b/substrate/frame/glutton/src/mock.rs @@ -50,10 +50,14 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Set the `compute` and `storage` limits. +/// Set the `compute`, `storage` and `block_length` limits. /// /// `1.0` corresponds to `100%`. -pub fn set_limits(compute: f64, storage: f64) { +pub fn set_limits(compute: f64, storage: f64, block_length: f64) { assert_ok!(Glutton::set_compute(RuntimeOrigin::root(), FixedU64::from_float(compute))); assert_ok!(Glutton::set_storage(RuntimeOrigin::root(), FixedU64::from_float(storage))); + assert_ok!(Glutton::set_block_length( + RuntimeOrigin::root(), + FixedU64::from_float(block_length) + )); } diff --git a/substrate/frame/glutton/src/tests.rs b/substrate/frame/glutton/src/tests.rs index b72d527277254..81d228f39a936 100644 --- a/substrate/frame/glutton/src/tests.rs +++ b/substrate/frame/glutton/src/tests.rs @@ -123,6 +123,43 @@ fn setting_compute_respects_limit() { }); } +#[test] +fn setting_block_length_works() { + new_test_ext().execute_with(|| { + assert_eq!(Compute::<Test>::get(), Zero::zero()); + + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(0.3))); + assert_eq!(Length::<Test>::get(), FixedU64::from_float(0.3)); + System::assert_last_event( + Event::BlockLengthLimitSet { block_length: FixedU64::from_float(0.3) }.into(), + ); + + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::signed(1), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::none(), FixedU64::from_float(0.5)), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn setting_block_length_respects_limit() { + new_test_ext().execute_with(|| { + // < 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(9.99)),); + // == 1000% is fine + assert_ok!(Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_u32(10)),); + // > 1000% is not + assert_noop!( + Glutton::set_block_length(RuntimeOrigin::root(), FixedU64::from_float(10.01)), + Error::<Test>::InsaneLimit + ); + }); +} + #[test] fn setting_storage_works() { new_test_ext().execute_with(|| { @@ -163,7 +200,7 @@ fn setting_storage_respects_limit() { #[test] fn on_idle_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); Glutton::on_idle(1, Weight::from_parts(20_000_000, 0)); }); @@ -173,7 +210,7 @@ fn on_idle_works() { #[test] fn on_idle_weight_high_proof_is_close_enough_works() {
new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_MB * 5); let got = Glutton::on_idle(1, should); @@ -196,7 +233,7 @@ fn on_idle_weight_high_proof_is_close_enough_works() { #[test] fn on_idle_weight_low_proof_is_close_enough_works() { new_test_ext().execute_with(|| { - set_limits(One::one(), One::one()); + set_limits(One::one(), One::one(), One::one()); let should = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, WEIGHT_PROOF_SIZE_PER_KB * 20); let got = Glutton::on_idle(1, should); @@ -224,7 +261,7 @@ fn on_idle_weight_over_unity_is_close_enough_works() { let max_block = Weight::from_parts(500 * WEIGHT_REF_TIME_PER_MILLIS, 5 * WEIGHT_PROOF_SIZE_PER_MB); // But now we tell it to consume more than that. - set_limits(1.75, 1.5); + set_limits(1.75, 1.5, 0.0); let want = Weight::from_parts( (1.75 * max_block.ref_time() as f64) as u64, (1.5 * max_block.proof_size() as f64) as u64, diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 37048b06608f6..e24f9a51db842 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -16,33 +16,32 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -pallet-session = { path = "../session", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +pallet-session = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } [dev-dependencies] -finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -frame-benchmarking = { path = "../benchmarking" } 
-frame-election-provider-support = { path = "../election-provider-support" } -pallet-balances = { path = "../balances" } -pallet-offences = { path = "../offences" } -pallet-staking = { path = "../staking" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } -pallet-timestamp = { path = "../timestamp" } -sp-keyring = { path = "../../primitives/keyring" } +finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] @@ -67,7 +66,6 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index 16727f79a58d5..b213c1ceb7219 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -35,6 +35,7 @@ //! that the `ValidateUnsigned` for the GRANDPA pallet is used in the runtime //! definition. +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; use frame_system::pallet_prelude::BlockNumberFor; @@ -52,7 +53,6 @@ use sp_staking::{ offence::{Kind, Offence, OffenceReportSystem, ReportOffence}, SessionIndex, }; -use sp_std::prelude::*; use super::{Call, Config, Error, Pallet, LOG_TARGET}; @@ -114,7 +114,7 @@ impl Offence for EquivocationOffence { /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. -pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, R, P, L)>); +pub struct EquivocationReportSystem(core::marker::PhantomData<(T, R, P, L)>); impl OffenceReportSystem< diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 90bcd8721dfa1..4f69aeaef5236 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -28,11 +28,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + // Re-export since this is necessary for `impl_apis` in runtime. 
pub use sp_consensus_grandpa::{ self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, }; +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, @@ -50,7 +53,6 @@ use sp_consensus_grandpa::{ use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult}; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::{offence::OffenceReportSystem, SessionIndex}; -use sp_std::prelude::*; mod default_weights; mod equivocation; @@ -351,7 +353,7 @@ pub mod pallet { pub struct GenesisConfig { pub authorities: AuthorityList, #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs index a0865a3f2bf9a..f1af0af42dd0a 100644 --- a/substrate/frame/grandpa/src/migrations/v5.rs +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -16,7 +16,9 @@ // limitations under the License. use crate::{BoundedAuthorityList, Pallet}; +use alloc::vec::Vec; use codec::Decode; +use core::marker::PhantomData; use frame_support::{ migrations::VersionedMigration, storage, @@ -24,7 +26,6 @@ use frame_support::{ weights::Weight, }; use sp_consensus_grandpa::AuthorityList; -use sp_std::{marker::PhantomData, vec::Vec}; const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 38b5536bc598b..5ba7da7f9fda2 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -108,20 +108,11 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -159,35 +150,22 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBoundsOnChain; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type SessionInterface = Self; type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; - type MaxExposurePageSize = ConstU32<64>; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = 
pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 987e418048d36..6794fbfbbf42b 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -16,21 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -enumflags2 = { version = "0.7.7" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +enumflags2 = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-keystore = { path = "../../primitives/keystore" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-io/std", "sp-keystore/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index 957549b19f859..ab04000c2281b 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -22,6 +22,7 @@ use super::*; use crate::Pallet as Identity; +use alloc::{vec, vec::Vec}; use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::{ assert_ok, ensure, diff --git a/substrate/frame/identity/src/legacy.rs b/substrate/frame/identity/src/legacy.rs index 60e812c2238b2..c2107e9290385 100644 --- a/substrate/frame/identity/src/legacy.rs +++ b/substrate/frame/identity/src/legacy.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#[cfg(feature = "runtime-benchmarks")] +use alloc::vec; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "runtime-benchmarks")] use enumflags2::BitFlag; @@ -22,7 +24,6 @@ use enumflags2::{bitflags, BitFlags}; use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::{build::Variants, Path, Type, TypeInfo}; use sp_runtime::{BoundedVec, RuntimeDebug}; -use sp_std::prelude::*; use crate::types::{Data, IdentityInformationProvider}; diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 50d6de32ac61e..776a08f5e9e8a 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -101,7 +101,10 @@ mod tests; mod types; pub mod weights; +extern crate alloc; + use crate::types::{AuthorityPropertiesOf, Suffix, Username}; +use alloc::{boxed::Box, vec::Vec}; use codec::Encode; use frame_support::{ ensure, @@ -113,7 +116,6 @@ pub use pallet::*; use sp_runtime::traits::{ AppendZerosInput, Hash, IdentifyAccount, Saturating, StaticLookup, Verify, Zero, }; -use sp_std::prelude::*; pub use types::{ Data, IdentityInformationProvider, Judgement, RegistrarIndex, RegistrarInfo, Registration, }; diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index b1a953d487ce2..09edd5de79bb2 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -61,20 +61,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/identity/src/types.rs b/substrate/frame/identity/src/types.rs index 10f0db8c25d8f..45401d53e9e90 100644 --- a/substrate/frame/identity/src/types.rs +++ b/substrate/frame/identity/src/types.rs @@ -16,7 +16,9 @@ // limitations under the License. use super::*; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{fmt::Debug, iter::once, ops::Add}; use frame_support::{ traits::{ConstU32, Get}, BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, @@ -29,7 +31,6 @@ use sp_runtime::{ traits::{Member, Zero}, RuntimeDebug, }; -use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*}; /// An identifier for a single name registrar/identity verification service. 
pub type RegistrarIndex = u32; @@ -65,7 +66,7 @@ impl Data { } impl Decode for Data { - fn decode(input: &mut I) -> sp_std::result::Result { + fn decode(input: &mut I) -> core::result::Result { let b = input.read_byte()?; Ok(match b { 0 => Data::None, @@ -295,7 +296,7 @@ impl< IdentityInfo: IdentityInformationProvider, > Decode for Registration { - fn decode(input: &mut I) -> sp_std::result::Result { + fn decode(input: &mut I) -> core::result::Result { let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; Ok(Self { judgements, deposit, info }) } diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 78192a81d7b46..2cb03b57d6ca8 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-authorship = { path = "../authorship", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-authorship = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } [dev-dependencies] -pallet-session = { path = "../session" } +pallet-session = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index f91a473e53d53..ee2a8451d6fb7 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -82,6 +82,9 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ pallet_prelude::*, @@ -107,7 +110,6 @@ use sp_staking::{ offence::{Kind, Offence, ReportOffence}, SessionIndex, }; -use sp_std::prelude::*; pub use weights::WeightInfo; pub mod sr25519 { @@ -196,8 +198,8 @@ enum OffchainErr { SubmitTransaction, } -impl sp_std::fmt::Debug for OffchainErr { - fn fmt(&self, 
fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { +impl core::fmt::Debug for OffchainErr { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { match *self { OffchainErr::TooEarly => write!(fmt, "Too early to send heartbeat."), OffchainErr::WaitingForInclusion(ref block) => { diff --git a/substrate/frame/im-online/src/migration.rs b/substrate/frame/im-online/src/migration.rs index 754a2e672e6cf..6d3a5cda69735 100644 --- a/substrate/frame/im-online/src/migration.rs +++ b/substrate/frame/im-online/src/migration.rs @@ -18,6 +18,7 @@ //! Storage migrations for the im-online pallet. use super::*; +use alloc::vec::Vec; use frame_support::{storage_alias, traits::OnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index 248bae003ed85..ea17048153126 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-keyring = { path = "../../primitives/keyring", optional = true, default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { optional = true, workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-keyring", "sp-keyring?/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/indices/src/lib.rs b/substrate/frame/indices/src/lib.rs index ff12d092cfb8d..740d69365df3e 100644 --- a/substrate/frame/indices/src/lib.rs +++ b/substrate/frame/indices/src/lib.rs @@ -25,13 +25,15 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::vec::Vec; use codec::Codec; use frame_support::traits::{BalanceStatus::Reserved, Currency, ReservableCurrency}; use sp_runtime::{ traits::{AtLeast32Bit, LookupError, Saturating, StaticLookup, Zero}, MultiAddress, }; -use sp_std::prelude::*; pub use weights::WeightInfo; type BalanceOf = diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 7a8ff98f6d4ae..72bbc6dab4a42 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -42,20 +42,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl 
pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index c2ec14cb4bc77..977b9fdb6f606 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -16,17 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +safe-mix = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -39,7 +38,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/insecure-randomness-collective-flip/README.md b/substrate/frame/insecure-randomness-collective-flip/README.md index 4f02782fa6591..fc38367bf5520 100644 --- a/substrate/frame/insecure-randomness-collective-flip/README.md +++ b/substrate/frame/insecure-randomness-collective-flip/README.md @@ -44,7 +44,7 @@ pub mod pallet { impl Pallet { #[pallet::weight(0)] pub fn random_module_example(origin: OriginFor) -> DispatchResult { - let _random_value = >::random(&b"my context"[..]); + let _random_value = pallet_insecure_randomness_collective_flip::Pallet::::random(&b"my context"[..]); Ok(()) } } diff --git a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs index bdb089a14200c..b605b4d08582b 100644 --- a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs +++ b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs @@ -60,7 +60,7 @@ //! impl Pallet { //! #[pallet::weight(0)] //! pub fn random_module_example(origin: OriginFor) -> DispatchResult { -//! let _random_value = >::random(&b"my context"[..]); +//! let _random_value = pallet_insecure_randomness_collective_flip::Pallet::::random(&b"my context"[..]); //! Ok(()) //! } //! 
} @@ -101,9 +101,9 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(block_number: BlockNumberFor) -> Weight { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); - >::mutate(|ref mut values| { + RandomMaterial::::mutate(|ref mut values| { if values.try_push(parent_hash).is_err() { let index = block_number_to_index::(block_number); values[index] = parent_hash; @@ -118,9 +118,15 @@ pub mod pallet { /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of /// the oldest hash. #[pallet::storage] - #[pallet::getter(fn random_material)] - pub(super) type RandomMaterial = + pub type RandomMaterial = StorageValue<_, BoundedVec>, ValueQuery>; + + impl Pallet { + /// Gets the random material storage value + pub fn random_material() -> BoundedVec> { + RandomMaterial::::get() + } + } } impl Randomness> for Pallet { @@ -135,10 +141,10 @@ impl Randomness> for Pallet { /// and mean that all bits of the resulting value are entirely manipulatable by the author of /// the parent block, who can determine the value of `parent_hash`. fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { - let block_number = >::block_number(); + let block_number = frame_system::Pallet::::block_number(); let index = block_number_to_index::(block_number); - let hash_series = >::get(); + let hash_series = RandomMaterial::::get(); let seed = if !hash_series.is_empty() { // Always the case after block 1 is initialized. hash_series @@ -226,7 +232,7 @@ mod tests { setup_blocks(38); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 38); assert_eq!(random_material[0], genesis_hash); @@ -240,7 +246,7 @@ mod tests { setup_blocks(81); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 81); assert_ne!(random_material[0], random_material[1]); @@ -255,7 +261,7 @@ mod tests { setup_blocks(162); - let random_material = CollectiveFlip::random_material(); + let random_material = RandomMaterial::::get(); assert_eq!(random_material.len(), 81); assert_ne!(random_material[0], random_material[1]); @@ -276,7 +282,7 @@ mod tests { assert_eq!(known_since, 162 - RANDOM_MATERIAL_LEN as u64); assert_ne!(random, H256::zero()); - assert!(!CollectiveFlip::random_material().contains(&random)); + assert!(!RandomMaterial::::get().contains(&random)); }); } } diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index be59e5ec89351..0a33e54a825de 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -15,21 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = 
true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -frame-support-test = { path = "../support/test" } -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +frame-support-test = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/lottery/src/benchmarking.rs b/substrate/frame/lottery/src/benchmarking.rs index 123b425b976f3..046bc0acbb644 100644 --- a/substrate/frame/lottery/src/benchmarking.rs +++ b/substrate/frame/lottery/src/benchmarking.rs @@ -22,6 +22,7 @@ use super::*; use crate::Pallet as Lottery; +use alloc::{boxed::Box, vec}; use frame_benchmarking::{ v1::{account, whitelisted_caller, BenchmarkError}, v2::*, diff --git a/substrate/frame/lottery/src/lib.rs b/substrate/frame/lottery/src/lib.rs index 54a8edd38606d..0071b258fc45c 100644 --- a/substrate/frame/lottery/src/lib.rs +++ b/substrate/frame/lottery/src/lib.rs @@ -54,6 +54,9 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchResult, GetDispatchInfo}, @@ -68,7 +71,6 @@ use sp_runtime::{ traits::{AccountIdConversion, Dispatchable, Saturating, Zero}, ArithmeticError, DispatchError, RuntimeDebug, }; -use sp_std::prelude::*; pub use weights::WeightInfo; type BalanceOf = diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index 596e1a9d837d1..d2c442e2ac6e5 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_lottery; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, + traits::{ConstU32, OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; @@ -49,20 +49,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! 
{ diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 9f19c40973687..0fc5ce02809e1 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -16,16 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } [features] default = ["std"] @@ -39,7 +38,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index aa6be6497eea6..e38a6ba5d931b 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -23,12 +23,14 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use frame_support::{ - traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers}, + traits::{ChangeMembers, Contains, ContainsLengthBound, Get, InitializeMembers, SortedMembers}, BoundedVec, }; use sp_runtime::traits::{StaticLookup, UniqueSaturatedInto}; -use sp_std::prelude::*; pub mod migrations; pub mod weights; @@ -95,13 +97,11 @@ pub mod pallet { /// The current membership, stored as an ordered Vec. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// The current prime member, if one exists. 
#[pallet::storage] - #[pallet::getter(fn prime)] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::genesis_config] @@ -115,7 +115,7 @@ pub mod pallet { #[pallet::genesis_build] impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { - use sp_std::collections::btree_set::BTreeSet; + use alloc::collections::btree_set::BTreeSet; let members_set: BTreeSet<_> = self.members.iter().collect(); assert_eq!( members_set.len(), @@ -126,7 +126,7 @@ pub mod pallet { let mut members = self.members.clone(); members.sort(); T::MembershipInitialized::initialize_members(&members); - >::put(members); + Members::::put(members); } } @@ -171,14 +171,14 @@ pub mod pallet { T::AddOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let mut members = >::get(); + let mut members = Members::::get(); let init_length = members.len(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; members .try_insert(location, who.clone()) .map_err(|_| Error::::TooManyMembers)?; - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); @@ -199,12 +199,12 @@ pub mod pallet { T::RemoveOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let mut members = >::get(); + let mut members = Members::::get(); let init_length = members.len(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); Self::rejig_prime(&members); @@ -233,13 +233,13 @@ pub mod pallet { return Ok(().into()); } - let mut members = >::get(); + let mut members = Members::::get(); let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; members[location] = add.clone(); members.sort(); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted(&[add], &[remove], &members[..]); Self::rejig_prime(&members); @@ -260,7 +260,7 @@ pub mod pallet { let mut members: BoundedVec = BoundedVec::try_from(members).map_err(|_| Error::::TooManyMembers)?; members.sort(); - >::mutate(|m| { + Members::::mutate(|m| { T::MembershipChanged::set_members_sorted(&members[..], m); Self::rejig_prime(&members); *m = members; @@ -288,14 +288,14 @@ pub mod pallet { return Ok(().into()); } - let mut members = >::get(); + let mut members = Members::::get(); let members_length = members.len() as u32; let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; members[location] = new.clone(); members.sort(); - >::put(&members); + Members::::put(&members); T::MembershipChanged::change_members_sorted( &[new.clone()], @@ -323,7 +323,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { T::PrimeOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - let members = Self::members(); + let members = Members::::get(); members.binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); @@ -345,6 +345,16 @@ pub mod pallet { } impl, I: 'static> Pallet { + /// The current membership, stored as an ordered `Vec`. + pub fn members() -> BoundedVec { + Members::::get() + } + + /// The current prime member, if one exists. 
+ pub fn prime() -> Option { + Prime::::get() + } + fn rejig_prime(members: &[T::AccountId]) { if let Some(prime) = Prime::::get() { match members.binary_search(&prime) { @@ -357,13 +367,24 @@ impl, I: 'static> Pallet { impl, I: 'static> Contains for Pallet { fn contains(t: &T::AccountId) -> bool { - Self::members().binary_search(t).is_ok() + Members::::get().binary_search(t).is_ok() + } +} + +impl ContainsLengthBound for Pallet { + fn min_len() -> usize { + 0 + } + + /// Implementation uses a parameter type so calling is cost-free. + fn max_len() -> usize { + T::MaxMembers::get() as usize } } impl, I: 'static> SortedMembers for Pallet { fn sorted_members() -> Vec { - Self::members().to_vec() + Members::::get().to_vec() } fn count() -> usize { @@ -398,12 +419,12 @@ mod benchmark { let prime_origin = T::PrimeOrigin::try_successful_origin() .expect("PrimeOrigin has no successful origin required for the benchmark"); - assert_ok!(>::reset_members(reset_origin, members.clone())); + assert_ok!(Membership::::reset_members(reset_origin, members.clone())); if let Some(prime) = prime.map(|i| members[i].clone()) { let prime_lookup = T::Lookup::unlookup(prime); - assert_ok!(>::set_prime(prime_origin, prime_lookup)); + assert_ok!(Membership::::set_prime(prime_origin, prime_lookup)); } else { - assert_ok!(>::clear_prime(prime_origin)); + assert_ok!(Membership::::clear_prime(prime_origin)); } } @@ -416,12 +437,12 @@ mod benchmark { let new_member = account::("add", m, SEED); let new_member_lookup = T::Lookup::unlookup(new_member.clone()); }: { - assert_ok!(>::add_member( + assert_ok!(Membership::::add_member( T::AddOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, new_member_lookup, )); } verify { - assert!(>::get().contains(&new_member)); + assert!(Members::::get().contains(&new_member)); #[cfg(test)] crate::tests::clean(); } @@ -436,14 +457,14 @@ mod benchmark { let to_remove = members.first().cloned().unwrap(); let to_remove_lookup = T::Lookup::unlookup(to_remove.clone()); }: { - assert_ok!(>::remove_member( + assert_ok!(Membership::::remove_member( T::RemoveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, to_remove_lookup, )); } verify { - assert!(!>::get().contains(&to_remove)); + assert!(!Members::::get().contains(&to_remove)); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -458,16 +479,16 @@ mod benchmark { let remove = members.first().cloned().unwrap(); let remove_lookup = T::Lookup::unlookup(remove.clone()); }: { - assert_ok!(>::swap_member( + assert_ok!(Membership::::swap_member( T::SwapOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, remove_lookup, add_lookup, )); } verify { - assert!(!>::get().contains(&remove)); - assert!(>::get().contains(&add)); + assert!(!Members::::get().contains(&remove)); + assert!(Members::::get().contains(&add)); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -479,15 +500,15 @@ mod benchmark { set_members::(members.clone(), Some(members.len() - 1)); let mut new_members = (m..2*m).map(|i| account("member", i, SEED)).collect::>(); }: { - assert_ok!(>::reset_members( + assert_ok!(Membership::::reset_members( 
T::ResetOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, new_members.clone(), )); } verify { new_members.sort(); - assert_eq!(>::get(), new_members); + assert_eq!(Members::::get(), new_members); // prime is rejigged - assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -503,12 +524,12 @@ mod benchmark { let add_lookup = T::Lookup::unlookup(add.clone()); whitelist!(prime); }: { - assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); + assert_ok!(Membership::::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); } verify { - assert!(!>::get().contains(&prime)); - assert!(>::get().contains(&add)); + assert!(!Members::::get().contains(&prime)); + assert!(Members::::get().contains(&add)); // prime is rejigged - assert_eq!(>::get().unwrap(), add); + assert_eq!(Prime::::get().unwrap(), add); #[cfg(test)] crate::tests::clean(); } @@ -519,12 +540,12 @@ mod benchmark { let prime_lookup = T::Lookup::unlookup(prime.clone()); set_members::(members, None); }: { - assert_ok!(>::set_prime( + assert_ok!(Membership::::set_prime( T::PrimeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, prime_lookup, )); } verify { - assert!(>::get().is_some()); + assert!(Prime::::get().is_some()); assert!(::get_prime().is_some()); #[cfg(test)] crate::tests::clean(); } @@ -534,11 +555,11 @@ mod benchmark { let prime = members.last().cloned().unwrap(); set_members::(members, None); }: { - assert_ok!(>::clear_prime( + assert_ok!(Membership::::clear_prime( T::PrimeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, )); } verify { - assert!(>::get().is_none()); + assert!(Prime::::get().is_none()); assert!(::get_prime().is_none()); #[cfg(test)] crate::tests::clean(); } @@ -655,7 +676,7 @@ mod tests { #[test] fn query_membership_works() { new_test_ext().execute_with(|| { - assert_eq!(Membership::members(), vec![10, 20, 30]); + assert_eq!(crate::Members::::get(), vec![10, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![10, 20, 30]); }); } @@ -669,12 +690,12 @@ mod tests { Error::::NotMember ); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::clear_prime(RuntimeOrigin::signed(5))); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -687,8 +708,11 @@ mod tests { Error::::AlreadyMember ); assert_ok!(Membership::add_member(RuntimeOrigin::signed(1), 15)); - assert_eq!(Membership::members(), vec![10, 15, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![10, 15, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -702,10 +726,13 @@ mod tests { ); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_ok!(Membership::remove_member(RuntimeOrigin::signed(2), 20)); - assert_eq!(Membership::members(), vec![10, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), 
Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -724,16 +751,19 @@ mod tests { assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 20, 20)); - assert_eq!(Membership::members(), vec![10, 20, 30]); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 20, 30]); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 25)); - assert_eq!(Membership::members(), vec![20, 25, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![20, 25, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -741,8 +771,11 @@ mod tests { fn swap_member_works_that_does_not_change_order() { new_test_ext().execute_with(|| { assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![5, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -770,10 +803,13 @@ mod tests { Error::::AlreadyMember ); assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 40)); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), Some(40)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![20, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), Some(40)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } @@ -781,8 +817,11 @@ mod tests { fn change_key_works_that_does_not_change_order() { new_test_ext().execute_with(|| { assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); + assert_eq!(crate::Members::::get(), vec![5, 20, 30]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); }); } @@ -803,16 +842,22 @@ mod tests { ); assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![20, 40, 30])); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + 
assert_eq!(crate::Members::::get(), vec![20, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![10, 40, 30])); - assert_eq!(Membership::members(), vec![10, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + assert_eq!(crate::Members::::get(), vec![10, 30, 40]); + assert_eq!( + MEMBERS.with(|m| m.borrow().clone()), + crate::Members::::get().to_vec() + ); + assert_eq!(crate::Prime::::get(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), crate::Prime::::get()); }); } diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 0d73c567cf4e1..b8a9b6065c6cd 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -15,22 +15,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -env_logger = "0.11" -itertools = "0.11" +array-bytes = { workspace = true, default-features = true } +env_logger = { workspace = true } +itertools = { workspace = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index a86443f2e0114..cacb33b504347 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -56,6 +56,9 @@ //! NOTE This pallet is experimental and not proven to work in production. 
#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use frame_support::weights::Weight; use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; use log; @@ -64,7 +67,6 @@ use sp_runtime::{ traits::{self, One, Saturating}, SaturatedConversion, }; -use sp_std::prelude::*; pub use pallet::*; pub use sp_mmr_primitives::{ @@ -89,7 +91,7 @@ mod tests; /// is not available (since the block is not finished yet), /// we use the `parent_hash` here along with parent block number. pub struct ParentNumberAndHash { - _phantom: sp_std::marker::PhantomData, + _phantom: core::marker::PhantomData, } impl LeafDataProvider for ParentNumberAndHash { @@ -110,7 +112,7 @@ pub trait BlockHashProvider { /// Default implementation of BlockHashProvider using frame_system. pub struct DefaultBlockHashProvider { - _phantom: sp_std::marker::PhantomData, + _phantom: core::marker::PhantomData, } impl BlockHashProvider, T::Hash> @@ -282,6 +284,19 @@ where } } +/// Stateless ancestry proof verification. +pub fn verify_ancestry_proof( + root: H::Output, + ancestry_proof: primitives::AncestryProof, +) -> Result +where + H: traits::Hash, + L: primitives::FullLeaf, +{ + mmr::verify_ancestry_proof::(root, ancestry_proof) + .map_err(|_| Error::Verify.log_debug(("The ancestry proof is incorrect.", root))) +} + impl, I: 'static> Pallet { /// Build offchain key from `parent_hash` of block that originally added node `pos` to MMR. /// @@ -289,7 +304,7 @@ impl, I: 'static> Pallet { fn node_temp_offchain_key( pos: NodeIndex, parent_hash: ::Hash, - ) -> sp_std::prelude::Vec { + ) -> Vec { NodesUtils::node_temp_offchain_key::>(&T::INDEXING_PREFIX, pos, parent_hash) } @@ -298,22 +313,19 @@ impl, I: 'static> Pallet { /// Used for nodes added by now finalized blocks. /// Never read keys using `node_canon_offchain_key` unless you sure that /// there's no `node_offchain_key` key in the storage. - fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { + fn node_canon_offchain_key(pos: NodeIndex) -> Vec { NodesUtils::node_canon_offchain_key(&T::INDEXING_PREFIX, pos) } /// Provide the parent number for the block that added `leaf_index` to the MMR. - fn leaf_index_to_parent_block_num( - leaf_index: LeafIndex, - leaves_count: LeafIndex, - ) -> BlockNumberFor { + fn leaf_index_to_parent_block_num(leaf_index: LeafIndex) -> BlockNumberFor { // leaves are zero-indexed and were added one per block since pallet activation, // while block numbers are one-indexed, so block number that added `leaf_idx` is: // `block_num = block_num_when_pallet_activated + leaf_idx + 1` // `block_num = (current_block_num - leaves_count) + leaf_idx + 1` // `parent_block_num = current_block_num - leaves_count + leaf_idx`. >::block_number() - .saturating_sub(leaves_count.saturated_into()) + .saturating_sub(Self::mmr_leaves().saturated_into()) .saturating_add(leaf_index.saturated_into()) } @@ -330,6 +342,15 @@ impl, I: 'static> Pallet { utils::block_num_to_leaf_index::>(block_num, first_mmr_block) } + /// Convert a block number into a leaf index. + pub fn block_num_to_leaf_count(block_num: BlockNumberFor) -> Result + where + T: frame_system::Config, + { + let leaf_index = Self::block_num_to_leaf_index(block_num)?; + Ok(leaf_index.saturating_add(1)) + } + /// Generate an MMR proof for the given `block_numbers`. /// If `best_known_block_number = Some(n)`, this generates a historical proof for /// the chain with head at height `n`. 
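The `block_num_to_leaf_count` helper added above returns the number of leaves present once `block_num` has been processed, i.e. one more than that block's leaf index. A minimal standalone sketch of the arithmetic, assuming one leaf per block and an illustrative `first_mmr_block` activation height (plain `u64` values, not pallet types):

fn leaf_index(block_num: u64, first_mmr_block: u64) -> u64 {
    // Leaves are zero-indexed while block numbers are one-indexed.
    block_num - first_mmr_block
}

fn leaf_count(block_num: u64, first_mmr_block: u64) -> u64 {
    // The leaf added at `block_num` is the newest one, so count = index + 1.
    leaf_index(block_num, first_mmr_block) + 1
}

fn main() {
    // With the pallet active from genesis, block 1 adds leaf 0 and block 500 adds leaf 499.
    assert_eq!(leaf_index(500, 1), 499);
    assert_eq!(leaf_count(500, 1), 500);
}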
@@ -347,8 +368,7 @@ impl, I: 'static> Pallet { let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); - let leaves_count = - Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + let leaf_count = Self::block_num_to_leaf_count(best_known_block_number)?; // we need to translate the block_numbers into leaf indices. let leaf_indices = block_numbers @@ -358,7 +378,7 @@ impl, I: 'static> Pallet { }) .collect::, _>>()?; - let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); + let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); mmr.generate_proof(leaf_indices) } @@ -374,7 +394,7 @@ impl, I: 'static> Pallet { ) -> Result<(), primitives::Error> { if proof.leaf_count > NumberOfLeaves::::get() || proof.leaf_count == 0 || - (proof.items.len().saturating_add(leaves.len())) as u64 > proof.leaf_count + proof.items.len().saturating_add(leaves.len()) as u64 > proof.leaf_count { return Err(primitives::Error::Verify .log_debug("The proof has incorrect number of leaves or proof items.")) @@ -397,24 +417,18 @@ impl, I: 'static> Pallet { let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); - let leaf_count = Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); - let prev_leaf_count = Self::block_num_to_leaf_index(prev_block_number)?.saturating_add(1); + let leaf_count = Self::block_num_to_leaf_count(best_known_block_number)?; + let prev_leaf_count = Self::block_num_to_leaf_count(prev_block_number)?; let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); mmr.generate_ancestry_proof(prev_leaf_count) } pub fn verify_ancestry_proof( + root: HashOf, ancestry_proof: primitives::AncestryProof>, - ) -> Result<(), Error> { - let mmr: ModuleMmr = - mmr::Mmr::new(ancestry_proof.leaf_count); - let is_valid = mmr.verify_ancestry_proof(ancestry_proof)?; - if is_valid { - Ok(()) - } else { - Err(Error::Verify.log_debug("The ancestry proof is incorrect.")) - } + ) -> Result, Error> { + verify_ancestry_proof::, LeafOf>(root, ancestry_proof) } /// Return the on-chain MMR root hash. diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index 5efc172d1e93f..2b46357c50723 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -23,8 +23,8 @@ use crate::{ primitives::{self, Error, NodeIndex}, Config, HashOf, HashingOf, }; +use alloc::vec::Vec; use sp_mmr_primitives::{mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, LeafIndex}; -use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. 
/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the @@ -60,6 +60,42 @@ where .map_err(|e| Error::Verify.log_debug(e)) } +pub fn verify_ancestry_proof( + root: H::Output, + ancestry_proof: primitives::AncestryProof, +) -> Result +where + H: sp_runtime::traits::Hash, + L: primitives::FullLeaf, +{ + let mmr_size = NodesUtils::new(ancestry_proof.leaf_count).size(); + + let prev_peaks_proof = mmr_lib::NodeMerkleProof::, Hasher>::new( + mmr_size, + ancestry_proof + .items + .into_iter() + .map(|(index, hash)| (index, Node::Hash(hash))) + .collect(), + ); + + let raw_ancestry_proof = mmr_lib::AncestryProof::, Hasher> { + prev_peaks: ancestry_proof.prev_peaks.into_iter().map(|hash| Node::Hash(hash)).collect(), + prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), + proof: prev_peaks_proof, + }; + + let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::, Hasher>( + raw_ancestry_proof.prev_peaks.clone(), + ) + .map_err(|e| Error::Verify.log_debug(e))?; + raw_ancestry_proof + .verify_ancestor(Node::Hash(root), prev_root.clone()) + .map_err(|e| Error::Verify.log_debug(e))?; + + Ok(prev_root.hash()) +} + /// A wrapper around an MMR library to expose limited functionality. /// /// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) @@ -119,44 +155,6 @@ where .map_err(|e| Error::Verify.log_debug(e)) } - pub fn verify_ancestry_proof( - &self, - ancestry_proof: primitives::AncestryProof>, - ) -> Result { - let prev_peaks_proof = - mmr_lib::NodeMerkleProof::, Hasher, L>>::new( - self.mmr.mmr_size(), - ancestry_proof - .items - .into_iter() - .map(|(index, hash)| (index, Node::Hash(hash))) - .collect(), - ); - - let raw_ancestry_proof = mmr_lib::AncestryProof::< - NodeOf, - Hasher, L>, - > { - prev_peaks: ancestry_proof - .prev_peaks - .into_iter() - .map(|hash| Node::Hash(hash)) - .collect(), - prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), - proof: prev_peaks_proof, - }; - - let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::< - NodeOf, - Hasher, L>, - >(raw_ancestry_proof.prev_peaks.clone()) - .map_err(|e| Error::Verify.log_debug(e))?; - let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; - raw_ancestry_proof - .verify_ancestor(root, prev_root) - .map_err(|e| Error::Verify.log_debug(e)) - } - /// Return the internal size of the MMR (number of nodes). #[cfg(test)] pub fn size(&self) -> NodeIndex { diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs index 93fefe910e45d..5b73f53506e92 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs @@ -21,7 +21,7 @@ pub mod storage; use sp_mmr_primitives::{mmr_lib, DataOrHash, FullLeaf}; use sp_runtime::traits; -pub use self::mmr::{verify_leaves_proof, Mmr}; +pub use self::mmr::{verify_ancestry_proof, verify_leaves_proof, Mmr}; /// Node type for runtime `T`. pub type NodeOf = Node<>::Hashing, L>; diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index 6848b8f1b9906..a390898014846 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -17,14 +17,13 @@ //! An MMR storage implementation. 
+use alloc::{vec, vec::Vec}; use codec::Encode; +use core::iter::Peekable; use log::{debug, trace}; use sp_core::offchain::StorageKind; use sp_io::offchain_index; use sp_mmr_primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils}; -use sp_std::iter::Peekable; -#[cfg(not(feature = "std"))] -use sp_std::prelude::*; use crate::{ mmr::{Node, NodeOf}, @@ -52,7 +51,7 @@ pub struct OffchainStorage; /// /// There are two different implementations depending on the use case. /// See docs for [RuntimeStorage] and [OffchainStorage]. -pub struct Storage(sp_std::marker::PhantomData<(StorageType, T, I, L)>); +pub struct Storage(core::marker::PhantomData<(StorageType, T, I, L)>); impl Default for Storage { fn default() -> Self { @@ -67,7 +66,6 @@ where L: primitives::FullLeaf + codec::Decode, { fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { - let leaves = NumberOfLeaves::::get(); // Find out which leaf added node `pos` in the MMR. let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); @@ -86,7 +84,7 @@ where // Fall through to searching node using fork-specific key. let ancestor_parent_block_num = - Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); + Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx); let ancestor_parent_hash = T::BlockHashProvider::block_hash(ancestor_parent_block_num); let temp_key = Pallet::::node_temp_offchain_key(pos, ancestor_parent_hash); debug!( diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index f8cfcb4e2c286..b8c9d54db8209 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -792,16 +792,28 @@ fn does_not_panic_when_generating_historical_proofs() { fn generating_and_verifying_ancestry_proofs_works_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(500)); + + let mut prev_roots = vec![]; + ext.execute_with(|| { + for _ in 1..=500 { + add_blocks(1); + prev_roots.push(Pallet::::mmr_root()) + } + }); ext.persist_offchain_overlay(); register_offchain_ext(&mut ext); ext.execute_with(|| { + let root = Pallet::::mmr_root(); // Check that generating and verifying ancestry proofs works correctly // for each previous block - for prev_block_number in 1..501 { - let proof = Pallet::::generate_ancestry_proof(prev_block_number, None).unwrap(); - Pallet::::verify_ancestry_proof(proof).unwrap(); + for prev_block_number in 1usize..=500 { + let proof = + Pallet::::generate_ancestry_proof(prev_block_number as u64, None).unwrap(); + assert_eq!( + Pallet::::verify_ancestry_proof(root, proof), + Ok(prev_roots[prev_block_number - 1]) + ); } // Check that we can't generate ancestry proofs for a future block. 
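With the refactor above, ancestry-proof verification is stateless: the caller passes the MMR root explicitly and, on success, gets back the root of the earlier (ancestor) MMR, as exercised by the updated test. A rough sketch of how a consumer outside the pallet might call the new free function; the generic parameters, crate paths and names below are assumptions for illustration rather than code taken from this diff:

// Returns true if `proof` shows that an MMR with root `expected_prev_root` is an
// ancestor of the MMR with root `current_root`.
fn is_known_ancestor<H, L>(
    current_root: H::Output,
    expected_prev_root: H::Output,
    proof: sp_mmr_primitives::AncestryProof<H::Output>,
) -> bool
where
    H: sp_runtime::traits::Hash,
    L: sp_mmr_primitives::FullLeaf,
{
    // On success the helper returns the previous root recomputed from the proof,
    // which the caller can compare against a locally trusted value.
    pallet_mmr::verify_ancestry_proof::<H, L>(current_root, proof)
        .map(|prev_root| prev_root == expected_prev_root)
        .unwrap_or(false)
}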
diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index e44cbeb1550cc..0c3bbb2c883c5 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -12,28 +12,27 @@ description = "FRAME pallet to queue and process messages" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } -environmental = { version = "1.1.4", default-features = false } +environmental = { workspace = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-arithmetic = { workspace = true } +sp-weights = { workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-tracing = { path = "../../primitives/tracing" } -rand = "0.8.5" -rand_distr = "0.4.3" +sp-crypto-hashing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +rand_distr = { workspace = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-tracing/std", "sp-weights/std", ] diff --git a/substrate/frame/message-queue/src/benchmarking.rs b/substrate/frame/message-queue/src/benchmarking.rs index 7e99bc0585845..8f0712acc5f51 100644 --- a/substrate/frame/message-queue/src/benchmarking.rs +++ b/substrate/frame/message-queue/src/benchmarking.rs @@ -26,7 +26,6 @@ use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; -use sp_std::prelude::*; #[benchmarks( where diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index ef3420d21be52..2dbffef7e5a24 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -203,7 +203,11 @@ pub mod mock_helpers; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{vec, vec::Vec}; use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use core::{fmt::Debug, ops::Deref}; use frame_support::{ defensive, pallet_prelude::*, @@ -223,7 +227,6 @@ use sp_runtime::{ traits::{One, Zero}, SaturatedConversion, Saturating, }; -use sp_std::{fmt::Debug, ops::Deref, prelude::*, vec}; use sp_weights::WeightMeter; pub use 
weights::WeightInfo; @@ -307,7 +310,7 @@ impl< return Err(()) } - let mut heap = sp_std::mem::take(&mut self.heap).into_inner(); + let mut heap = core::mem::take(&mut self.heap).into_inner(); header.using_encoded(|h| heap.extend_from_slice(h)); heap.extend_from_slice(message.deref()); self.heap = BoundedVec::defensive_truncate_from(heap); @@ -1509,7 +1512,7 @@ pub(crate) fn with_service_mutex R, R>(f: F) -> Result { } /// Provides a [`sp_core::Get`] to access the `MEL` of a [`codec::MaxEncodedLen`] type. -pub struct MaxEncodedLenOf(sp_std::marker::PhantomData); +pub struct MaxEncodedLenOf(core::marker::PhantomData); impl Get for MaxEncodedLenOf { fn get() -> u32 { T::max_encoded_len() as u32 @@ -1518,7 +1521,7 @@ impl Get for MaxEncodedLenOf { /// Calculates the maximum message length and exposed it through the [`codec::MaxEncodedLen`] trait. pub struct MaxMessageLen( - sp_std::marker::PhantomData<(Origin, Size, HeapSize)>, + core::marker::PhantomData<(Origin, Size, HeapSize)>, ); impl, HeapSize: Get> Get for MaxMessageLen @@ -1544,7 +1547,7 @@ pub type BookStateOf = BookState>; /// Converts a [`sp_core::Get`] with returns a type that can be cast into an `u32` into a `Get` /// which returns an `u32`. -pub struct IntoU32(sp_std::marker::PhantomData<(T, O)>); +pub struct IntoU32(core::marker::PhantomData<(T, O)>); impl, O: Into> Get for IntoU32 { fn get() -> u32 { T::get().into() diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 66a242d5a18ff..26533cc7c330c 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -23,9 +23,9 @@ pub use super::mock_helpers::*; use super::*; use crate as pallet_message_queue; +use alloc::collections::btree_map::BTreeMap; use frame_support::{derive_impl, parameter_types}; use sp_runtime::BuildStorage; -use sp_std::collections::btree_map::BTreeMap; type Block = frame_system::mocking::MockBlock; diff --git a/substrate/frame/message-queue/src/mock_helpers.rs b/substrate/frame/message-queue/src/mock_helpers.rs index 28395e27cdd2a..873add776e209 100644 --- a/substrate/frame/message-queue/src/mock_helpers.rs +++ b/substrate/frame/message-queue/src/mock_helpers.rs @@ -22,6 +22,7 @@ //! Cannot be put into mock.rs since benchmarks require no-std and mock.rs is std. use crate::*; +use alloc::vec::Vec; use frame_support::traits::Defensive; /// Converts `Self` into a `Weight` by using `Self` for all components. 
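The same `sp-std` removal pattern repeats across the pallets touched in this diff: the crate stays `no_std`, heap types come from an explicitly declared `alloc`, and everything else moves to `core`. A condensed sketch of the resulting crate layout (item names are illustrative only, not from any one file here):

#![cfg_attr(not(feature = "std"), no_std)]

// `alloc` is not implicit in a `no_std` crate, hence the explicit declaration.
extern crate alloc;

use alloc::{vec, vec::Vec};
use core::marker::PhantomData;

// A zero-sized marker type, as used for the `PhantomData` fields above.
pub struct Example<T>(PhantomData<T>);

// A helper returning a heap-allocated `Vec`, relying on `alloc` rather than `sp_std`.
pub fn three_ones() -> Vec<u32> {
    vec![1, 1, 1]
}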
diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index 13d4bd0c2ea90..10d90bba0911b 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -9,23 +9,23 @@ repository.workspace = true description = "FRAME signed extension for verifying the metadata hash" [dependencies] -array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -log = { workspace = true, default-features = false } -docify = "0.2.8" +array-bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +docify = { workspace = true } [dev-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", features = ["metadata-hash"] } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-api = { path = "../../primitives/api" } -sp-transaction-pool = { path = "../../primitives/transaction-pool" } -merkleized-metadata = "0.1.0" -frame-metadata = { version = "16.0.0", features = ["current"] } -sp-tracing = { path = "../../primitives/tracing" } +substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-api = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +merkleized-metadata = { workspace = true } +frame-metadata = { features = ["current"], workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 69e910a4e4f6e..5fbed74a44007 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -11,28 +11,27 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -impl-trait-for-tuples = "0.2.2" -log = "0.4.21" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } - -frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } -frame-support = { default-features = false, path = "../support" } -frame-system = { default-features = false, path = "../system" } -sp-core = { path = "../../primitives/core", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } + 
+frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -frame-executive = { path = "../executive" } -sp-api = { path = "../../primitives/api", features = ["std"] } -sp-block-builder = { path = "../../primitives/block-builder", features = ["std"] } -sp-io = { path = "../../primitives/io", features = ["std"] } -sp-tracing = { path = "../../primitives/tracing", features = ["std"] } -sp-version = { path = "../../primitives/version", features = ["std"] } +frame-executive = { workspace = true, default-features = true } +sp-api = { features = ["std"], workspace = true, default-features = true } +sp-block-builder = { features = ["std"], workspace = true, default-features = true } +sp-io = { features = ["std"], workspace = true, default-features = true } +sp-tracing = { features = ["std"], workspace = true, default-features = true } +sp-version = { features = ["std"], workspace = true, default-features = true } -pretty_assertions = "1.3.0" +pretty_assertions = { workspace = true } [features] default = ["std"] @@ -46,7 +45,6 @@ std = [ "scale-info/std", "sp-core/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs index 649bc314a12b5..68041a57eaa2e 100644 --- a/substrate/frame/migrations/src/lib.rs +++ b/substrate/frame/migrations/src/lib.rs @@ -69,22 +69,22 @@ //! either be [`MigrationCursor::Active`] or [`MigrationCursor::Stuck`]. In the active case it //! points to the currently active migration and stores its inner cursor. The inner cursor can then //! be used by the migration to store its inner state and advance. Each time when the migration -//! returns `Some(cursor)`, it signals the pallet that it is not done yet. +//! returns `Some(cursor)`, it signals the pallet that it is not done yet. //! The cursor is reset on each runtime upgrade. This ensures that it starts to execute at the //! first migration in the vector. The pallets cursor is only ever incremented or set to `Stuck` //! once it encounters an error (Goal 4). Once in the stuck state, the pallet will stay stuck until -//! it is fixed through manual governance intervention. +//! it is fixed through manual governance intervention. //! As soon as the cursor of the pallet becomes `Some(_)`; [`MultiStepMigrator::ongoing`] returns //! `true` (Goal 2). This can be used by upstream code to possibly pause transactions. //! In `on_initialize` the pallet will load the current migration and check whether it was already //! executed in the past by checking for membership of its ID in the [`Historic`] set. Historic //! migrations are skipped without causing an error. Each successfully executed migration is added -//! to this set (Goal 5). +//! to this set (Goal 5). //! This proceeds until no more migrations remain. At that point, the event `UpgradeCompleted` is -//! emitted (Goal 1). +//! emitted (Goal 1). //! The execution of each migration happens by calling [`SteppedMigration::transactional_step`]. //! This function wraps the inner `step` function into a transactional layer to allow rollback in -//! the error case (Goal 6). +//! the error case (Goal 6). //! Weight limits must be checked by the migration itself. The pallet provides a [`WeightMeter`] for //! that purpose. The pallet may return [`SteppedMigrationError::InsufficientWeight`] at any point. //! 
In that scenario, one of two things will happen: if that migration was exclusively executed @@ -145,9 +145,12 @@ pub mod mock_helpers; mod tests; pub mod weights; +extern crate alloc; + pub use pallet::*; pub use weights::WeightInfo; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::ControlFlow; use frame_support::{ @@ -159,7 +162,6 @@ use frame_support::{ }; use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System}; use sp_runtime::Saturating; -use sp_std::vec::Vec; /// Points to the next migration to execute. #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs index d230417d12e6c..9d3b4d1193f21 100644 --- a/substrate/frame/migrations/src/mock_helpers.rs +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -19,6 +19,7 @@ #![allow(missing_docs)] +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ migrations::*, @@ -26,7 +27,6 @@ use frame_support::{ }; use sp_core::ConstU32; use sp_runtime::BoundedVec; -use sp_std::{vec, vec::Vec}; /// Opaque identifier of a migration. pub type MockedIdentifier = BoundedVec>; diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 44a567d668fb3..a9980ac268bcb 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } -frame-support = { default-features = false, path = "../support" } -frame-system = { default-features = false, path = "../system" } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } -sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } -sp-io = { default-features = false, path = "../../primitives/io" } -sp-mixnet = { default-features = false, path = "../../primitives/mixnet" } -sp-runtime = { default-features = false, path = "../../primitives/runtime" } -sp-std = { default-features = false, path = "../../primitives/std" } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-mixnet = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-io/std", "sp-mixnet/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index c7a5b624157b8..c0505a4f01057 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -21,7 +21,11 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; 
+use core::cmp::Ordering; use frame_support::{ traits::{EstimateNextSessionRotation, Get, OneSessionHandler}, BoundedVec, @@ -41,7 +45,6 @@ use sp_mixnet::types::{ SessionPhase, SessionStatus, KX_PUBLIC_SIZE, }; use sp_runtime::RuntimeDebug; -use sp_std::{cmp::Ordering, vec::Vec}; const LOG_TARGET: &str = "runtime::mixnet"; diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 649a7100325f9..c1571c6c0300a 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -16,20 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } # third party log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,7 +42,6 @@ std = [ "scale-info/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index a83b78e316f50..51c36773bdad3 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -48,6 +48,9 @@ pub mod migrations; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{ @@ -66,7 +69,6 @@ use sp_runtime::{ traits::{Dispatchable, TrailingZeroInput, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index e2a7e34c637b4..ca2e70cbe6cef 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-assets = { path = "../assets", default-features = false } -pallet-nfts = { path = "../nfts", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = 
"../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-assets = { workspace = true } +pallet-nfts = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/nft-fractionalization/src/benchmarking.rs b/substrate/frame/nft-fractionalization/src/benchmarking.rs index 0b54acdab49ea..811b5fe1b3177 100644 --- a/substrate/frame/nft-fractionalization/src/benchmarking.rs +++ b/substrate/frame/nft-fractionalization/src/benchmarking.rs @@ -32,7 +32,6 @@ use frame_support::{ use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; use pallet_nfts::{CollectionConfig, CollectionSettings, ItemConfig, MintSettings}; use sp_runtime::traits::StaticLookup; -use sp_std::prelude::*; use crate::Pallet as NftFractionalization; diff --git a/substrate/frame/nft-fractionalization/src/lib.rs b/substrate/frame/nft-fractionalization/src/lib.rs index cb269f464c48a..5fa990ecebe66 100644 --- a/substrate/frame/nft-fractionalization/src/lib.rs +++ b/substrate/frame/nft-fractionalization/src/lib.rs @@ -56,6 +56,7 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; + use core::fmt::Display; use frame_support::{ dispatch::DispatchResult, ensure, @@ -83,7 +84,6 @@ pub mod pallet { use frame_system::pallet_prelude::*; use scale_info::prelude::{format, string::String}; use sp_runtime::traits::{One, Zero}; - use sp_std::{fmt::Display, prelude::*}; #[pallet::pallet] pub struct Pallet(_); diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 82a6088162600..50b41b5fc64e1 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -57,20 +57,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 5c5c011c94ea2..e3ffd971a2bb6 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -16,21 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = 
"3.6.12", default-features = false } -enumflags2 = { version = "0.7.7" } +codec = { workspace = true } +enumflags2 = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-keystore = { path = "../../primitives/keystore" } +pallet-balances = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "sp-io/std", "sp-keystore/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 6bee98fb51e0c..4125aa1aab00b 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -16,11 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -pallet-nfts = { path = "..", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +pallet-nfts = { workspace = true } +sp-api = { workspace = true } [features] default = ["std"] -std = ["codec/std", "pallet-nfts/std", "sp-api/std", "sp-std/std"] +std = ["codec/std", "pallet-nfts/std", "sp-api/std"] diff --git a/substrate/frame/nfts/runtime-api/src/lib.rs b/substrate/frame/nfts/runtime-api/src/lib.rs index 816088f1b716a..87faa7909851e 100644 --- a/substrate/frame/nfts/runtime-api/src/lib.rs +++ b/substrate/frame/nfts/runtime-api/src/lib.rs @@ -19,8 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::vec::Vec; sp_api::decl_runtime_apis! 
{ pub trait NftsApi diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index 8792af675fc16..bc81096b459dc 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -30,12 +30,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; -use sp_io::crypto::{sr25519_generate, sr25519_sign}; -use sp_runtime::{ - traits::{Bounded, IdentifyAccount, One}, - AccountId32, MultiSignature, MultiSigner, -}; -use sp_std::prelude::*; +use sp_runtime::traits::{Bounded, One}; use crate::Pallet as Nfts; @@ -229,12 +224,6 @@ fn make_filled_vec(value: u16, length: usize) -> Vec { } benchmarks_instance_pallet! { - where_clause { - where - T::OffchainSignature: From, - T::AccountId: From, - } - create { let collection = T::Helper::collection(0); let origin = T::CreateOrigin::try_successful_origin(&collection) @@ -800,8 +789,7 @@ benchmarks_instance_pallet! { mint_pre_signed { let n in 0 .. T::MaxAttributesPerCall::get() as u32; - let caller_public = sr25519_generate(0.into(), None); - let caller = MultiSigner::Sr25519(caller_public).into_account().into(); + let (caller_public, caller) = T::Helper::signer(); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let caller_lookup = T::Lookup::unlookup(caller.clone()); @@ -830,7 +818,7 @@ benchmarks_instance_pallet! { mint_price: Some(DepositBalanceOf::::min_value()), }; let message = Encode::encode(&mint_data); - let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &caller_public, &message).unwrap()); + let signature = T::Helper::sign(&caller_public, &message); let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); @@ -848,8 +836,7 @@ benchmarks_instance_pallet! { let item_owner: T::AccountId = account("item_owner", 0, SEED); let item_owner_lookup = T::Lookup::unlookup(item_owner.clone()); - let signer_public = sr25519_generate(0.into(), None); - let signer: T::AccountId = MultiSigner::Sr25519(signer_public).into_account().into(); + let (signer_public, signer) = T::Helper::signer(); T::Currency::make_free_balance_be(&item_owner, DepositBalanceOf::::max_value()); @@ -876,7 +863,7 @@ benchmarks_instance_pallet! { deadline: One::one(), }; let message = Encode::encode(&pre_signed_data); - let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &signer_public, &message).unwrap()); + let signature = T::Helper::sign(&signer_public, &message); frame_system::Pallet::::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) diff --git a/substrate/frame/nfts/src/common_functions.rs b/substrate/frame/nfts/src/common_functions.rs index 1ad523d664c7c..2c4778c1444f7 100644 --- a/substrate/frame/nfts/src/common_functions.rs +++ b/substrate/frame/nfts/src/common_functions.rs @@ -18,6 +18,7 @@ //! Various pieces of common functionality. use crate::*; +use alloc::vec::Vec; use frame_support::pallet_prelude::*; impl, I: 'static> Pallet { diff --git a/substrate/frame/nfts/src/features/metadata.rs b/substrate/frame/nfts/src/features/metadata.rs index 85edd294d50b7..260061603e9cc 100644 --- a/substrate/frame/nfts/src/features/metadata.rs +++ b/substrate/frame/nfts/src/features/metadata.rs @@ -18,6 +18,7 @@ //! This module contains helper methods to configure the metadata of collections and items. 
use crate::*; +use alloc::vec::Vec; use frame_support::pallet_prelude::*; impl<T: Config<I>, I: 'static> Pallet<T, I> { diff --git a/substrate/frame/nfts/src/features/roles.rs b/substrate/frame/nfts/src/features/roles.rs index f6d2785fd9cb4..aa6394f70bfd4 100644 --- a/substrate/frame/nfts/src/features/roles.rs +++ b/substrate/frame/nfts/src/features/roles.rs @@ -18,8 +18,8 @@ //! This module contains helper methods to configure account roles for existing collections. use crate::*; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use frame_support::pallet_prelude::*; -use sp_std::collections::btree_map::BTreeMap; impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Set the team roles for a specific collection. diff --git a/substrate/frame/nfts/src/impl_nonfungibles.rs b/substrate/frame/nfts/src/impl_nonfungibles.rs index ee7f42cfc689c..c90655aadbfc6 100644 --- a/substrate/frame/nfts/src/impl_nonfungibles.rs +++ b/substrate/frame/nfts/src/impl_nonfungibles.rs @@ -25,7 +25,6 @@ use frame_support::{ BoundedSlice, }; use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::prelude::*; impl<T: Config<I>, I: 'static> Inspect<<T as SystemConfig>::AccountId> for Pallet<T, I> { type ItemId = T::ItemId; diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 615720268fed6..4e5493a3c7551 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -48,6 +48,9 @@ mod types; pub mod macros; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::traits::{ tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, Incrementable, @@ -58,7 +61,6 @@ use sp_runtime::{ traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; -use sp_std::prelude::*; pub use pallet::*; pub use types::*; @@ -84,18 +86,42 @@ pub mod pallet { pub struct Pallet<T, I = ()>(PhantomData<(T, I)>); #[cfg(feature = "runtime-benchmarks")] - pub trait BenchmarkHelper<CollectionId, ItemId> { + pub trait BenchmarkHelper<CollectionId, ItemId, Public, AccountId, Signature> { fn collection(i: u16) -> CollectionId; fn item(i: u16) -> ItemId; + fn signer() -> (Public, AccountId); + fn sign(signer: &Public, message: &[u8]) -> Signature; } #[cfg(feature = "runtime-benchmarks")] - impl<CollectionId: From<u16>, ItemId: From<u16>> BenchmarkHelper<CollectionId, ItemId> for () { + impl<CollectionId, ItemId> + BenchmarkHelper< + CollectionId, + ItemId, + sp_runtime::MultiSigner, + sp_runtime::AccountId32, + sp_runtime::MultiSignature, + > for () + where + CollectionId: From<u16>, + ItemId: From<u16>, + { fn collection(i: u16) -> CollectionId { i.into() } fn item(i: u16) -> ItemId { i.into() } + fn signer() -> (sp_runtime::MultiSigner, sp_runtime::AccountId32) { + let public = sp_io::crypto::sr25519_generate(0.into(), None); + let account = sp_runtime::MultiSigner::Sr25519(public).into_account(); + (public.into(), account) + } + fn sign(signer: &sp_runtime::MultiSigner, message: &[u8]) -> sp_runtime::MultiSignature { + sp_runtime::MultiSignature::Sr25519( + sp_io::crypto::sr25519_sign(0.into(), &signer.clone().try_into().unwrap(), message) + .unwrap(), + ) + } } #[pallet::config] @@ -206,7 +232,13 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] /// A set of helper functions for benchmarking. - type Helper: BenchmarkHelper<Self::CollectionId, Self::ItemId>; + type Helper: BenchmarkHelper< + Self::CollectionId, + Self::ItemId, + Self::OffchainPublic, + Self::AccountId, + Self::OffchainSignature, + >; /// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo; diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 51cfd5f244bcd..5b589f591ca34 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -53,20 +53,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs index 4d23aca64ceb1..e1b598ca4261c 100644 --- a/substrate/frame/nfts/src/tests.rs +++ b/substrate/frame/nfts/src/tests.rs @@ -32,7 +32,6 @@ use sp_runtime::{ traits::{Dispatchable, IdentifyAccount}, MultiSignature, MultiSigner, }; -use sp_std::prelude::*; type AccountIdOf = ::AccountId; diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index 5a9f6ae2f0e21..1687a03520afe 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -19,6 +19,7 @@ use super::*; use crate::macros::*; +use alloc::{vec, vec::Vec}; use codec::EncodeLike; use enumflags2::{bitflags, BitFlags}; use frame_support::{ diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 1e3a0609c46bb..fb6c25789b26d 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-io = { path = "../../primitives/io" } +pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,7 +42,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/nis/src/benchmarking.rs b/substrate/frame/nis/src/benchmarking.rs index f6a83b78d5181..2c7ad651f9903 100644 --- a/substrate/frame/nis/src/benchmarking.rs +++ 
b/substrate/frame/nis/src/benchmarking.rs @@ -30,7 +30,6 @@ use sp_runtime::{ traits::{Bounded, One, Zero}, DispatchError, PerThing, }; -use sp_std::prelude::*; use crate::Pallet as Nis; diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index f38755836fb9f..016daa4cb78be 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -76,6 +76,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use frame_support::traits::{ fungible::{self, Inspect as FunInspect, Mutate as FunMutate}, tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence}, @@ -95,7 +97,7 @@ mod mock; mod tests; pub mod weights; -pub struct WithMaximumOf(sp_std::marker::PhantomData); +pub struct WithMaximumOf(core::marker::PhantomData); impl Convert for WithMaximumOf where A::Type: Clone + Unsigned + From, @@ -116,7 +118,7 @@ where } } -pub struct NoCounterpart(sp_std::marker::PhantomData); +pub struct NoCounterpart(core::marker::PhantomData); impl FunInspect for NoCounterpart { type Balance = u32; fn total_issuance() -> u32 { @@ -171,6 +173,7 @@ impl BenchmarkSetup for () { pub mod pallet { use super::{FunInspect, FunMutate}; pub use crate::weights::WeightInfo; + use alloc::{vec, vec::Vec}; use frame_support::{ pallet_prelude::*, traits::{ @@ -193,7 +196,6 @@ pub mod pallet { traits::{AccountIdConversion, Bounded, Convert, ConvertBack, Saturating, Zero}, Rounding, TokenError, }; - use sp_std::prelude::*; type BalanceOf = <::Currency as FunInspect<::AccountId>>::Balance; @@ -372,7 +374,7 @@ pub mod pallet { pub receipts_on_hold: Balance, } - pub struct OnEmptyQueueTotals(sp_std::marker::PhantomData); + pub struct OnEmptyQueueTotals(core::marker::PhantomData); impl Get> for OnEmptyQueueTotals { fn get() -> QueueTotalsTypeOf { BoundedVec::truncate_from(vec![ @@ -573,7 +575,7 @@ pub mod pallet { // queue is let mut bid = Bid { amount, who: who.clone() }; let net = if queue_full { - sp_std::mem::swap(&mut q[0], &mut bid); + core::mem::swap(&mut q[0], &mut bid); let _ = T::Currency::release( &HoldReason::NftReceipt.into(), &bid.who, @@ -755,7 +757,13 @@ pub mod pallet { // We ignore this error as it just means the amount we're trying to deposit is // dust and the beneficiary account doesn't exist. 
.or_else( - |e| if e == TokenError::CannotCreate.into() { Ok(()) } else { Err(e) }, + |e| { + if e == TokenError::CannotCreate.into() { + Ok(()) + } else { + Err(e) + } + }, )?; summary.receipts_on_hold.saturating_reduce(on_hold); } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 17ed16d262336..b56a76ce641aa 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -15,15 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -36,7 +35,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index a7967536079f9..7682b54ea0f24 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -44,10 +44,12 @@ mod tests; pub mod weights; +extern crate alloc; + +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; use sp_runtime::traits::StaticLookup; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index bf4e01a318479..bfcc92edb7e2e 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -16,30 +16,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # FRAME -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } 
+sp-staking = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } log = { workspace = true } # Optional: use for testing and/or fuzzing -pallet-balances = { path = "../balances", optional = true, default-features = false } -sp-tracing = { path = "../../primitives/tracing", optional = true, default-features = false } +pallet-balances = { optional = true, workspace = true } +sp-tracing = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] @@ -55,7 +54,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", "sp-tracing?/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 3f9463a9c429b..e73a208926ee5 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -17,31 +17,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } # FRAME -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-election-provider-support = { path = "../../election-provider-support", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-bags-list = { path = "../../bags-list", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -pallet-delegated-staking = { path = "../../delegated-staking", default-features = false } -pallet-nomination-pools = { path = "..", default-features = false } +frame-benchmarking = { workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-bags-list = { workspace = true } +pallet-staking = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-nomination-pools = { workspace = true } # Substrate Primitives -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } -sp-staking = { path = "../../../primitives/staking", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../../balances", default-features = false } -pallet-timestamp = { path = "../../timestamp" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = 
true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -64,7 +63,6 @@ std = [ "sp-runtime-interface/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index b8c978945e9ee..2a45594251112 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -17,6 +17,7 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{account, whitelist_account}; use frame_election_provider_support::SortedListProvider; use frame_support::{ @@ -41,7 +42,6 @@ use sp_runtime::{ Perbill, }; use sp_staking::EraIndex; -use sp_std::{vec, vec::Vec}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index 910cdf2e3dff6..feb73be716818 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -20,6 +20,8 @@ #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 7cbb61e00a31a..15d9e2c56031f 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,13 @@ use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; -use frame_support::{derive_impl, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; +use frame_support::{ + derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, VariantCountOf}, + PalletId, +}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -45,20 +51,16 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = 10; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -74,36 +76,19 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = ConstU32<3>; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/fuzzer/Cargo.toml b/substrate/frame/nomination-pools/fuzzer/Cargo.toml index c0d63a2685937..7c45dcef256fb 100644 --- a/substrate/frame/nomination-pools/fuzzer/Cargo.toml +++ b/substrate/frame/nomination-pools/fuzzer/Cargo.toml @@ -17,18 +17,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -honggfuzz = "0.5.54" +honggfuzz = { workspace = true } -pallet-nomination-pools = { path = "..", features = ["fuzzing"] } +pallet-nomination-pools = { features = ["fuzzing"], workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-tracing = { path = "../../../primitives/tracing" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } -rand = { version = "0.8.5", features = ["small_rng"] } +rand = { features = ["small_rng"], workspace = true, default-features = true } log = { workspace = true, default-features = true } [[bin]] diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index a0ddac9e04567..2f91e550bc053 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -16,11 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -pallet-nomination-pools = { path = "..", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +pallet-nomination-pools = { 
workspace = true } [features] default = ["std"] -std = ["codec/std", "pallet-nomination-pools/std", "sp-api/std", "sp-std/std"] +std = ["codec/std", "pallet-nomination-pools/std", "sp-api/std"] diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs index 4809fbc0e9da0..4d571855e4fe8 100644 --- a/substrate/frame/nomination-pools/src/adapter.rs +++ b/substrate/frame/nomination-pools/src/adapter.rs @@ -83,7 +83,7 @@ impl Member { /// [`DelegateStake`] for more detail. pub trait StakeStrategy { type Balance: frame_support::traits::tokens::Balance; - type AccountId: Clone + sp_std::fmt::Debug; + type AccountId: Clone + core::fmt::Debug; type CoreStaking: StakingInterface; /// The type of staking strategy of the current adapter. diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 2aaea04463661..472f0affcc735 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -351,8 +351,12 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use adapter::{Member, Pool, StakeStrategy}; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use codec::Codec; +use core::{fmt::Debug, ops::Div}; use frame_support::{ defensive, defensive_assert, ensure, pallet_prelude::{MaxEncodedLen, *}, @@ -375,7 +379,6 @@ use sp_runtime::{ FixedPointNumber, Perbill, }; use sp_staking::{EraIndex, StakingInterface}; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] use sp_runtime::TryRuntimeError; @@ -494,7 +497,6 @@ impl ClaimPermission { frame_support::PartialEqNoBound, )] #[cfg_attr(feature = "std", derive(DefaultNoBound))] -#[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] pub struct PoolMember { /// The identifier of the pool to which `who` belongs. @@ -950,14 +952,14 @@ pub struct BondedPool { inner: BondedPoolInner, } -impl sp_std::ops::Deref for BondedPool { +impl core::ops::Deref for BondedPool { type Target = BondedPoolInner; fn deref(&self) -> &Self::Target { &self.inner } } -impl sp_std::ops::DerefMut for BondedPool { +impl core::ops::DerefMut for BondedPool { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } @@ -2601,7 +2603,7 @@ pub mod pallet { ) -> DispatchResult { let mut bonded_pool = match ensure_root(origin.clone()) { Ok(()) => BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?, - Err(frame_support::error::BadOrigin) => { + Err(sp_runtime::traits::BadOrigin) => { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index a9222ea53d75f..d8697364a76c5 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -17,8 +17,8 @@ use super::*; use crate::log; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; -use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -60,7 +60,7 @@ pub mod unversioned { use super::*; /// Checks and updates `TotalValueLocked` if out of sync. 
- pub struct TotalValueLockedSync<T>(sp_std::marker::PhantomData<T>); + pub struct TotalValueLockedSync<T>(core::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for TotalValueLockedSync<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> { @@ -125,7 +125,7 @@ pub mod unversioned { /// /// If there are pools that fail to migrate or did not fit in the bounds, the remaining pools /// can be migrated via the permission-less extrinsic [`Call::migrate_pool_to_delegate_stake`]. - pub struct DelegationStakeMigration<T, MaxPools>(sp_std::marker::PhantomData<(T, MaxPools)>); + pub struct DelegationStakeMigration<T, MaxPools>(core::marker::PhantomData<(T, MaxPools)>); impl<T: Config, MaxPools: Get<u32>> OnRuntimeUpgrade for DelegationStakeMigration<T, MaxPools> { fn on_runtime_upgrade() -> Weight { @@ -262,7 +262,7 @@ pub mod v8 { } } - pub struct VersionUncheckedMigrateV7ToV8<T>(sp_std::marker::PhantomData<T>); + pub struct VersionUncheckedMigrateV7ToV8<T>(core::marker::PhantomData<T>); impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> { @@ -341,7 +341,7 @@ pub(crate) mod v7 { pub type BondedPools<T: Config> = CountedStorageMap<Pallet<T>, Twox64Concat, PoolId, V7BondedPoolInner<T>>; - pub struct VersionUncheckedMigrateV6ToV7<T>(sp_std::marker::PhantomData<T>); + pub struct VersionUncheckedMigrateV6ToV7<T>(core::marker::PhantomData<T>); impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7<T> { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::<T>::count(); @@ -402,7 +402,7 @@ mod v6 { /// This migration would restrict reward account of pools to go below ED by doing a named /// freeze on all the existing pools. - pub struct MigrateToV6<T>(sp_std::marker::PhantomData<T>); + pub struct MigrateToV6<T>(core::marker::PhantomData<T>); impl<T: Config> MigrateToV6<T> { fn freeze_ed(pool_id: PoolId) -> Result<(), ()> { @@ -470,7 +470,7 @@ pub mod v5 { /// This migration adds `total_commission_pending` and `total_commission_claimed` field to every /// `RewardPool`, if any. - pub struct MigrateToV5<T>(sp_std::marker::PhantomData<T>); + pub struct MigrateToV5<T>(core::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV5<T> { fn on_runtime_upgrade() -> Weight { let in_code = Pallet::<T>::in_code_storage_version(); @@ -625,7 +625,7 @@ pub mod v4 { #[deprecated( note = "To avoid mangled storage please use `MigrateV3ToV5` instead. See: github.com/paritytech/substrate/pull/13715" )] - pub struct MigrateToV4<T, U>(sp_std::marker::PhantomData<(T, U)>); + pub struct MigrateToV4<T, U>(core::marker::PhantomData<(T, U)>); #[allow(deprecated)] impl<T: Config, U: Get<Perbill>> OnRuntimeUpgrade for MigrateToV4<T, U> { fn on_runtime_upgrade() -> Weight { @@ -707,7 +707,7 @@ pub mod v3 { use super::*; /// This migration removes stale bonded-pool metadata, if any. - pub struct MigrateToV3<T>(sp_std::marker::PhantomData<T>); + pub struct MigrateToV3<T>(core::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV3<T> { fn on_runtime_upgrade() -> Weight { let current = Pallet::<T>::in_code_storage_version(); @@ -845,7 +845,7 @@ pub mod v2 { /// Migrate the pool reward scheme to the new version, as per /// . - pub struct MigrateToV2<T>(sp_std::marker::PhantomData<T>); + pub struct MigrateToV2<T>(core::marker::PhantomData<T>); impl<T: Config> MigrateToV2<T> { fn run(current: StorageVersion) -> Weight { let mut reward_pools_translated = 0u64; @@ -1104,7 +1104,7 @@ pub mod v1 { /// Trivial migration which makes the roles of each pool optional. /// /// Note: The depositor is not optional since they can never change.
- pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { let current = Pallet::::in_code_storage_version(); diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 93fe6aa56054d..6c0082073f682 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -18,7 +18,8 @@ use super::*; use crate::{self as pools}; use frame_support::{ - assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::fungible::Mutate, + assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{fungible::Mutate, VariantCountOf}, PalletId, }; use frame_system::{EnsureSignedBy, RawOrigin}; @@ -251,20 +252,14 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pub struct BalanceToU256; diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index ea8eb20696931..ea50dd6d732d3 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -16,26 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-timestamp = { path = "../../timestamp" } -pallet-balances = { path = "../../balances" } -pallet-staking = { path = "../../staking" } -pallet-delegated-staking = { path = "../../delegated-staking" } -pallet-bags-list = { path = "../../bags-list" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } 
-pallet-nomination-pools = { path = ".." } +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-delegated-staking = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 820f2b7718ce4..ed47932a323bf 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use frame_system::EnsureRoot; @@ -63,20 +63,15 @@ parameter_types! { pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; + type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -95,36 +90,20 @@ parameter_types! { pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = (Pools, DelegatedStaking); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml index 5f9bc9af3a214..8bc5676cfe916 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = { version = "2.11.1", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-std = { path = "../../../primitives/std" } -sp-staking = { path = "../../../primitives/staking" } -sp-core = { path = "../../../primitives/core" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } -frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } -pallet-timestamp = { path = "../../timestamp" } -pallet-balances = { path = "../../balances" } -pallet-staking = { path = "../../staking" } -pallet-bags-list = { path = "../../bags-list" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-nomination-pools = { path = ".." } +pallet-timestamp = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } -sp-tracing = { path = "../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index eb9d463424c8e..d913c5fe6948c 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, parameter_types, - traits::{ConstU64, ConstU8}, + traits::{ConstU64, ConstU8, VariantCountOf}, PalletId, }; use sp_runtime::{ @@ -56,20 +56,14 @@ parameter_types! 
{ pub static ExistentialDeposit: Balance = 5; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type MaxFreezes = VariantCountOf; + type RuntimeFreezeReason = RuntimeFreezeReason; } pallet_staking_reward_curve::build! { @@ -88,36 +82,20 @@ parameter_types! { pub static BondingDuration: u32 = 3; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Runtime { type Currency = Balances; - type CurrencyBalance = Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; - type SessionInterface = (); type EraPayout = pallet_staking::ConvertCurve; - type NextNewSession = (); - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index a59ef9334f0bc..9cf5e911a9d3a 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -16,20 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index bbd918a2883f3..eb97eb3d4132a 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -16,29 +16,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-election-provider-support = { path = "../../election-provider-support", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-babe = { path = "../../babe", default-features = false } -pallet-balances = { path = "../../balances", default-features = false } -pallet-grandpa = { path = "../../grandpa", default-features = false } -pallet-im-online = { path = "../../im-online", default-features = false } -pallet-offences = { path = "..", default-features = false } -pallet-session = { path = "../../session", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-staking = { path = "../../../primitives/staking", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { 
workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-im-online = { workspace = true } +pallet-offences = { workspace = true } +pallet-session = { workspace = true } +pallet-staking = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-timestamp = { path = "../../timestamp" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -62,7 +61,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 9aa88f7a0d6d0..b16e5be653d1e 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -17,7 +17,7 @@ //! Offences pallet benchmarking. -use sp_std::{prelude::*, vec}; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{account, benchmarks}; use frame_support::traits::{Currency, Get}; diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index b08955a133297..f696546d1f0a0 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 6cbdde5785282..e243ad0e718eb 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -41,20 +41,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = ConstU32<128>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -134,35 +124,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; 
type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index a328b2fee4e2e..ffea32a1f4703 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -26,16 +26,17 @@ pub mod migration; mod mock; mod tests; -use core::marker::PhantomData; +extern crate alloc; +use alloc::vec::Vec; use codec::Encode; +use core::marker::PhantomData; use frame_support::weights::Weight; use sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ offence::{Kind, Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, SessionIndex, }; -use sp_std::prelude::*; pub use pallet::*; diff --git a/substrate/frame/offences/src/migration.rs b/substrate/frame/offences/src/migration.rs index 199f47491369b..abf8acd94875d 100644 --- a/substrate/frame/offences/src/migration.rs +++ b/substrate/frame/offences/src/migration.rs @@ -16,6 +16,7 @@ // limitations under the License. use super::{Config, Kind, OffenceDetails, Pallet, Perbill, SessionIndex, LOG_TARGET}; +use alloc::vec::Vec; use frame_support::{ pallet_prelude::ValueQuery, storage_alias, @@ -24,7 +25,6 @@ use frame_support::{ Twox64Concat, }; use sp_staking::offence::OnOffenceHandler; -use sp_std::vec::Vec; #[cfg(feature = "try-runtime")] use frame_support::ensure; @@ -49,7 +49,7 @@ pub mod v1 { use super::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); + pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index f550e69434946..597bdd37f57fb 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -15,19 +15,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-metadata-ir 
= { path = "../../primitives/metadata-ir", default-features = false, optional = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-metadata-ir = { optional = true, workspace = true } [features] default = ["std"] @@ -42,7 +41,6 @@ std = [ "sp-io/std", "sp-metadata-ir/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index 6ff07ba1ddd2f..bfdacfd8dd03f 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -17,9 +17,9 @@ name = "pallet-paged-list-fuzzer" path = "src/paged_list.rs" [dependencies] -arbitrary = "1.3.2" -honggfuzz = "0.5.49" +arbitrary = { workspace = true } +honggfuzz = { workspace = true } -frame-support = { path = "../../support", default-features = false, features = ["std"] } -sp-io = { path = "../../../primitives/io", default-features = false, features = ["std"] } -pallet-paged-list = { path = "..", default-features = false, features = ["std"] } +frame-support = { features = ["std"], workspace = true } +sp-io = { features = ["std"], workspace = true } +pallet-paged-list = { features = ["std"], workspace = true } diff --git a/substrate/frame/paged-list/src/lib.rs b/substrate/frame/paged-list/src/lib.rs index ddeed174f34bb..ed68dac63beb6 100644 --- a/substrate/frame/paged-list/src/lib.rs +++ b/substrate/frame/paged-list/src/lib.rs @@ -58,7 +58,7 @@ //! //! ## Low Level / Implementation Details //! -//! Implementation details are documented in [`paged_list::StoragePagedList`]. +//! Implementation details are documented in [`paged_list::StoragePagedList`]. //! All storage entries are prefixed with a unique prefix that is generated by [`ListPrefix`]. #![cfg_attr(not(feature = "std"), no_std)] @@ -69,6 +69,8 @@ pub mod mock; mod paged_list; mod tests; +extern crate alloc; + use codec::FullCodec; use frame_support::{ pallet_prelude::StorageList, diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs index eecc728cd62a1..bbd889e252180 100644 --- a/substrate/frame/paged-list/src/paged_list.rs +++ b/substrate/frame/paged-list/src/paged_list.rs @@ -23,6 +23,7 @@ #![deny(missing_docs)] #![deny(unsafe_code)] +use alloc::vec::Vec; use codec::{Decode, Encode, EncodeLike, FullCodec}; use core::marker::PhantomData; use frame_support::{ @@ -32,7 +33,6 @@ use frame_support::{ CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, }; use sp_runtime::traits::Saturating; -use sp_std::prelude::*; pub type PageIndex = u32; pub type ValueIndex = u32; @@ -177,7 +177,7 @@ pub struct Page { /// The index of the page. index: PageIndex, /// The remaining values of the page, to be drained by [`Page::next`]. - values: sp_std::iter::Skip>, + values: core::iter::Skip>, } impl Page { @@ -188,7 +188,7 @@ impl Page { ) -> Option { let key = page_key::(index); let values = sp_io::storage::get(&key) - .and_then(|raw| sp_std::vec::Vec::::decode(&mut &raw[..]).ok())?; + .and_then(|raw| alloc::vec::Vec::::decode(&mut &raw[..]).ok())?; if values.is_empty() { // Don't create empty pages. 
return None diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index c4d6d189d2d2c..a97ba1172a503 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -8,24 +8,23 @@ authors = ["Acala Developers", "Parity Technologies "] edition.workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -paste = { version = "1.0.14", default-features = false } +codec = { features = ["max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +paste = { workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -docify = "0.2.8" +docify = { workspace = true } -frame-support = { path = "../support", default-features = false, features = ["experimental"] } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", features = ["std"] } -sp-io = { path = "../../primitives/io", features = ["std"] } -pallet-example-basic = { path = "../examples/basic", features = ["std"] } -pallet-balances = { path = "../balances", features = ["std"] } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-io = { features = ["std"], workspace = true, default-features = true } +pallet-example-basic = { features = ["std"], workspace = true, default-features = true } +pallet-balances = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] @@ -38,7 +37,6 @@ std = [ "serde", "sp-core/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/parameters/src/tests/mock.rs b/substrate/frame/parameters/src/tests/mock.rs index 6cfd7c8f30b81..53a3b3e394c4b 100644 --- a/substrate/frame/parameters/src/tests/mock.rs +++ b/substrate/frame/parameters/src/tests/mock.rs @@ -37,7 +37,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/parameters/src/tests/test_renamed.rs b/substrate/frame/parameters/src/tests/test_renamed.rs index cfc870fbe1096..7c371c5e55f87 100644 --- a/substrate/frame/parameters/src/tests/test_renamed.rs +++ b/substrate/frame/parameters/src/tests/test_renamed.rs @@ -39,7 +39,6 @@ impl frame_system::Config for Runtime { #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type ReserveIdentifier = [u8; 8]; type AccountStore = System; } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index d420accbd6d91..425c4e81aa7b3 100644 --- 
a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -12,20 +12,19 @@ description = "FRAME pallet for storing preimages of hashes" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { optional = true, workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index d0c3404f40a91..2d3bec16b8183 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -18,11 +18,11 @@ //! Preimage pallet benchmarking. use super::*; +use alloc::vec; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use sp_std::{prelude::*, vec}; use crate::Pallet as Preimage; @@ -116,7 +116,7 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap().unwrap(); let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 4e47468516663..30056fc6d9a49 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -37,11 +37,13 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{borrow::Cow, vec::Vec}; use sp_runtime::{ traits::{BadOrigin, Hash, Saturating}, Perbill, }; -use sp_std::{borrow::Cow, prelude::*}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ @@ -122,7 +124,9 @@ pub mod pallet { type ManagerOrigin: EnsureOrigin; /// A means of providing some cost while data is stored on-chain. - type Consideration: Consideration; + /// + /// Should never return a `None`, implying no cost for a non-empty preimage. 
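The doc comment above captures the behavioural change: `Consideration::new` can now report "no cost" by returning `None`, and the pallet treats that as an error rather than storing a free preimage. A self-contained stand-in (hypothetical trait and names, not the `frame_support` API) showing the calling convention the hunks below adopt:

```rust
// Hypothetical stand-in, not the frame_support `Consideration` trait: `new` may
// return `Ok(None)` for a zero-cost footprint, and callers map that to `NoCost`.
#[derive(Debug, PartialEq)]
pub enum NoteError {
    NoCost,
}

pub trait CostTicket<AccountId>: Sized {
    /// `Ok(None)` means holding this footprint costs nothing.
    fn new(who: &AccountId, size_bytes: u64) -> Result<Option<Self>, NoteError>;
}

pub struct Ticket;

impl<AccountId> CostTicket<AccountId> for Ticket {
    fn new(_who: &AccountId, size_bytes: u64) -> Result<Option<Self>, NoteError> {
        Ok((size_bytes > 0).then_some(Ticket))
    }
}

/// Mirrors the pallet's `T::Consideration::new(..)?.ok_or(Error::<T>::NoCost)?` shape.
pub fn take_deposit<A>(who: &A, len: u64) -> Result<Ticket, NoteError> {
    <Ticket as CostTicket<A>>::new(who, len)?.ok_or(NoteError::NoCost)
}
```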
+ type Consideration: Consideration; } #[pallet::pallet] @@ -158,6 +162,8 @@ pub mod pallet { TooMany, /// Too few hashes were requested to be upgraded (i.e. zero). TooFew, + /// No ticket with a cost was returned by [`Config::Consideration`] to store the preimage. + NoCost, } /// A reason for this pallet placing a hold on funds. @@ -268,10 +274,10 @@ impl Pallet { // unreserve deposit T::Currency::unreserve(&who, amount); // take consideration - let Ok(ticket) = + let Ok(Some(ticket)) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof("Unexpected inability to take deposit after unreserved") else { + defensive!("None ticket or inability to take deposit after unreserved"); return true }; RequestStatus::Unrequested { ticket: (who, ticket), len } @@ -282,12 +288,10 @@ impl Pallet { T::Currency::unreserve(&who, deposit); // take consideration if let Some(len) = maybe_len { - let Ok(ticket) = + let Ok(Some(ticket)) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) - .defensive_proof( - "Unexpected inability to take deposit after unreserved", - ) else { + defensive!("None ticket or inability to take deposit after unreserved"); return true }; Some((who, ticket)) @@ -347,7 +351,8 @@ impl Pallet { RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: Some(len) }, (None, Some(depositor)) => { let ticket = - T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))?; + T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))? + .ok_or(Error::::NoCost)?; RequestStatus::Unrequested { ticket: (depositor.clone(), ticket), len } }, }; diff --git a/substrate/frame/preimage/src/migration.rs b/substrate/frame/preimage/src/migration.rs index a86109f892a4f..e38483ee68c62 100644 --- a/substrate/frame/preimage/src/migration.rs +++ b/substrate/frame/preimage/src/migration.rs @@ -18,11 +18,11 @@ //! Storage migrations for the preimage pallet. use super::*; +use alloc::collections::btree_map::BTreeMap; use frame_support::{ storage_alias, traits::{ConstU32, OnRuntimeUpgrade}, }; -use sp_std::collections::btree_map::BTreeMap; #[cfg(feature = "try-runtime")] use frame_support::ensure; @@ -79,7 +79,7 @@ pub mod v1 { /// /// Note: This needs to be run with the same hashing algorithm as before /// since it is not re-hashing the preimages. 
- pub struct Migration(sp_std::marker::PhantomData); + pub struct Migration(core::marker::PhantomData); impl OnRuntimeUpgrade for Migration { #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 903c34596aeba..9c72d09cae146 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -22,7 +22,7 @@ use super::*; use crate as pallet_preimage; use frame_support::{ derive_impl, ord_parameter_types, parameter_types, - traits::{fungible::HoldConsideration, ConstU32, ConstU64}, + traits::{fungible::HoldConsideration, ConstU64}, }; use frame_system::EnsureSignedBy; use sp_core::H256; @@ -48,20 +48,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = (); } ord_parameter_types! { diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index fcebbb5f3e8a0..af2427da380a9 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,7 +42,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index e0d14163d21b2..4081af49c2435 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -21,6 +21,7 @@ use super::*; use crate::Pallet as Proxy; +use alloc::{boxed::Box, vec}; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; use 
frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::Bounded; diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 2b3fac5f59e4b..d681088165c8f 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -33,6 +33,9 @@ mod benchmarking; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::GetDispatchInfo, @@ -47,7 +50,6 @@ use sp_runtime::{ traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, DispatchError, DispatchResult, RuntimeDebug, }; -use sp_std::prelude::*; pub use weights::WeightInfo; type CallHashOf = <::CallHasher as Hash>::Output; diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index 3ed61fbedaaa9..3edb96026a82b 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -22,6 +22,7 @@ use super::*; use crate as proxy; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ assert_noop, assert_ok, derive_impl, diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 05ce76cad2bbe..dd9b466e0f919 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -16,18 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -impl-trait-for-tuples = "0.2.2" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +impl-trait-for-tuples = { workspace = true } [features] default = ["std"] @@ -42,7 +41,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/ranked-collective/src/benchmarking.rs b/substrate/frame/ranked-collective/src/benchmarking.rs index 462f55a238d2a..dc7f4aaca7735 100644 --- a/substrate/frame/ranked-collective/src/benchmarking.rs +++ b/substrate/frame/ranked-collective/src/benchmarking.rs @@ -20,6 +20,7 @@ use super::*; #[allow(unused_imports)] use crate::Pallet as RankedCollective; +use alloc::vec::Vec; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, diff --git a/substrate/frame/ranked-collective/src/lib.rs 
b/substrate/frame/ranked-collective/src/lib.rs index ceaf03de21100..e34cf3d8df711 100644 --- a/substrate/frame/ranked-collective/src/lib.rs +++ b/substrate/frame/ranked-collective/src/lib.rs @@ -40,7 +40,10 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode, MaxEncodedLen}; +use core::marker::PhantomData; use scale_info::TypeInfo; use sp_arithmetic::traits::Saturating; use sp_runtime::{ @@ -48,7 +51,6 @@ use sp_runtime::{ ArithmeticError::Overflow, DispatchError, Perbill, RuntimeDebug, }; -use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, @@ -379,6 +381,7 @@ pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, storage::KeyLenOf}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::MaybeConvert; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -431,6 +434,14 @@ pub mod pallet { /// in the poll. type VoteWeight: Convert; + /// The maximum number of members for a given rank in the collective. + /// + /// The member at rank `x` contributes to the count at rank `x` and all ranks below it. + /// Therefore, the limit `m` at rank `x` sets the maximum total member count for rank `x` + /// and all ranks above. + /// The `None` indicates no member count limit for the given rank. + type MaxMemberCount: MaybeConvert; + /// Setup a member for benchmarking. #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup: BenchmarkSetup; @@ -511,6 +522,8 @@ pub mod pallet { NoPermission, /// The new member to exchange is the same as the old member SameMember, + /// The max member count for the rank has been reached. + TooManyMembers, } #[pallet::call] @@ -758,6 +771,9 @@ pub mod pallet { ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); let index = MemberCount::::get(0); let count = index.checked_add(1).ok_or(Overflow)?; + if let Some(max) = T::MaxMemberCount::maybe_convert(0) { + ensure!(count <= max, Error::::TooManyMembers); + } Members::::insert(&who, MemberRecord { rank: 0 }); IdToIndex::::insert(0, &who, index); @@ -784,6 +800,11 @@ pub mod pallet { ensure!(max_rank >= rank, Error::::NoPermission); } let index = MemberCount::::get(rank); + let count = index.checked_add(1).ok_or(Overflow)?; + if let Some(max) = T::MaxMemberCount::maybe_convert(rank) { + ensure!(count <= max, Error::::TooManyMembers); + } + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); IdToIndex::::insert(rank, &who, index); IndexToId::::insert(rank, index, &who); diff --git a/substrate/frame/ranked-collective/src/tests.rs b/substrate/frame/ranked-collective/src/tests.rs index ad8b7d2a8018b..3a85928b76d5e 100644 --- a/substrate/frame/ranked-collective/src/tests.rs +++ b/substrate/frame/ranked-collective/src/tests.rs @@ -20,14 +20,12 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, derive_impl, - error::BadOrigin, - parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU16, EitherOf, MapSuccess, Polling}, }; use sp_core::Get; use sp_runtime::{ - traits::{ReduceBy, ReplaceWithDefault}, + traits::{BadOrigin, MaybeConvert, ReduceBy, ReplaceWithDefault}, BuildStorage, }; @@ -148,6 +146,17 @@ impl> Convert for MinRankOfClass { } } +pub struct MaxMemberCount; +impl MaybeConvert for MaxMemberCount { + fn maybe_convert(a: Rank) -> Option { + if a == 11 { + Some(2) + } else { + None + } + } +} + parameter_types! 
{ pub static MinRankOfClassDelta: Rank = 0; } @@ -179,6 +188,7 @@ impl Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = (); type VoteWeight = Geometric; + type MaxMemberCount = MaxMemberCount; #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = (); } @@ -645,3 +655,32 @@ fn exchange_member_same_noops() { ); }); } + +#[test] +fn max_member_count_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Club::do_add_member_to_rank(1, 10, false)); + assert_ok!(Club::do_add_member_to_rank(2, 10, false)); + assert_ok!(Club::do_add_member_to_rank(3, 10, false)); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 0); + + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_noop!(Club::promote_member(RuntimeOrigin::root(), 3), Error::::TooManyMembers); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_noop!(Club::promote_member(RuntimeOrigin::root(), 1), Error::::TooManyMembers); + assert_eq!(member_count(10), 3); + assert_eq!(member_count(11), 2); + assert_eq!(member_count(12), 2); + }); +} diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 2fd63597da9ca..42493b435105c 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -16,18 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/recovery/src/benchmarking.rs b/substrate/frame/recovery/src/benchmarking.rs index 72f77336212dd..b7639742a6202 100644 --- a/substrate/frame/recovery/src/benchmarking.rs +++ b/substrate/frame/recovery/src/benchmarking.rs @@ -20,6 +20,7 @@ use super::*; use crate::Pallet; +use alloc::{boxed::Box, vec, vec::Vec}; use frame_benchmarking::v1::{account, benchmarks, 
whitelisted_caller}; use frame_support::traits::{Currency, Get}; use frame_system::RawOrigin; diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index 5673147c8e005..69be4df971bc4 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -150,13 +150,15 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup}, RuntimeDebug, }; -use sp_std::prelude::*; use frame_support::{ dispatch::{GetDispatchInfo, PostDispatchInfo}, diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index bec7e02c128bb..8e30cbe997e17 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -47,20 +47,11 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = u128; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index dde522ff89b59..578486714d636 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -16,27 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -assert_matches = { version = "1.5", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +assert_matches = { optional = true, workspace = true } +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } log = { workspace = true } [dev-dependencies] -assert_matches = { version = "1.5" } -pallet-balances = { path = "../balances" } -pallet-preimage = { path = "../preimage" } -pallet-scheduler = { path = "../scheduler" } -sp-core = { path = "../../primitives/core" } +assert_matches = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +pallet-preimage = { workspace 
= true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -55,7 +54,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "assert_matches", diff --git a/substrate/frame/referenda/src/benchmarking.rs b/substrate/frame/referenda/src/benchmarking.rs index 47d43cc0600c0..67ac82787d31d 100644 --- a/substrate/frame/referenda/src/benchmarking.rs +++ b/substrate/frame/referenda/src/benchmarking.rs @@ -19,6 +19,7 @@ use super::*; use crate::Pallet as Referenda; +use alloc::{vec, vec::Vec}; use assert_matches::assert_matches; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelist_account, BenchmarkError, @@ -632,7 +633,7 @@ benchmarks_instance_pallet! { } set_some_metadata { - use sp_std::borrow::Cow; + use alloc::borrow::Cow; let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); @@ -643,7 +644,7 @@ benchmarks_instance_pallet! { } clear_metadata { - use sp_std::borrow::Cow; + use alloc::borrow::Cow; let origin = T::SubmitOrigin::try_successful_origin(&RawOrigin::Root.into()) .expect("SubmitOrigin has no successful origin required for the benchmark"); let index = create_referendum::(origin.clone()); diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index fbe27e1a47847..e72dd7f11cbb2 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -64,7 +64,11 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::boxed::Box; use codec::{Codec, Encode}; +use core::fmt::Debug; use frame_support::{ dispatch::DispatchResult, ensure, @@ -84,7 +88,6 @@ use sp_runtime::{ traits::{AtLeast32BitUnsigned, Bounded, Dispatchable, One, Saturating, Zero}, DispatchError, Perbill, }; -use sp_std::{fmt::Debug, prelude::*}; mod branch; pub mod migration; @@ -102,6 +105,7 @@ pub use self::{ }, weights::WeightInfo, }; +pub use alloc::vec::Vec; #[cfg(test)] mod mock; @@ -112,7 +116,6 @@ mod tests; pub mod benchmarking; pub use frame_support::traits::Get; -pub use sp_std::vec::Vec; #[macro_export] macro_rules! impl_tracksinfo_get { @@ -891,7 +894,8 @@ impl, I: 'static> Pallet { call: BoundedCallOf, ) { let now = frame_system::Pallet::::block_number(); - let earliest_allowed = now.saturating_add(track.min_enactment_period); + // Earliest allowed block is always at minimum the next block. 
+ let earliest_allowed = now.saturating_add(track.min_enactment_period.max(One::one())); let desired = desired.evaluate(now); let ok = T::Scheduler::schedule_named( (ASSEMBLY_ID, "enactment", index).using_encoded(sp_io::hashing::blake2_256), diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index 135476d7cb137..bf0fa4e1a12e1 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -83,20 +83,9 @@ impl pallet_scheduler::Config for Test { type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type MaxLocks = ConstU32<10>; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub static AlarmInterval: u64 = 1; @@ -123,7 +112,7 @@ impl TracksInfo for TestTracksInfo { type Id = u8; type RuntimeOrigin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, TrackInfo)] { - static DATA: [(u8, TrackInfo); 2] = [ + static DATA: [(u8, TrackInfo); 3] = [ ( 0u8, TrackInfo { @@ -168,6 +157,28 @@ impl TracksInfo for TestTracksInfo { }, }, ), + ( + 2u8, + TrackInfo { + name: "none", + max_deciding: 3, + decision_deposit: 1, + prepare_period: 2, + decision_period: 2, + confirm_period: 1, + min_enactment_period: 0, + min_approval: Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(95), + ceil: Perbill::from_percent(100), + }, + min_support: Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(90), + ceil: Perbill::from_percent(100), + }, + }, + ), ]; &DATA[..] } @@ -176,6 +187,7 @@ impl TracksInfo for TestTracksInfo { match system_origin { frame_system::RawOrigin::Root => Ok(0), frame_system::RawOrigin::None => Ok(1), + frame_system::RawOrigin::Signed(1) => Ok(2), _ => Err(()), } } else { diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index 52251fcbdbeed..3f859636f7cbb 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -682,3 +682,27 @@ fn detects_incorrect_len() { ); }); } + +/// Ensures that `DispatchTime::After(0)` plus `min_enactment_period = 0` works. 
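As the comment above notes, the enactment delay is now clamped so that even a track with `min_enactment_period = 0` schedules dispatch for the next block at the earliest; the new test below exercises exactly that. A tiny standalone sketch (hypothetical helper, not pallet code) of the clamping:

```rust
// Hypothetical helper, not pallet code: the same clamping as
// `now.saturating_add(track.min_enactment_period.max(One::one()))` above.
fn earliest_enactment(now: u64, min_enactment_period: u64) -> u64 {
    now.saturating_add(min_enactment_period.max(1))
}

#[cfg(test)]
mod clamp_tests {
    use super::*;

    #[test]
    fn zero_period_still_waits_at_least_one_block() {
        assert_eq!(earliest_enactment(5, 0), 6);
        assert_eq!(earliest_enactment(5, 2), 7);
    }
}
```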
+#[test] +fn zero_enactment_delay_executes_proposal_at_next_block() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(Referenda::submit( + RuntimeOrigin::signed(1), + Box::new(RawOrigin::Signed(1).into()), + Preimage::bound( + pallet_balances::Call::transfer_keep_alive { dest: 42, value: 20 }.into() + ) + .unwrap(), + DispatchTime::After(0), + )); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(1), 0)); + assert_eq!(ReferendumCount::::get(), 1); + set_tally(0, 100, 0); + + run_to(9); + + assert_eq!(Balances::free_balance(42), 20); + }); +} diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs index b3c583322cce3..1039b288b2aec 100644 --- a/substrate/frame/referenda/src/types.rs +++ b/substrate/frame/referenda/src/types.rs @@ -515,7 +515,7 @@ impl Curve { #[cfg(feature = "std")] impl Debug for Curve { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::LinearDecreasing { length, floor, ceil } => { write!( diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index d251aacfb5b2c..efc9917428c04 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } +sp-core = { workspace = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/remark/src/benchmarking.rs b/substrate/frame/remark/src/benchmarking.rs index 831946834963f..15b72b4748dd4 100644 --- a/substrate/frame/remark/src/benchmarking.rs +++ b/substrate/frame/remark/src/benchmarking.rs @@ -20,9 +20,9 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use alloc::vec; use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; use frame_system::{EventRecord, Pallet as System, RawOrigin}; -use sp_std::*; #[cfg(test)] use crate::Pallet as Remark; diff --git a/substrate/frame/remark/src/lib.rs b/substrate/frame/remark/src/lib.rs index 8ca3cd395afb5..eae8e0b83f5d2 100644 --- a/substrate/frame/remark/src/lib.rs +++ b/substrate/frame/remark/src/lib.rs @@ -28,7 +28,9 @@ mod 
mock; #[cfg(test)] mod tests; -use sp_std::prelude::*; +extern crate alloc; + +use alloc::vec::Vec; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index e7317d737fac1..dde264f3949a1 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -pallet-session = { path = "../session", default-features = false, features = ["historical"] } -pallet-staking = { path = "../staking", default-features = false } +pallet-session = { features = ["historical"], workspace = true } +pallet-staking = { workspace = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "../staking/reward-curve" } +pallet-balances = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true } +sp-std = { workspace = true, default-features = true } -frame-election-provider-support = { path = "../election-provider-support" } +frame-election-provider-support = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 7e7332c3f7e3b..ab43b723e8a9b 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -18,6 +18,7 @@ use super::*; use crate as root_offences; +use alloc::collections::btree_map::BTreeMap; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, @@ -29,7 +30,6 @@ use frame_support::{ use pallet_staking::StakerStatus; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{EraIndex, SessionIndex}; -use sp_std::collections::btree_map::BTreeMap; type Block = frame_system::mocking::MockBlock; type AccountId = u64; @@ -84,20 +84,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = 
RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } pallet_staking_reward_curve::build! { @@ -135,15 +124,11 @@ parameter_types! { pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = frame_system::EnsureRoot; @@ -151,19 +136,10 @@ impl pallet_staking::Config for Test { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type MaxUnlockingChunks = ConstU32<32>; - type HistoryDepth = ConstU32<84>; - type MaxControllersInDeprecationBatch = ConstU32<100>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index 74a3b8f479fa3..96d8613626225 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -16,14 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [features] try-runtime = [ @@ -40,5 +39,4 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 7ecbdb6eeda5b..b704818f1adf0 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -15,26 +15,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" 
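Several mock runtimes in these hunks (root-offences above, safe-mode and scored-pool below) replace hand-written `pallet_balances::Config` blocks with `#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]`, keeping only the overrides the test cares about. A sketch of that shape, assuming the usual `construct_runtime!`/`MockBlock` test scaffolding:

```rust
// Sketch of the `derive_impl` test-config pattern used by these mocks, assuming the
// standard frame_support / frame_system mocking scaffolding is available.
use frame_support::{construct_runtime, derive_impl};

type Block = frame_system::mocking::MockBlock<Test>;

construct_runtime!(
    pub enum Test {
        System: frame_system,
        Balances: pallet_balances,
    }
);

#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = Block;
    type AccountData = pallet_balances::AccountData<u64>;
}

// Everything not listed here falls back to the prelude's defaults
// (e.g. `Balance = u64`, `WeightInfo = ()`).
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    type AccountStore = System;
}
```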
-frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -pallet-proxy = { path = "../proxy" } -frame-support = { path = "../support", features = ["experimental"] } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 0beb911267dc5..ec1ad82495147 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -68,20 +68,10 @@ pub enum HoldReason { SafeMode, } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<2>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<10>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 25911269a95dd..9121f59ff4626 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -16,18 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { 
version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-ranked-collective = { path = "../ranked-collective", default-features = false, optional = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 124ab38c5651b..69f218943aded 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -180,6 +180,7 @@ impl pallet_ranked_collective::Config for Test { type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = Salary; type VoteWeight = Geometric; + type MaxMemberCount = (); #[cfg(feature = "runtime-benchmarks")] type BenchmarkSetup = Salary; } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 2105ba1331476..0eefca57849aa 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -17,21 +17,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } log = { workspace = true } -sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-consensus-sassafras = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -sp-core = { path = "../../primitives/core" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +array-bytes = { workspace = true, 
default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-consensus-sassafras/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index d521ed9dd91b8..285758afbe6db 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -47,10 +47,13 @@ #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode, MaxEncodedLen}; use log::{debug, error, trace, warn}; use scale_info::TypeInfo; +use alloc::vec::Vec; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, traits::{Defensive, Get}, @@ -72,7 +75,6 @@ use sp_runtime::{ traits::{One, Zero}, BoundToRuntimeAppPublic, }; -use sp_std::prelude::Vec; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -288,7 +290,7 @@ pub mod pallet { pub epoch_config: EpochConfiguration, /// Phantom config #[serde(skip)] - pub _phantom: sp_std::marker::PhantomData, + pub _phantom: core::marker::PhantomData, } #[pallet::genesis_build] diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index e851f876112e8..29aaaec9aa408 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -13,22 +13,21 @@ readme = "README.md" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } -docify = "0.2.8" +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } +docify = { workspace = true } [dev-dependencies] -pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core", default-features = false } -substrate-test-utils = { path = "../../test-utils" } +pallet-preimage = { workspace = true, default-features = true } +sp-core = { workspace = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-weights/std", ] try-runtime = [ diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 884f78000384c..d0a14fc73d64f 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -18,6 +18,7 @@ //! 
Scheduler pallet benchmarking. use super::*; +use alloc::vec; use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; use frame_support::{ ensure, @@ -25,7 +26,6 @@ use frame_support::{ weights::WeightMeter, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; use frame_system::{Call as SystemCall, EventRecord}; diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index d19a1e0001dd3..3eecf6d6f9e88 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -85,7 +85,11 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{borrow::Borrow, cmp::Ordering, marker::PhantomData}; use frame_support::{ dispatch::{DispatchResult, GetDispatchInfo, Parameter, RawOrigin}, ensure, @@ -106,7 +110,6 @@ use sp_runtime::{ traits::{BadOrigin, Dispatchable, One, Saturating, Zero}, BoundedVec, DispatchError, RuntimeDebug, }; -use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; pub use pallet::*; pub use weights::WeightInfo; diff --git a/substrate/frame/scheduler/src/migration.rs b/substrate/frame/scheduler/src/migration.rs index c2e956035a767..a304689a120cc 100644 --- a/substrate/frame/scheduler/src/migration.rs +++ b/substrate/frame/scheduler/src/migration.rs @@ -305,8 +305,8 @@ pub mod v4 { mod test { use super::*; use crate::mock::*; + use alloc::borrow::Cow; use frame_support::Hashable; - use sp_std::borrow::Cow; use substrate_test_utils::assert_eq_uvec; #[test] diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index f25bd1f1769ba..132799ead62a7 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -16,17 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -39,7 +38,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/scored-pool/src/lib.rs b/substrate/frame/scored-pool/src/lib.rs index 2bf70cbc574c8..c4464bbbfac04 100644 --- a/substrate/frame/scored-pool/src/lib.rs +++ b/substrate/frame/scored-pool/src/lib.rs @@ -98,7 +98,11 @@ mod mock; #[cfg(test)] mod tests; +extern crate alloc; + +use alloc::vec::Vec; use codec::{FullCodec, MaxEncodedLen}; +use core::{cmp::Reverse, 
fmt::Debug}; use frame_support::{ ensure, traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency}, @@ -106,7 +110,6 @@ use frame_support::{ }; pub use pallet::*; use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero}; -use sp_std::{fmt::Debug, prelude::*}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 9d2f5eb1099f8..7708c06e56bd8 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -52,20 +52,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 42ea957ac1581..09bb93d8c1d09 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -16,21 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-timestamp = { path = "../timestamp", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-trie = { path = "../../primitives/trie", default-features = false, optional = true } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +scale-info = { features = ["derive", "serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-session = { workspace = true } +sp-staking = { features = ["serde"], workspace = true } +sp-trie = { optional = true, workspace = true } +sp-state-machine = { workspace = true } [features] default = ["historical", "std"] @@ -48,7 +47,6 @@ std = [ "sp-session/std", "sp-staking/std", "sp-state-machine/std", - "sp-std/std", "sp-trie/std", ] try-runtime = [ diff --git a/substrate/frame/session/README.md b/substrate/frame/session/README.md index fa7c9b3f98348..5a063bffee0b1 
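The scored-pool mock above is the first of several mocks in this diff that switch to `derive_impl`: instead of spelling out every associated type of `pallet_balances::Config`, the test runtime inherits them from the pallet's `TestDefaultConfig` prelude and overrides only what it actually needs. A minimal self-contained sketch of the pattern (the `Test` runtime below is illustrative, not the exact mock in this PR):

```rust
use frame_support::{construct_runtime, derive_impl};

type Block = frame_system::mocking::MockBlock<Test>;

construct_runtime!(
    pub enum Test {
        System: frame_system,
        Balances: pallet_balances,
    }
);

#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = Block;
    type AccountData = pallet_balances::AccountData<u64>;
}

// Everything not listed here (Balance, DustRemoval, WeightInfo, the
// freeze/hold types, ...) is filled in by the balances default config.
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    type AccountStore = System;
}
```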
100644 --- a/substrate/frame/session/README.md +++ b/substrate/frame/session/README.md @@ -70,7 +70,7 @@ set. use pallet_session as session; fn validators() -> Vec<::ValidatorId> { - >::validators() + pallet_session::Validators::::get() } ``` diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index a306f9015c029..9ca3549f681dc 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -16,26 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-session = { path = "..", default-features = false } -pallet-staking = { path = "../../staking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +rand = { features = ["std_rng"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { workspace = true } +pallet-staking = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -scale-info = "2.11.1" -frame-election-provider-support = { path = "../../election-provider-support" } -pallet-balances = { path = "../../balances" } -pallet-staking-reward-curve = { path = "../../staking/reward-curve" } -pallet-timestamp = { path = "../../timestamp" } -sp-core = { path = "../../../primitives/core" } -sp-io = { path = "../../../primitives/io" } +codec = { features = ["derive"], workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -53,7 +52,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index d86c5d9ad278e..9ba47b34ed7a3 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -18,8 +18,8 @@ //! Benchmarks for the Session Pallet. // This is separated into its own crate due to cyclic dependency issues. 
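Note that the flattened text of the README hunk above has lost its angle-bracketed generic parameters. With them restored, the updated doc example reads roughly as follows (a reconstruction from the surrounding hunk, assuming the usual `T: pallet_session::Config` bound):

```rust
use pallet_session as session;

fn validators<T: session::Config>() -> Vec<<T as session::Config>::ValidatorId> {
    // The `#[pallet::getter]`-generated accessor is gone; the storage item
    // is read directly instead.
    pallet_session::Validators::<T>::get()
}
```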
+use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; -use sp_std::{prelude::*, vec}; use codec::Decode; use frame_benchmarking::v1::benchmarks; @@ -152,7 +152,7 @@ fn check_membership_proof_setup( Pallet::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::one()); // skip sessions until the new validator set is enacted - while Session::::validators().len() < n as usize { + while Validators::::get().len() < n as usize { Session::::rotate_session(); } diff --git a/substrate/frame/session/benchmarking/src/lib.rs b/substrate/frame/session/benchmarking/src/lib.rs index b08955a133297..f696546d1f0a0 100644 --- a/substrate/frame/session/benchmarking/src/lib.rs +++ b/substrate/frame/session/benchmarking/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 5cba79ef5b9a2..2aec58cceded2 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -54,20 +54,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_timestamp::Config for Test { @@ -139,35 +129,19 @@ impl onchain::Config for OnChainSeqPhragmen { type Bounds = ElectionsBounds; } +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] impl pallet_staking::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; type UnixTime = pallet_timestamp::Pallet; - type CurrencyToVote = (); - type RewardRemainder = (); - type RuntimeEvent = RuntimeEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); type AdminOrigin = frame_system::EnsureRoot; - type BondingDuration = (); type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = ConstU32<64>; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; - type MaxUnlockingChunks = ConstU32<32>; - type MaxControllersInDeprecationBatch = ConstU32<100>; - type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; - type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; - type EventListeners = (); - type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; - type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index b9cecea1a7f71..fac580b49b3ab 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -30,28 +30,30 @@ pub mod offchain; pub mod onchain; mod shared; +use alloc::vec::Vec; use codec::{Decode, 
Encode}; +use core::fmt::Debug; use sp_runtime::{ traits::{Convert, OpaqueKeys}, KeyTypeId, }; use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; -use sp_std::prelude::*; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, - LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + LayoutV0, MemoryDB, Recorder, StorageProof, Trie, TrieMut, TrieRecorder, }; use frame_support::{ print, traits::{KeyOwnerProofSystem, ValidatorSet, ValidatorSetWithIdentification}, - Parameter, + Parameter, LOG_TARGET, }; use crate::{self as pallet_session, Pallet as Session}; pub use pallet::*; +use sp_trie::{accessed_nodes_tracker::AccessedNodesTracker, recorder_ext::RecorderExt}; #[frame_support::pallet] pub mod pallet { @@ -102,7 +104,7 @@ impl Pallet { None => return, // nothing to prune. }; - let up_to = sp_std::cmp::min(up_to, end); + let up_to = core::cmp::min(up_to, end); if up_to < start { return // out of bounds. harmless. @@ -118,6 +120,16 @@ impl Pallet { } }) } + + fn full_id_validators() -> Vec<(T::ValidatorId, T::FullIdentification)> { + >::validators() + .into_iter() + .filter_map(|validator| { + T::FullIdentificationOf::convert(validator.clone()) + .map(|full_id| (validator, full_id)) + }) + .collect::>() + } } impl ValidatorSet for Pallet { @@ -157,7 +169,7 @@ pub trait SessionManager: /// An `SessionManager` implementation that wraps an inner `I` and also /// sets the historical trie root of the ending session. -pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); +pub struct NoteHistoricalRoot(core::marker::PhantomData<(T, I)>); impl> NoteHistoricalRoot { fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option> { @@ -264,35 +276,16 @@ impl ProvingTrie { Ok(ProvingTrie { db, root }) } - fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { - use sp_trie::HashDBT; - - let mut memory_db = MemoryDB::default(); - for node in nodes { - HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); - } - - ProvingTrie { db: memory_db, root } + fn from_proof(root: T::Hash, proof: StorageProof) -> Self { + ProvingTrie { db: proof.into_memory_db(), root } } /// Prove the full verification data for a given key and key ID. pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { let mut recorder = Recorder::>::new(); - { - let trie = - TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get(s) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; - } + self.query(key_id, key_data, Some(&mut recorder)); - Some(recorder.drain().into_iter().map(|r| r.data).collect()) + Some(recorder.into_raw_storage_proof()) } /// Access the underlying trie root. @@ -300,10 +293,17 @@ impl ProvingTrie { &self.root } - // Check a proof contained within the current memory-db. Returns `None` if the - // nodes within the current `MemoryDB` are insufficient to query the item. - fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDBBuilder::new(&self.db, &self.root).build(); + /// Search for a key inside the proof. + fn query( + &self, + key_id: KeyTypeId, + key_data: &[u8], + recorder: Option<&mut dyn TrieRecorder>, + ) -> Option> { + let trie = TrieDBBuilder::new(&self.db, &self.root) + .with_optional_recorder(recorder) + .build(); + let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? 
@@ -322,13 +322,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet Option { let session = >::current_index(); - let validators = >::validators() - .into_iter() - .filter_map(|validator| { - T::FullIdentificationOf::convert(validator.clone()) - .map(|full_id| (validator, full_id)) - }) - .collect::>(); + let validators = Self::full_id_validators(); let count = validators.len() as ValidatorCount; @@ -343,30 +337,35 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet Option> { - let (id, data) = key; - - if proof.session == >::current_index() { - >::key_owner(id, data.as_ref()).and_then(|owner| { - T::FullIdentificationOf::convert(owner.clone()).and_then(move |id| { - let count = >::validators().len() as ValidatorCount; - - if count != proof.validator_count { - return None - } + fn print_error(e: E) { + log::error!( + target: LOG_TARGET, + "Rejecting equivocation report because of key ownership proof error: {:?}", e + ); + } - Some((owner, id)) - }) - }) + let (id, data) = key; + let (root, count) = if proof.session == >::current_index() { + let validators = Self::full_id_validators(); + let count = validators.len() as ValidatorCount; + let trie = ProvingTrie::::generate_for(validators).ok()?; + (trie.root, count) } else { - let (root, count) = >::get(&proof.session)?; - - if count != proof.validator_count { - return None - } + >::get(&proof.session)? + }; - let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); - trie.query(id, data.as_ref()) + if count != proof.validator_count { + return None } + + let proof = StorageProof::new_with_duplicate_nodes_check(proof.trie_nodes) + .map_err(print_error) + .ok()?; + let mut accessed_nodes_tracker = AccessedNodesTracker::::new(proof.len()); + let trie = ProvingTrie::::from_proof(root, proof); + let res = trie.query(id, data.as_ref(), Some(&mut accessed_nodes_tracker))?; + accessed_nodes_tracker.ensure_no_unused_nodes().map_err(print_error).ok()?; + Some(res) } } @@ -376,6 +375,7 @@ pub(crate) mod tests { use crate::mock::{ force_new_session, set_next_validators, NextValidators, Session, System, Test, }; + use alloc::vec; use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId, BuildStorage}; use sp_state_machine::BasicExternalities; diff --git a/substrate/frame/session/src/historical/offchain.rs b/substrate/frame/session/src/historical/offchain.rs index 95f4d762949ee..685a0be8e191e 100644 --- a/substrate/frame/session/src/historical/offchain.rs +++ b/substrate/frame/session/src/historical/offchain.rs @@ -23,12 +23,12 @@ //! required data to the offchain validator set. This is used in conjunction with [`ProvingTrie`] //! and the off-chain indexing API. +use alloc::vec::Vec; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, KeyTypeId, }; use sp_session::MembershipProof; -use sp_std::prelude::*; use super::{shared, Config, IdentificationTuple, ProvingTrie}; use crate::{Pallet as SessionModule, SessionIndex}; @@ -60,9 +60,9 @@ impl ValidatorSet { /// Implement conversion into iterator for usage /// with [ProvingTrie](super::ProvingTrie::generate_for). 
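The rewritten `check_proof` above is the substantive change in this file: instead of rebuilding a `MemoryDB` from raw nodes, the submitted nodes are first validated as a `StorageProof` that rejects duplicates, and an `AccessedNodesTracker` then checks that every supplied node was actually touched during the lookup, so padded proofs are rejected as well. Since the flattened hunk has lost its generic parameters, here is the verification core restated with them filled back in (a sketch of the hunk above; `T::Hash` as the tracker's hash type is an assumption):

```rust
// Inside `check_proof`, after `root`/`count` have been resolved for the
// proof's session and `count == proof.validator_count` has been verified:
let proof = StorageProof::new_with_duplicate_nodes_check(proof.trie_nodes)
    .map_err(print_error)
    .ok()?; // duplicate nodes => malformed proof, reject
let mut accessed_nodes_tracker = AccessedNodesTracker::<T::Hash>::new(proof.len());
let trie = ProvingTrie::<T>::from_proof(root, proof);
let res = trie.query(id, data.as_ref(), Some(&mut accessed_nodes_tracker))?;
// Every node shipped with the proof must have been read during the query;
// otherwise the proof carries unused (potentially padding) nodes.
accessed_nodes_tracker.ensure_no_unused_nodes().map_err(print_error).ok()?;
Some(res)
```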
-impl sp_std::iter::IntoIterator for ValidatorSet { +impl core::iter::IntoIterator for ValidatorSet { type Item = (T::ValidatorId, T::FullIdentification); - type IntoIter = sp_std::vec::IntoIter; + type IntoIter = alloc::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { self.validator_set.into_iter() } diff --git a/substrate/frame/session/src/historical/onchain.rs b/substrate/frame/session/src/historical/onchain.rs index 97a7f02bd096e..a9eb18474b86a 100644 --- a/substrate/frame/session/src/historical/onchain.rs +++ b/substrate/frame/session/src/historical/onchain.rs @@ -17,9 +17,9 @@ //! On-chain logic to store a validator-set for deferred validation using an off-chain worker. +use alloc::vec::Vec; use codec::Encode; use sp_runtime::traits::Convert; -use sp_std::prelude::*; use super::{shared, Config as HistoricalConfig}; use crate::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}; diff --git a/substrate/frame/session/src/historical/shared.rs b/substrate/frame/session/src/historical/shared.rs index 297385dfb426e..06b25ec99a4cb 100644 --- a/substrate/frame/session/src/historical/shared.rs +++ b/substrate/frame/session/src/historical/shared.rs @@ -18,9 +18,9 @@ //! Shared logic between on-chain and off-chain components used for slashing using an off-chain //! worker. +use alloc::{borrow::ToOwned, vec::Vec}; use codec::Encode; use sp_staking::SessionIndex; -use sp_std::prelude::*; pub(super) const PREFIX: &[u8] = b"session_historical"; pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune"; diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 9506e98adf7d7..e1a2a31911feb 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -95,7 +95,7 @@ //! use pallet_session as session; //! //! fn validators() -> Vec<::ValidatorId> { -//! >::validators() +//! pallet_session::Validators::::get() //! } //! # fn main(){} //! ``` @@ -115,7 +115,14 @@ mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, MaxEncodedLen}; +use core::{ + marker::PhantomData, + ops::{Rem, Sub}, +}; use frame_support::{ dispatch::DispatchResult, ensure, @@ -132,11 +139,6 @@ use sp_runtime::{ ConsensusEngineId, DispatchError, KeyTypeId, Permill, RuntimeAppPublic, }; use sp_staking::SessionIndex; -use sp_std::{ - marker::PhantomData, - ops::{Rem, Sub}, - prelude::*, -}; pub use pallet::*; pub use weights::WeightInfo; @@ -445,7 +447,7 @@ pub mod pallet { }); for (account, val, keys) in self.keys.iter().cloned() { - >::inner_set_keys(&val, keys) + Pallet::::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); if frame_system::Pallet::::inc_consumers_without_limit(&account).is_err() { // This will leak a provider reference, however it only happens once (at @@ -477,7 +479,7 @@ pub mod pallet { T::SessionHandler::on_genesis_session::(&queued_keys); Validators::::put(initial_validators_0); - >::put(queued_keys); + QueuedKeys::::put(queued_keys); T::SessionManager::start_session(0); } @@ -485,12 +487,10 @@ pub mod pallet { /// The current set of validators. #[pallet::storage] - #[pallet::getter(fn validators)] pub type Validators = StorageValue<_, Vec, ValueQuery>; /// Current index of the session. 
#[pallet::storage] - #[pallet::getter(fn current_index)] pub type CurrentIndex = StorageValue<_, SessionIndex, ValueQuery>; /// True if the underlying economic identities or weighting behind the validators @@ -501,7 +501,6 @@ pub mod pallet { /// The queued keys for the next session. When the next session begins, these keys /// will be used to determine the validator's session keys. #[pallet::storage] - #[pallet::getter(fn queued_keys)] pub type QueuedKeys = StorageValue<_, Vec<(T::ValidatorId, T::Keys)>, ValueQuery>; /// Indices of disabled validators. @@ -510,7 +509,6 @@ pub mod pallet { /// disabled using binary search. It gets cleared when `on_session_ending` returns /// a new set of identities. #[pallet::storage] - #[pallet::getter(fn disabled_validators)] pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; /// The next session keys for a validator. @@ -607,33 +605,53 @@ pub mod pallet { } impl Pallet { + /// Public function to access the current set of validators. + pub fn validators() -> Vec { + Validators::::get() + } + + /// Public function to access the current session index. + pub fn current_index() -> SessionIndex { + CurrentIndex::::get() + } + + /// Public function to access the queued keys. + pub fn queued_keys() -> Vec<(T::ValidatorId, T::Keys)> { + QueuedKeys::::get() + } + + /// Public function to access the disabled validators. + pub fn disabled_validators() -> Vec { + DisabledValidators::::get() + } + /// Move on to next session. Register new validator set and session keys. Changes to the /// validator set have a session of delay to take effect. This allows for equivocation /// punishment after a fork. pub fn rotate_session() { - let session_index = >::get(); + let session_index = CurrentIndex::::get(); log::trace!(target: "runtime::session", "rotating session {:?}", session_index); - let changed = >::get(); + let changed = QueuedChanged::::get(); // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); T::SessionManager::end_session(session_index); // Get queued session keys and validators. - let session_keys = >::get(); + let session_keys = QueuedKeys::::get(); let validators = session_keys.iter().map(|(validator, _)| validator.clone()).collect::>(); Validators::::put(&validators); if changed { // reset disabled validators if active set was changed - >::take(); + DisabledValidators::::take(); } // Increment session index. let session_index = session_index + 1; - >::put(session_index); + CurrentIndex::::put(session_index); T::SessionManager::start_session(session_index); @@ -681,8 +699,8 @@ impl Pallet { (queued_amalgamated, changed) }; - >::put(queued_amalgamated.clone()); - >::put(next_changed); + QueuedKeys::::put(queued_amalgamated.clone()); + QueuedChanged::::put(next_changed); // Record that this happened. Self::deposit_event(Event::NewSession { session_index }); @@ -697,7 +715,7 @@ impl Pallet { return false } - >::mutate(|disabled| { + DisabledValidators::::mutate(|disabled| { if let Err(index) = disabled.binary_search(&i) { disabled.insert(index, i); T::SessionHandler::on_disabled(i); @@ -714,7 +732,7 @@ impl Pallet { /// Returns `false` either if the validator could not be found or it was already /// disabled. 
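The getter removals above are compensated by explicit wrapper functions on `Pallet`, so downstream code calling `Session::<T>::validators()` and friends keeps compiling, while new code can read the storage items directly. With the generic parameters that the flattened text dropped, the added accessors read roughly as follows (a reconstruction of the hunk above, not new code):

```rust
impl<T: Config> Pallet<T> {
    /// Public function to access the current set of validators.
    pub fn validators() -> Vec<T::ValidatorId> {
        Validators::<T>::get()
    }

    /// Public function to access the current session index.
    pub fn current_index() -> SessionIndex {
        CurrentIndex::<T>::get()
    }

    /// Public function to access the queued keys.
    pub fn queued_keys() -> Vec<(T::ValidatorId, T::Keys)> {
        QueuedKeys::<T>::get()
    }

    /// Public function to access the disabled validators.
    pub fn disabled_validators() -> Vec<u32> {
        DisabledValidators::<T>::get()
    }
}
```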
pub fn disable(c: &T::ValidatorId) -> bool { - Self::validators() + Validators::::get() .iter() .position(|i| i == c) .map(|i| Self::disable_index(i as u32)) @@ -745,7 +763,7 @@ impl Pallet { let new_ids = T::Keys::key_ids(); // Translate NextKeys, and key ownership relations at the same time. - >::translate::(|val, old_keys| { + NextKeys::::translate::(|val, old_keys| { // Clear all key ownership relations. Typically the overlap should // stay the same, but no guarantees by the upgrade function. for i in old_ids.iter() { @@ -762,7 +780,7 @@ impl Pallet { Some(new_keys) }); - let _ = >::translate::, _>(|k| { + let _ = QueuedKeys::::translate::, _>(|k| { k.map(|k| { k.into_iter() .map(|(val, old_keys)| (val.clone(), upgrade(val, old_keys))) @@ -848,28 +866,28 @@ impl Pallet { } fn load_keys(v: &T::ValidatorId) -> Option { - >::get(v) + NextKeys::::get(v) } fn take_keys(v: &T::ValidatorId) -> Option { - >::take(v) + NextKeys::::take(v) } fn put_keys(v: &T::ValidatorId, keys: &T::Keys) { - >::insert(v, keys); + NextKeys::::insert(v, keys); } /// Query the owner of a session key by returning the owner's validator ID. pub fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { - >::get((id, key_data)) + KeyOwner::::get((id, key_data)) } fn put_key_owner(id: KeyTypeId, key_data: &[u8], v: &T::ValidatorId) { - >::insert((id, key_data), v) + KeyOwner::::insert((id, key_data), v) } fn clear_key_owner(id: KeyTypeId, key_data: &[u8]) { - >::remove((id, key_data)); + KeyOwner::::remove((id, key_data)); } } @@ -884,11 +902,11 @@ impl ValidatorSet for Pallet { type ValidatorIdOf = T::ValidatorIdOf; fn session_index() -> sp_staking::SessionIndex { - Pallet::::current_index() + CurrentIndex::::get() } fn validators() -> Vec { - Pallet::::validators() + Validators::::get() } } @@ -906,18 +924,18 @@ impl EstimateNextNewSession> for Pallet { impl frame_support::traits::DisabledValidators for Pallet { fn is_disabled(index: u32) -> bool { - >::disabled_validators().binary_search(&index).is_ok() + DisabledValidators::::get().binary_search(&index).is_ok() } fn disabled_validators() -> Vec { - >::disabled_validators() + DisabledValidators::::get() } } /// Wraps the author-scraping logic for consensus engines that can recover /// the canonical index of an author. This then transforms it into the /// registering account-ID of that session key index. 
-pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); +pub struct FindAccountFromAuthorIndex(core::marker::PhantomData<(T, Inner)>); impl> FindAuthor for FindAccountFromAuthorIndex @@ -928,7 +946,7 @@ impl> FindAuthor { let i = Inner::find_author(digests)?; - let validators = >::validators(); + let validators = Validators::::get(); validators.get(i as usize).cloned() } } diff --git a/substrate/frame/session/src/tests.rs b/substrate/frame/session/src/tests.rs index 69337e016ea8a..f392c2ab7663c 100644 --- a/substrate/frame/session/src/tests.rs +++ b/substrate/frame/session/src/tests.rs @@ -44,7 +44,7 @@ fn initialize_block(block: u64) { fn simple_setup_should_work() { new_test_ext().execute_with(|| { assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Validators::::get(), vec![1, 2, 3]); }); } @@ -60,7 +60,7 @@ fn put_get_keys() { fn keys_cleared_on_kill() { let mut ext = new_test_ext(); ext.execute_with(|| { - assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Validators::::get(), vec![1, 2, 3]); assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); let id = DUMMY; @@ -79,7 +79,7 @@ fn keys_cleared_on_kill() { fn purge_keys_works_for_stash_id() { let mut ext = new_test_ext(); ext.execute_with(|| { - assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Validators::::get(), vec![1, 2, 3]); TestValidatorIdOf::set(vec![(10, 1), (20, 2), (3, 3)].into_iter().collect()); assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); assert_eq!(Session::load_keys(&2), Some(UintAuthorityId(2).into())); @@ -108,10 +108,10 @@ fn authorities_should_track_validators() { force_new_session(); initialize_block(1); assert_eq!( - Session::queued_keys(), + QueuedKeys::::get(), vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] ); - assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Validators::::get(), vec![1, 2, 3]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); assert!(before_session_end_called()); reset_before_session_end_called(); @@ -119,10 +119,10 @@ fn authorities_should_track_validators() { force_new_session(); initialize_block(2); assert_eq!( - Session::queued_keys(), + QueuedKeys::::get(), vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] ); - assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(Validators::::get(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); reset_before_session_end_called(); @@ -132,28 +132,28 @@ fn authorities_should_track_validators() { force_new_session(); initialize_block(3); assert_eq!( - Session::queued_keys(), + QueuedKeys::::get(), vec![ (1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()), (4, UintAuthorityId(4).into()), ] ); - assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(Validators::::get(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); force_new_session(); initialize_block(4); assert_eq!( - Session::queued_keys(), + QueuedKeys::::get(), vec![ (1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()), (4, UintAuthorityId(4).into()), ] ); - assert_eq!(Session::validators(), vec![1, 2, 4]); + assert_eq!(Validators::::get(), vec![1, 2, 4]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), 
UintAuthorityId(4)]); }); } @@ -164,20 +164,20 @@ fn should_work_with_early_exit() { set_session_length(10); initialize_block(1); - assert_eq!(Session::current_index(), 0); + assert_eq!(CurrentIndex::::get(), 0); initialize_block(2); - assert_eq!(Session::current_index(), 0); + assert_eq!(CurrentIndex::::get(), 0); force_new_session(); initialize_block(3); - assert_eq!(Session::current_index(), 1); + assert_eq!(CurrentIndex::::get(), 1); initialize_block(9); - assert_eq!(Session::current_index(), 1); + assert_eq!(CurrentIndex::::get(), 1); initialize_block(10); - assert_eq!(Session::current_index(), 2); + assert_eq!(CurrentIndex::::get(), 2); }); } @@ -402,7 +402,7 @@ fn upgrade_keys() { // Set `QueuedKeys`. { - let storage_key = >::hashed_key(); + let storage_key = super::QueuedKeys::::hashed_key(); assert!(storage::unhashed::exists(&storage_key)); storage::unhashed::put(&storage_key, &val_keys); } @@ -410,7 +410,7 @@ fn upgrade_keys() { // Set `NextKeys`. { for &(i, ref keys) in val_keys.iter() { - let storage_key = >::hashed_key_for(i); + let storage_key = super::NextKeys::::hashed_key_for(i); assert!(storage::unhashed::exists(&storage_key)); storage::unhashed::put(&storage_key, keys); } @@ -446,12 +446,12 @@ fn upgrade_keys() { // Check queued keys. assert_eq!( - Session::queued_keys(), + QueuedKeys::::get(), vec![(1, mock_keys_for(1)), (2, mock_keys_for(2)), (3, mock_keys_for(3)),], ); for i in 1u64..4 { - assert_eq!(>::get(&i), Some(mock_keys_for(i))); + assert_eq!(super::NextKeys::::get(&i), Some(mock_keys_for(i))); } }) } @@ -466,8 +466,8 @@ fn test_migration_v1() { use frame_support::traits::{PalletInfoAccess, StorageVersion}; new_test_ext().execute_with(|| { - assert!(>::iter_values().count() > 0); - assert!(>::exists()); + assert!(HistoricalSessions::::iter_values().count() > 0); + assert!(StoredRange::::exists()); let old_pallet = "Session"; let new_pallet = ::name(); diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index ed7fea523bffb..b6fa70c42efc0 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -17,24 +17,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -rand_chacha = { version = "0.3.1", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +rand_chacha = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } -sp-std = { path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +sp-io = { workspace = true } +sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -frame-support-test = { path = "../support/test" } -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" 
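The same getter-to-storage rewrite runs through the session tests in the hunks above. With the generic parameters the flattened text lost, the updated assertions read, for example (reconstruction):

```rust
assert_eq!(Validators::<Test>::get(), vec![1, 2, 3]);
assert_eq!(CurrentIndex::<Test>::get(), 0);
assert_eq!(
    QueuedKeys::<Test>::get(),
    vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into())]
);
```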
} -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -sp-io = { path = "../../primitives/io" } +frame-support-test = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -52,7 +51,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking", diff --git a/substrate/frame/society/src/benchmarking.rs b/substrate/frame/society/src/benchmarking.rs index 20af6e35ada52..8c3d2bf32ce73 100644 --- a/substrate/frame/society/src/benchmarking.rs +++ b/substrate/frame/society/src/benchmarking.rs @@ -24,6 +24,7 @@ use super::*; use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; +use alloc::vec; use sp_runtime::traits::Bounded; use crate::Pallet as Society; diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index 5bce245f73f17..b4c5c88af3d6e 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -257,6 +257,9 @@ pub mod weights; pub mod migrations; +extern crate alloc; + +use alloc::vec::Vec; use frame_support::{ impl_ensure_origin_with_arg_ignoring_arg, pallet_prelude::*, @@ -282,7 +285,6 @@ use sp_runtime::{ ArithmeticError::Overflow, Percent, RuntimeDebug, }; -use sp_std::prelude::*; pub use weights::WeightInfo; @@ -1362,7 +1364,7 @@ pub mod pallet { } /// Simple ensure origin struct to filter for the founder account. -pub struct EnsureFounder(sp_std::marker::PhantomData); +pub struct EnsureFounder(core::marker::PhantomData); impl EnsureOrigin<::RuntimeOrigin> for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::RuntimeOrigin) -> Result { diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 7ded1f84f5823..396ed787c784c 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -18,6 +18,7 @@ //! # Migrations for Society Pallet use super::*; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::traits::{Defensive, DefensiveOption, Instance, UncheckedOnRuntimeUpgrade}; diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index e41f7f1c0ef30..3836e71cb00f2 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -30,13 +30,43 @@ //! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's //! > State Transition Function (Runtime) Framework. //! -//! ## Documentation +//! //! ## Usage //! -//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). +//! The main intended use of this crate is for it to be imported with its preludes: //! -//! ## WARNING: Experimental +//! ``` +//! # use polkadot_sdk_frame as frame; +//! #[frame::pallet] +//! pub mod pallet { +//! # use polkadot_sdk_frame as frame; +//! use frame::prelude::*; +//! // ^^ using the prelude! //! -//! **This crate and all of its content is experimental, and should not yet be used in production.** +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! } +//! +//! pub mod tests { +//! # use polkadot_sdk_frame as frame; +//! use frame::testing_prelude::*; +//! } +//! +//! pub mod runtime { +//! 
# use polkadot_sdk_frame as frame; +//! use frame::runtime::prelude::*; +//! } +//! ``` +//! +//! See: [`prelude`], [`testing_prelude`] and [`runtime::prelude`]. +//! +//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. +//! +//! ## Documentation +//! +//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). //! //! ## Underlying dependencies //! @@ -46,9 +76,9 @@ //! In short, this crate only re-exports types and traits from multiple sources. All of these //! sources are listed (and re-exported again) in [`deps`]. //! -//! ## Usage +//! ## WARNING: Experimental //! -//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. +//! **This crate and all of its content is experimental, and should not yet be used in production.** #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] @@ -102,10 +132,6 @@ pub mod prelude { #[doc(no_inline)] pub use frame_system::pallet_prelude::*; - /// All of the std alternative types. - #[doc(no_inline)] - pub use sp_std::prelude::*; - /// All FRAME-relevant derive macros. #[doc(no_inline)] pub use super::derive::*; @@ -134,7 +160,6 @@ pub mod testing_prelude { pub use frame_system::{self, mocking::*}; pub use sp_io::TestExternalities as TestState; - pub use sp_std::if_std; } /// All of the types and tools needed to build FRAME-based runtimes. @@ -317,13 +342,13 @@ pub mod primitives { /// This is already part of the [`prelude`]. pub mod derive { pub use codec::{Decode, Encode}; + pub use core::fmt::Debug; pub use frame_support::{ CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, OrdNoBound, PartialEqNoBound, PartialOrdNoBound, RuntimeDebugNoBound, }; pub use scale_info::TypeInfo; pub use sp_runtime::RuntimeDebug; - pub use sp_std::fmt::Debug; } /// Access to all of the dependencies of this crate. 
In case the re-exports are not enough, this @@ -343,7 +368,6 @@ pub mod deps { pub use sp_core; pub use sp_io; pub use sp_runtime; - pub use sp_std; pub use codec; pub use scale_info; diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 22df746d667ab..57eeec543634d 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -17,40 +17,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-session = { path = "../session", default-features = false, features = [ +], workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-session = { features = [ "historical", -] } -pallet-authorship = { path = "../authorship", default-features = false } -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -frame-election-provider-support = { path = "../election-provider-support", default-features = false } +], workspace = true } +pallet-authorship = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +frame-election-provider-support = { workspace = true } log = { workspace = true } # Optional imports for benchmarking -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -rand_chacha = { version = "0.3.1", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +rand_chacha = { optional = true, workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } -sp-core = { path = "../../primitives/core" } -sp-npos-elections = { path = "../../primitives/npos-elections" } -pallet-timestamp = { path = "../timestamp" } -pallet-staking-reward-curve = { path = "reward-curve" } -pallet-bags-list = { path = "../bags-list" } -substrate-test-utils = { path = "../../test-utils" } -frame-benchmarking = { path = "../benchmarking" } -frame-election-provider-support = { path = "../election-provider-support" } -rand_chacha = { version = "0.3.1" } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, 
default-features = true } +substrate-test-utils = { workspace = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } [features] default = ["std"] @@ -74,7 +73,6 @@ std = [ "sp-npos-elections/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", "sp-tracing/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index e2a2782db2da1..acb819c3169ec 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["full", "visit"], workspace = true } [dev-dependencies] -sp-runtime = { path = "../../../primitives/runtime" } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/frame/staking/reward-fn/Cargo.toml b/substrate/frame/staking/reward-fn/Cargo.toml index 5169db5072e2f..5adbb8382da50 100644 --- a/substrate/frame/staking/reward-fn/Cargo.toml +++ b/substrate/frame/staking/reward-fn/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } +sp-arithmetic = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 19da2f24ff00e..624279624bc80 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { default-features = false, path = "../../../primitives/api" } -sp-staking = { default-features = false, path = "../../../primitives/staking" } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-staking = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 3ed33ffea4223..1f8580d7a3e67 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -33,7 +33,6 @@ use sp_runtime::{ Perbill, Percent, Saturating, }; use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex}; -use sp_std::prelude::*; pub use frame_benchmarking::v1::{ account, benchmarks, impl_benchmark_test_suite, whitelist_account, whitelisted_caller, @@ -169,7 +168,7 @@ impl ListScenario { // burn the entire issuance. 
let i = T::Currency::burn(T::Currency::total_issuance()); - sp_std::mem::forget(i); + core::mem::forget(i); // create accounts with the origin weight diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 294918376d82c..dc4b4fc326b81 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -36,7 +36,6 @@ use frame_support::{ traits::{Defensive, LockableCurrency}, }; use sp_staking::{StakingAccount, StakingInterface}; -use sp_std::prelude::*; use crate::{ BalanceOf, Bonded, Config, Error, Ledger, Pallet, Payee, RewardDestination, StakingLedger, diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 053ecdef2b00b..9e59cbd3d0cb8 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -304,6 +304,9 @@ pub mod weights; mod pallet; +extern crate alloc; + +use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ defensive, defensive_assert, @@ -325,7 +328,6 @@ use sp_staking::{ StakingAccount, }; pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; pub use weights::WeightInfo; pub use pallet::{pallet::*, UseNominatorsAndValidatorsMap, UseValidatorsMap}; @@ -674,7 +676,7 @@ impl StakingLedger { // slightly under-slashed, by at most `MaxUnlockingChunks * ED`, which is not a big // deal. slash_from_target = - sp_std::mem::replace(target, Zero::zero()).saturating_add(slash_from_target) + core::mem::replace(target, Zero::zero()).saturating_add(slash_from_target) } self.total = self.total.saturating_sub(slash_from_target); @@ -916,7 +918,7 @@ impl EraPayout for () { /// Adaptor to turn a `PiecewiseLinear` curve definition into an `EraPayout` impl, used for /// backwards compatibility. -pub struct ConvertCurve(sp_std::marker::PhantomData); +pub struct ConvertCurve(core::marker::PhantomData); impl EraPayout for ConvertCurve where Balance: AtLeast32BitUnsigned + Clone + Copy, @@ -974,7 +976,7 @@ impl Default for Forcing { /// A `Convert` implementation that finds the stash of the given controller account, /// if any. -pub struct StashOf(sp_std::marker::PhantomData); +pub struct StashOf(core::marker::PhantomData); impl Convert> for StashOf { fn convert(controller: T::AccountId) -> Option { @@ -987,7 +989,7 @@ impl Convert> for StashOf { /// /// Active exposure is the exposure of the validator set currently validating, i.e. in /// `active_era`. It can differ from the latest planned exposure in `current_era`. -pub struct ExposureOf(sp_std::marker::PhantomData); +pub struct ExposureOf(core::marker::PhantomData); impl Convert>>> for ExposureOf @@ -1000,7 +1002,7 @@ impl Convert /// Filter historical offences out and only allow those from the bonding period. pub struct FilterHistoricalOffences { - _inner: sp_std::marker::PhantomData<(T, R)>, + _inner: core::marker::PhantomData<(T, R)>, } impl ReportOffence @@ -1033,7 +1035,7 @@ where /// Wrapper struct for Era related information. It is not a pure encapsulation as these storage /// items can be accessed directly but nevertheless, its recommended to use `EraInfo` where we /// can and add more functions to it as needed. -pub struct EraInfo(sp_std::marker::PhantomData); +pub struct EraInfo(core::marker::PhantomData); impl EraInfo { /// Returns true if validator has one or more page of era rewards not claimed yet. 
// Also looks at legacy storage that can be cleaned up after #433. diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index b2ddf77004f95..5c9cf86132131 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -67,7 +67,7 @@ pub mod v15 { // The disabling strategy used by staking pallet type DefaultDisablingStrategy = UpToLimitDisablingStrategy; - pub struct VersionUncheckedMigrateV14ToV15(sp_std::marker::PhantomData); + pub struct VersionUncheckedMigrateV14ToV15(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { fn on_runtime_upgrade() -> Weight { let mut migrated = v14::OffendingValidators::::take() @@ -382,14 +382,14 @@ pub mod v10 { pub mod v9 { use super::*; #[cfg(feature = "try-runtime")] - use codec::{Decode, Encode}; + use alloc::vec::Vec; #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; + use codec::{Decode, Encode}; /// Migration implementation that injects all validators into sorted list. /// /// This is only useful for chains that started their `VoterList` just based on nominators. - pub struct InjectValidatorsIntoVoterList(sp_std::marker::PhantomData); + pub struct InjectValidatorsIntoVoterList(core::marker::PhantomData); impl OnRuntimeUpgrade for InjectValidatorsIntoVoterList { fn on_runtime_upgrade() -> Weight { if StorageVersion::::get() == ObsoleteReleases::V8_0_0 { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 8c60dec65a81a..7e6a87955b083 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -124,20 +124,12 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type MaxLocks = frame_support::traits::ConstU32<1024>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } sp_runtime::impl_opaque_keys! 
{ @@ -269,19 +261,15 @@ impl OnStakingUpdate for EventListenerMock { // Disabling threshold for `UpToLimitDisablingStrategy` pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl crate::pallet::pallet::Config for Test { type Currency = Balances; - type CurrencyBalance = ::Balance; type UnixTime = Timestamp; - type CurrencyToVote = (); type RewardRemainder = RewardRemainderMock; - type RuntimeEvent = RuntimeEvent; - type Slash = (); type Reward = MockReward; type SessionsPerEra = SessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = EnsureOneOrRoot; - type BondingDuration = BondingDuration; type SessionInterface = Self; type EraPayout = ConvertCurve; type NextNewSession = Session; @@ -296,8 +284,6 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type BenchmarkingConfig = TestBenchmarkingConfig; - type WeightInfo = (); type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 90374451a3a52..b19a127d13c43 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -48,7 +48,6 @@ use sp_staking::{ StakingAccount::{self, Controller, Stash}, StakingInterface, }; -use sp_std::prelude::*; use crate::{ election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, @@ -56,6 +55,7 @@ use crate::{ LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; +use alloc::{boxed::Box, vec, vec::Vec}; use super::pallet::*; @@ -1584,14 +1584,14 @@ impl ScoreProvider for Pallet { let imbalance = T::Currency::burn(T::Currency::total_issuance()); // kinda ugly, but gets the job done. The fact that this works here is a HUGE exception. // Don't try this pattern in other places. - sp_std::mem::forget(imbalance); + core::mem::forget(imbalance); } } /// A simple sorted list implementation that does not require any additional pallets. Note, this /// does not provide validators in sorted order. If you desire nominators in a sorted order take /// a look at [`pallet-bags-list`]. -pub struct UseValidatorsMap(sp_std::marker::PhantomData); +pub struct UseValidatorsMap(core::marker::PhantomData); impl SortedListProvider for UseValidatorsMap { type Score = BalanceOf; type Error = (); @@ -1657,7 +1657,7 @@ impl SortedListProvider for UseValidatorsMap { /// A simple voter list implementation that does not require any additional pallets. Note, this /// does not provided nominators in sorted ordered. If you desire nominators in a sorted order take /// a look at [`pallet-bags-list]. -pub struct UseNominatorsAndValidatorsMap(sp_std::marker::PhantomData); +pub struct UseNominatorsAndValidatorsMap(core::marker::PhantomData); impl SortedListProvider for UseNominatorsAndValidatorsMap { type Error = (); type Score = VoteWeight; @@ -1995,7 +1995,7 @@ impl Pallet { /// is resolved, turn warns into check /// failures. 
fn check_bonded_consistency() -> Result<(), TryRuntimeError> { - use sp_std::collections::btree_set::BTreeSet; + use alloc::collections::btree_set::BTreeSet; let mut count_controller_double = 0; let mut count_double = 0; @@ -2159,8 +2159,8 @@ impl Pallet { /// = exposure.own + exposure.own). /// * Paged exposures metadata (`ErasStakersOverview`) matches the paged exposures state. fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use alloc::collections::btree_map::BTreeMap; use sp_staking::PagedExposureMetadata; - use sp_std::collections::btree_map::BTreeMap; // Sanity check for the paged exposure of the active era. let mut exposures: BTreeMap>> = diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 284a801a0f050..79f9d298ada78 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -17,6 +17,7 @@ //! Staking FRAME Pallet. +use alloc::vec::Vec; use codec::Codec; use frame_election_provider_support::{ ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight, @@ -41,7 +42,6 @@ use sp_staking::{ StakingAccount::{self, Controller, Stash}, StakingInterface, }; -use sp_std::prelude::*; mod impls; @@ -86,9 +86,10 @@ pub mod pallet { Remove, } - #[pallet::config] + #[pallet::config(with_default)] pub trait Config: frame_system::Config { /// The staking balance. + #[pallet::no_default] type Currency: LockableCurrency< Self::AccountId, Moment = BlockNumberFor, @@ -100,7 +101,7 @@ pub mod pallet { + codec::FullCodec + Copy + MaybeSerializeDeserialize - + sp_std::fmt::Debug + + core::fmt::Debug + Default + From + TypeInfo @@ -109,6 +110,7 @@ pub mod pallet { /// /// It is guaranteed to start being called from the first `on_finalize`. Thus value at /// genesis is not used. + #[pallet::no_default] type UnixTime: UnixTime; /// Convert a balance into a number used for election calculation. This must fit into a @@ -117,9 +119,11 @@ pub mod pallet { /// in 128. /// Consequently, the backward convert is used convert the u128s from sp-elections back to a /// [`BalanceOf`]. + #[pallet::no_default_bounds] type CurrencyToVote: sp_staking::currency_to_vote::CurrencyToVote>; /// Something that provides the election functionality. + #[pallet::no_default] type ElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -127,6 +131,7 @@ pub mod pallet { DataProvider = Pallet, >; /// Something that provides the election functionality at genesis. + #[pallet::no_default] type GenesisElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = BlockNumberFor, @@ -134,6 +139,7 @@ pub mod pallet { >; /// Something that defines the maximum number of nominations per nominator. + #[pallet::no_default_bounds] type NominationsQuota: NominationsQuota>; /// Number of eras to keep in history. @@ -161,17 +167,21 @@ pub mod pallet { /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default_bounds] type RewardRemainder: OnUnbalanced>; /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for the unbalanced reduction when slashing a staker. + #[pallet::no_default_bounds] type Slash: OnUnbalanced>; /// Handler for the unbalanced increment when rewarding a staker. /// NOTE: in most cases, the implementation of `OnUnbalanced` should modify the total /// issuance. 
+ #[pallet::no_default_bounds] type Reward: OnUnbalanced>; /// Number of sessions per era. @@ -192,6 +202,7 @@ pub mod pallet { /// The origin which can manage less critical staking parameters that does not require root. /// /// Supported actions: (1) cancel deferred slash, (2) set minimum commission. + #[pallet::no_default] type AdminOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. @@ -199,10 +210,12 @@ pub mod pallet { /// The payout for validators and the system for the current era. /// See [Era payout](./index.html#era-payout). + #[pallet::no_default] type EraPayout: EraPayout>; /// Something that can estimate the next session change, accurately or as a best effort /// guess. + #[pallet::no_default_bounds] type NextNewSession: EstimateNextNewSession>; /// The maximum size of each `T::ExposurePage`. @@ -230,6 +243,7 @@ pub mod pallet { /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. /// /// Invariant: what comes out of this list will always be a nominator. + #[pallet::no_default] type VoterList: SortedListProvider; /// WIP: This is a noop as of now, the actual business logic that's described below is going @@ -252,6 +266,7 @@ pub mod pallet { /// validators, they can chill at any point, and their approval stakes will still be /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE /// VALIDATOR. + #[pallet::no_default] type TargetList: SortedListProvider>; /// The maximum number of `unlocking` chunks a [`StakingLedger`] can @@ -274,18 +289,66 @@ pub mod pallet { /// receives. /// /// WARNING: this only reports slashing and withdraw events for the time being. + #[pallet::no_default_bounds] type EventListeners: sp_staking::OnStakingUpdate>; - // `DisablingStragegy` controls how validators are disabled + /// `DisablingStragegy` controls how validators are disabled + #[pallet::no_default_bounds] type DisablingStrategy: DisablingStrategy; /// Some parameters of the benchmarking. + #[cfg(feature = "std")] + type BenchmarkingConfig: BenchmarkingConfig; + + #[cfg(not(feature = "std"))] + #[pallet::no_default] type BenchmarkingConfig: BenchmarkingConfig; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, parameter_types, traits::ConstU32}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + parameter_types! 
{ + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: EraIndex = 3; + } + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type CurrencyBalance = u128; + type CurrencyToVote = (); + type NominationsQuota = crate::FixedNominationsQuota<16>; + type HistoryDepth = ConstU32<84>; + type RewardRemainder = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = (); + type SessionInterface = (); + type NextNewSession = (); + type MaxExposurePageSize = ConstU32<64>; + type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type EventListeners = (); + type DisablingStrategy = crate::UpToLimitDisablingStrategy; + #[cfg(feature = "std")] + type BenchmarkingConfig = crate::TestBenchmarkingConfig; + type WeightInfo = (); + } + } + /// The ideal number of active validators. #[pallet::storage] #[pallet::getter(fn validator_count)] diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 1fe608cd3358b..9bc8197c50b3e 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -54,6 +54,7 @@ use crate::{ NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, ValidatorSlashInEra, }; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, @@ -65,7 +66,6 @@ use sp_runtime::{ DispatchResult, RuntimeDebug, }; use sp_staking::{EraIndex, StakingInterface}; -use sp_std::vec::Vec; /// The proportion of the slashing reward to be paid out on the first slashing detection. /// This is f_1 in the paper. @@ -148,7 +148,7 @@ impl SlashingSpans { SlashingSpan { index, start, length: Some(length) } }); - sp_std::iter::once(last).chain(prior) + core::iter::once(last).chain(prior) } /// Yields the era index where the most recent non-zero slash occurred. @@ -182,7 +182,7 @@ impl SlashingSpans { }; // readjust the ongoing span, if it started before the beginning of the window. - self.last_start = sp_std::cmp::max(self.last_start, window_start); + self.last_start = core::cmp::max(self.last_start, window_start); pruned } } @@ -408,7 +408,7 @@ struct InspectingSpans<'a, T: Config + 'a> { paid_out: &'a mut BalanceOf, slash_of: &'a mut BalanceOf, reward_proportion: Perbill, - _marker: sp_std::marker::PhantomData, + _marker: core::marker::PhantomData, } // fetches the slashing spans record for a stash account, initializing it if necessary. @@ -433,7 +433,7 @@ fn fetch_spans<'a, T: Config + 'a>( slash_of, paid_out, reward_proportion, - _marker: sp_std::marker::PhantomData, + _marker: core::marker::PhantomData, } } @@ -451,7 +451,7 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { // although `amount` may be zero, as it is only a difference. fn add_slash(&mut self, amount: BalanceOf, slash_era: EraIndex) { *self.slash_of += amount; - self.spans.last_nonzero_slash = sp_std::cmp::max(self.spans.last_nonzero_slash, slash_era); + self.spans.last_nonzero_slash = core::cmp::max(self.spans.last_nonzero_slash, slash_era); } // find the span index of the given era, if covered. 
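The `#[pallet::config(with_default)]` / `config_preludes::TestDefaultConfig` machinery added above lets a test runtime derive most of the staking `Config` items instead of spelling them all out. Below is a minimal sketch of the resulting pattern, mirroring the mock.rs hunk earlier in this diff; `Test`, `Balances`, `Timestamp`, `RewardRemainderMock`, `MockReward`, `EnsureOneOrRoot`, `Session` and `EventListenerMock` are the existing mock-runtime types, and anything not visible in the hunks is illustrative rather than part of this change.

```rust
// Sketch only: implementing pallet_staking::Config in a mock runtime after this change.
// Items provided by config_preludes::TestDefaultConfig (CurrencyBalance, CurrencyToVote,
// RuntimeEvent, Slash, BondingDuration, WeightInfo, ...) are derived and can be omitted.
#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
impl pallet_staking::Config for Test {
    // `#[pallet::no_default]` items and deliberate overrides still have to be written out.
    type Currency = Balances;
    type UnixTime = Timestamp;
    type RewardRemainder = RewardRemainderMock;
    type Reward = MockReward;
    type AdminOrigin = EnsureOneOrRoot;
    type SessionInterface = Self;
    type NextNewSession = Session;
    type EventListeners = EventListenerMock;
    // ...the items the mock keeps (SessionsPerEra, SlashDeferDuration, EraPayout, and the
    // other `#[pallet::no_default]` types) are unchanged and omitted from this sketch.
}
```

Inside the staking crate itself the same impl is written against `crate::config_preludes::TestDefaultConfig` and `crate::pallet::pallet::Config`, as the mock.rs hunk shows.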
diff --git a/substrate/frame/staking/src/testing_utils.rs b/substrate/frame/staking/src/testing_utils.rs index d4938ea43ebe2..65aaa5f09de4e 100644 --- a/substrate/frame/staking/src/testing_utils.rs +++ b/substrate/frame/staking/src/testing_utils.rs @@ -30,7 +30,6 @@ use sp_io::hashing::blake2_256; use frame_election_provider_support::SortedListProvider; use frame_support::{pallet_prelude::*, traits::Currency}; use sp_runtime::{traits::StaticLookup, Perbill}; -use sp_std::prelude::*; const SEED: u32 = 0; diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 2229eb28329ad..c35e5e8a06c6f 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -41,7 +41,6 @@ use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, SessionIndex, }; -use sp_std::prelude::*; use substrate_test_utils::assert_eq_uvec; #[test] @@ -780,7 +779,7 @@ fn nominators_also_get_slashed_pro_rata() { #[test] fn double_staking_should_fail() { // should test (in the same order): - // * an account already bonded as stash cannot be be stashed again. + // * an account already bonded as stash cannot be stashed again. // * an account already bonded as stash cannot nominate. // * an account already bonded as controller can nominate. ExtBuilder::default().try_state(false).build_and_execute(|| { diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 0870989d81f15..db8d10dabfdef 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -15,27 +15,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -thousands = { version = "0.2.0", optional = true } -zstd = { version = "0.12.4", default-features = false, optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -remote-externalities = { package = "frame-remote-externalities", path = "../../utils/frame/remote-externalities", optional = true } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true } +thousands = { optional = true, workspace = true } +zstd = { optional = true, workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +remote-externalities = { optional = true, workspace = true, default-features = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +substrate-state-trie-migration-rpc = { optional = true, workspace = true, default-features = true } [dev-dependencies] -parking_lot = "0.12.1" -tokio = { version = "1.22.0", 
features = ["macros"] } -pallet-balances = { path = "../balances" } -sp-tracing = { path = "../../primitives/tracing" } +parking_lot = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] @@ -50,7 +49,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-tracing/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 4ec649f9080d4..3fe5abb810313 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -55,6 +55,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use pallet::*; pub mod weights; @@ -75,6 +77,8 @@ pub mod pallet { pub use crate::weights::WeightInfo; + use alloc::{vec, vec::Vec}; + use core::ops::Deref; use frame_support::{ dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, ensure, @@ -93,7 +97,6 @@ pub mod pallet { self, traits::{Saturating, Zero}, }; - use sp_std::{ops::Deref, prelude::*}; pub(crate) type BalanceOf = <::Currency as Inspect<::AccountId>>::Balance; @@ -109,7 +112,6 @@ pub mod pallet { MaxEncodedLen, )] #[scale_info(skip_type_params(MaxKeyLen))] - #[codec(mel_bound())] pub enum Progress> { /// Yet to begin. ToStart, @@ -126,7 +128,6 @@ pub mod pallet { /// /// It tracks the last top and child keys read. #[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq, MaxEncodedLen)] - #[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] pub struct MigrationTask { /// The current top trie migration progress. @@ -171,11 +172,11 @@ pub mod pallet { pub(crate) child_items: u32, #[codec(skip)] - pub(crate) _ph: sp_std::marker::PhantomData, + pub(crate) _ph: core::marker::PhantomData, } - impl> sp_std::fmt::Debug for Progress { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + impl> core::fmt::Debug for Progress { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Progress::ToStart => f.write_str("To start"), Progress::LastKey(key) => write!(f, "Last: {:?}", HexDisplay::from(key.deref())), @@ -184,8 +185,8 @@ pub mod pallet { } } - impl sp_std::fmt::Debug for MigrationTask { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + impl core::fmt::Debug for MigrationTask { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("MigrationTask") .field("top", &self.progress_top) .field("child", &self.progress_child) @@ -955,8 +956,8 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; + use alloc::vec; use frame_support::traits::fungible::{Inspect, Mutate}; - use sp_std::prelude::*; // The size of the key seemingly makes no difference in the read/write time, so we make it // constant. @@ -1082,7 +1083,7 @@ mod benchmarks { process_top_key { let v in 1 .. 
(4 * 1024 * 1024); - let value = sp_std::vec![1u8; v as usize]; + let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); }: { let data = sp_io::storage::get(KEY).unwrap(); @@ -1103,6 +1104,7 @@ mod benchmarks { mod mock { use super::*; use crate as pallet_state_trie_migration; + use alloc::{vec, vec::Vec}; use frame_support::{derive_impl, parameter_types, traits::Hooks, weights::Weight}; use frame_system::{EnsureRoot, EnsureSigned}; use sp_core::{ diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 989f0c330fc10..000e5033d8f19 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -15,20 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-statement-store = { path = "../../primitives/statement-store", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-statement-store = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-io = { workspace = true } +sp-core = { workspace = true } log = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-statement-store/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/statement/src/lib.rs b/substrate/frame/statement/src/lib.rs index c68dac2d29722..6a7f577ab0869 100644 --- a/substrate/frame/statement/src/lib.rs +++ b/substrate/frame/statement/src/lib.rs @@ -92,7 +92,7 @@ pub mod pallet { } #[pallet::pallet] - pub struct Pallet(sp_std::marker::PhantomData); + pub struct Pallet(core::marker::PhantomData); #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs index 35d51e7a27bfc..34afd332c083d 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -51,20 +51,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; } 
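A second pattern that repeats through these hunks is the `sp_std` retirement: imports move to their `core`/`alloc` counterparts, `extern crate alloc;` is added to crates that did not yet declare it (state-trie-migration above, pallet-sudo below), and the `sp-std` dependency plus its `std` feature entry are dropped from the affected Cargo.toml files. A minimal sketch of the pattern at a crate root, with made-up item names (`Tracker`, `collect_ids`) purely for illustration:

```rust
#![cfg_attr(not(feature = "std"), no_std)]

// Declared once at the crate root so `alloc::...` paths resolve under no_std.
extern crate alloc;

use alloc::{vec, vec::Vec};
use core::marker::PhantomData;
// Previously these came via sp_std, e.g. `use sp_std::prelude::*;` and
// `sp_std::marker::PhantomData`.

/// Illustrative type: PhantomData is now taken from `core`.
pub struct Tracker<T>(PhantomData<T>);

/// Illustrative helper: Vec and the vec! macro are now taken from `alloc`.
pub fn collect_ids(ids: &[u32]) -> Vec<u32> {
    let mut out = vec![];
    out.extend_from_slice(ids);
    out
}
```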
ord_parameter_types! { diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index fcbb00087e26c..1a94753728b24 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -41,7 +40,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index e64233fe7480a..dee7d09c9d0c6 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -19,6 +19,7 @@ use super::*; use crate::Pallet; +use alloc::{boxed::Box, vec}; use frame_benchmarking::v2::*; use frame_system::RawOrigin; diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index e90286e5a7c6b..fb7eaf7894806 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -17,6 +17,7 @@ use crate::{Config, Key}; use codec::{Decode, Encode}; +use core::{fmt, marker::PhantomData}; use frame_support::{dispatch::DispatchInfo, ensure}; use scale_info::TypeInfo; use sp_runtime::{ @@ -26,7 +27,6 @@ use sp_runtime::{ UnknownTransaction, ValidTransaction, }, }; -use sp_std::{fmt, marker::PhantomData}; /// Ensure that signed transactions are only valid if they are signed by sudo account. 
/// diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 63b68e694307e..07296e90b6485 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -121,8 +121,11 @@ #![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::boxed::Box; + use sp_runtime::{traits::StaticLookup, DispatchResult}; -use sp_std::prelude::*; use frame_support::{dispatch::GetDispatchInfo, traits::UnfilteredDispatchable}; diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index a6c4fd6ee309e..560697ab710b1 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -16,59 +16,59 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", default-features = false } +array-bytes = { workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } -frame-metadata = { version = "16.0.0", default-features = false, features = [ +], workspace = true } +frame-metadata = { features = [ "current", -] } -sp-api = { path = "../../primitives/api", default-features = false, features = [ +], workspace = true } +sp-api = { features = [ "frame-metadata", -] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = [ +], workspace = true } +sp-std = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = [ "serde", -] } -sp-tracing = { path = "../../primitives/tracing", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-staking = { path = "../../primitives/staking", default-features = false } -sp-weights = { path = "../../primitives/weights", default-features = false } -sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false } -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false } -tt-call = "1.0.8" -macro_magic = "0.5.0" -frame-support-procedural = { path = "procedural", default-features = false } -paste = "1.0" -sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true } -bitflags = "1.3" -impl-trait-for-tuples = "0.2.2" -smallvec = "1.11.0" +], workspace = true } +sp-tracing = { workspace = true } +sp-core = { workspace = true } +sp-arithmetic = { workspace = true } +sp-inherents = { workspace = true } +sp-staking = { workspace = true } +sp-weights = { workspace = true } +sp-debug-derive = { workspace = true } +sp-metadata-ir = { workspace = true } +tt-call = { workspace = true } +macro_magic = { workspace = true } +frame-support-procedural = { workspace = true } +paste = { workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +bitflags = { workspace = true } +impl-trait-for-tuples = { workspace = true } +smallvec = { workspace = true, default-features = 
true } log = { workspace = true } -sp-crypto-hashing-proc-macro = { path = "../../primitives/crypto/hashing/proc-macro" } -k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } -environmental = { version = "1.1.4", default-features = false } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } +k256 = { features = ["ecdsa"], workspace = true } +environmental = { workspace = true } +sp-genesis-builder = { workspace = true } serde_json = { features = ["alloc"], workspace = true } -docify = "0.2.8" -static_assertions = "1.1.0" +docify = { workspace = true } +static_assertions = { workspace = true, default-features = true } -aquamarine = { version = "0.5.0" } +aquamarine = { workspace = true } [dev-dependencies] -assert_matches = "1.3.0" -pretty_assertions = "1.2.1" -sp-timestamp = { path = "../../primitives/timestamp", default-features = false } -frame-system = { path = "../system" } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } +assert_matches = { workspace = true } +pretty_assertions = { workspace = true } +sp-timestamp = { workspace = true } +frame-system = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index b04af63de8117..fbb4da0177a4d 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,21 +18,21 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = "0.2.0" -Inflector = "0.11.4" -cfg-expr = "0.15.5" -itertools = "0.11" -proc-macro2 = "1.0.56" +derive-syn-parse = { workspace = true } +Inflector = { workspace = true } +cfg-expr = { workspace = true } +itertools = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } -syn = { features = ["full", "visit-mut"], workspace = true } -frame-support-procedural-tools = { path = "tools" } -macro_magic = { version = "0.5.0", features = ["proc_support"] } -proc-macro-warning = { version = "1.0.0", default-features = false } -expander = "2.0.0" -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing", default-features = false } +syn = { features = ["full", "parsing", "visit-mut"], workspace = true } +frame-support-procedural-tools = { workspace = true, default-features = true } +macro_magic = { features = ["proc_support"], workspace = true } +proc-macro-warning = { workspace = true } +expander = { workspace = true } +sp-crypto-hashing = { workspace = true } [dev-dependencies] -regex = "1" +regex = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index da483fa6cf0b6..c5fe8440d21be 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -58,17 +58,17 @@ pub fn expand_outer_inherent( trait InherentDataExt { fn create_extrinsics(&self) -> - #scrate::__private::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic>; + #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic>; fn check_extrinsics(&self, block: &#block) -> 
#scrate::inherent::CheckInherentsResult; } impl InherentDataExt for #scrate::inherent::InherentData { fn create_extrinsics(&self) -> - #scrate::__private::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> + #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { use #scrate::inherent::ProvideInherent; - let mut inherents = #scrate::__private::sp_std::vec::Vec::new(); + let mut inherents = #scrate::__private::Vec::new(); #( #pallet_attrs diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 0e76f9a92469a..daef1b171617f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -111,7 +111,7 @@ pub fn expand_runtime_metadata( >(); #scrate::__private::metadata_ir::MetadataIR { - pallets: #scrate::__private::sp_std::vec![ #(#pallets),* ], + pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, @@ -156,7 +156,7 @@ pub fn expand_runtime_metadata( }) } - pub fn metadata_versions() -> #scrate::__private::sp_std::vec::Vec { + pub fn metadata_versions() -> #scrate::__private::Vec { #scrate::__private::metadata_ir::supported_versions() } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 83049919d01c3..4a14853c04eec 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -105,25 +105,25 @@ pub fn expand_outer_origin( #[derive(Clone)] pub struct RuntimeOrigin { pub caller: OriginCaller, - filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, + filter: #scrate::__private::Rc<#scrate::__private::Box::RuntimeCall) -> bool>>, } #[cfg(not(feature = "std"))] - impl #scrate::__private::sp_std::fmt::Debug for RuntimeOrigin { + impl core::fmt::Debug for RuntimeOrigin { fn fmt( &self, - fmt: &mut #scrate::__private::sp_std::fmt::Formatter, - ) -> #scrate::__private::sp_std::result::Result<(), #scrate::__private::sp_std::fmt::Error> { + fmt: &mut core::fmt::Formatter, + ) -> core::result::Result<(), core::fmt::Error> { fmt.write_str("") } } #[cfg(feature = "std")] - impl #scrate::__private::sp_std::fmt::Debug for RuntimeOrigin { + impl core::fmt::Debug for RuntimeOrigin { fn fmt( &self, - fmt: &mut #scrate::__private::sp_std::fmt::Formatter, - ) -> #scrate::__private::sp_std::result::Result<(), #scrate::__private::sp_std::fmt::Error> { + fmt: &mut core::fmt::Formatter, + ) -> core::result::Result<(), core::fmt::Error> { fmt.debug_struct("Origin") .field("caller", &self.caller) .field("filter", &"[function ptr]") @@ -139,7 +139,7 @@ pub fn expand_outer_origin( fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { let f = self.filter.clone(); - self.filter = #scrate::__private::sp_std::rc::Rc::new(Box::new(move |call| { + self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(move |call| { f(call) && filter(call) })); } @@ -150,7 +150,7 @@ pub fn expand_outer_origin( as #scrate::traits::Contains<<#runtime as #system_path::Config>::RuntimeCall> >::contains; - self.filter = 
#scrate::__private::sp_std::rc::Rc::new(Box::new(filter)); + self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(filter)); } fn set_caller_from(&mut self, other: impl Into) { @@ -252,7 +252,7 @@ pub fn expand_outer_origin( impl TryFrom for #system_path::Origin<#runtime> { type Error = OriginCaller; fn try_from(x: OriginCaller) - -> #scrate::__private::sp_std::result::Result<#system_path::Origin<#runtime>, OriginCaller> + -> core::result::Result<#system_path::Origin<#runtime>, OriginCaller> { if let OriginCaller::system(l) = x { Ok(l) @@ -275,7 +275,7 @@ pub fn expand_outer_origin( fn from(x: OriginCaller) -> Self { let mut o = RuntimeOrigin { caller: x, - filter: #scrate::__private::sp_std::rc::Rc::new(Box::new(|_| true)), + filter: #scrate::__private::Rc::new(#scrate::__private::Box::new(|_| true)), }; #scrate::traits::OriginTrait::reset_filter(&mut o); @@ -284,7 +284,7 @@ pub fn expand_outer_origin( } } - impl From for #scrate::__private::sp_std::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { + impl From for core::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::system(l) = val.caller { @@ -349,7 +349,7 @@ fn expand_origin_caller_variant( } fn expand_origin_pallet_conversions( - scrate: &TokenStream, + _scrate: &TokenStream, runtime: &Ident, pallet: &Pallet, instance: Option<&Ident>, @@ -394,7 +394,7 @@ fn expand_origin_pallet_conversions( } #attr - impl From for #scrate::__private::sp_std::result::Result<#pallet_origin, RuntimeOrigin> { + impl From for core::result::Result<#pallet_origin, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. 
fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::#variant_name(l) = val.caller { @@ -410,7 +410,7 @@ fn expand_origin_pallet_conversions( type Error = OriginCaller; fn try_from( x: OriginCaller, - ) -> #scrate::__private::sp_std::result::Result<#pallet_origin, OriginCaller> { + ) -> core::result::Result<#pallet_origin, OriginCaller> { if let OriginCaller::#variant_name(l) = x { Ok(l) } else { @@ -424,7 +424,7 @@ fn expand_origin_pallet_conversions( type Error = (); fn try_from( x: &'a OriginCaller, - ) -> #scrate::__private::sp_std::result::Result<&'a #pallet_origin, ()> { + ) -> core::result::Result<&'a #pallet_origin, ()> { if let OriginCaller::#variant_name(l) = x { Ok(&l) } else { @@ -438,7 +438,7 @@ fn expand_origin_pallet_conversions( type Error = (); fn try_from( x: &'a RuntimeOrigin, - ) -> #scrate::__private::sp_std::result::Result<&'a #pallet_origin, ()> { + ) -> core::result::Result<&'a #pallet_origin, ()> { if let OriginCaller::#variant_name(l) = &x.caller { Ok(&l) } else { diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 1505d158895f0..17042c2487803 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -470,7 +470,7 @@ fn construct_runtime_final_expansion( #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] - fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + fn runtime_metadata(&self) -> #scrate::__private::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { Default::default() } } @@ -669,10 +669,10 @@ pub(crate) fn decl_pallet_runtime_setup( impl #scrate::traits::PalletInfo for PalletInfo { fn index() -> Option { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

<P>(); + let type_id = core::any::TypeId::of::<P>
(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#indices) } )* @@ -681,10 +681,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn name() -> Option<&'static str> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

<P>(); + let type_id = core::any::TypeId::of::<P>
(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#name_strings) } )* @@ -693,10 +693,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn name_hash() -> Option<[u8; 16]> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

<P>(); + let type_id = core::any::TypeId::of::<P>
(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#name_hashes) } )* @@ -705,10 +705,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn module_name() -> Option<&'static str> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

<P>(); + let type_id = core::any::TypeId::of::<P>
(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#module_names) } )* @@ -717,10 +717,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn crate_version() -> Option<#scrate::traits::CrateVersion> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

<P>(); + let type_id = core::any::TypeId::of::<P>
(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some( <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() ) diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index ded77bed4c8e2..532e032d0cb78 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -65,8 +65,6 @@ pub enum RuntimeDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. #[derive(Debug)] pub struct ImplicitRuntimeDeclaration { - pub name: Ident, - pub where_section: Option, pub pallets: Vec, } @@ -98,11 +96,7 @@ impl Parse for RuntimeDeclaration { match convert_pallets(pallets.content.inner.into_iter().collect())? { PalletsConversion::Implicit(pallets) => - Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { - name, - where_section, - pallets, - })), + Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { pallets })), PalletsConversion::Explicit(pallets) => Ok(RuntimeDeclaration::Explicit(ExplicitRuntimeDeclaration { name, @@ -124,9 +118,6 @@ impl Parse for RuntimeDeclaration { #[derive(Debug)] pub struct WhereSection { pub span: Span, - pub block: syn::TypePath, - pub node_block: syn::TypePath, - pub unchecked_extrinsic: syn::TypePath, } impl Parse for WhereSection { @@ -145,10 +136,9 @@ impl Parse for WhereSection { } input.parse::()?; } - let block = remove_kind(input, WhereKind::Block, &mut definitions)?.value; - let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; - let unchecked_extrinsic = - remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; + remove_kind(input, WhereKind::Block, &mut definitions)?; + remove_kind(input, WhereKind::NodeBlock, &mut definitions)?; + remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?; if let Some(WhereDefinition { ref kind_span, ref kind, .. }) = definitions.first() { let msg = format!( "`{:?}` was declared above. Please use exactly one declaration for `{:?}`.", @@ -156,7 +146,7 @@ impl Parse for WhereSection { ); return Err(Error::new(*kind_span, msg)) } - Ok(Self { span: input.span(), block, node_block, unchecked_extrinsic }) + Ok(Self { span: input.span() }) } } @@ -171,7 +161,6 @@ pub enum WhereKind { pub struct WhereDefinition { pub kind_span: Span, pub kind: WhereKind, - pub value: syn::TypePath, } impl Parse for WhereDefinition { @@ -187,14 +176,10 @@ impl Parse for WhereDefinition { return Err(lookahead.error()) }; - Ok(Self { - kind_span, - kind, - value: { - let _: Token![=] = input.parse()?; - input.parse()? - }, - }) + let _: Token![=] = input.parse()?; + let _: syn::TypePath = input.parse()?; + + Ok(Self { kind_span, kind }) } } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index e812ac071b2c9..51e5657a2e8be 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -81,6 +81,9 @@ fn counter_prefix(prefix: &str) -> String { /// Construct a runtime, with the given name and the given pallets. /// +/// NOTE: A new version of this macro is available at `frame_support::runtime`. This macro will +/// soon be deprecated. Please use the new macro instead. 
+/// /// The parameters here are specific types for `Block`, `NodeBlock`, and `UncheckedExtrinsic` /// and the pallets that are used by the runtime. /// `Block` is the block type that is used in the runtime and `NodeBlock` is the block type @@ -1188,67 +1191,11 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { .into() } -/// Construct a runtime, with the given name and the given pallets. -/// -/// # Example: /// -/// ```ignore -/// #[frame_support::runtime] -/// mod runtime { -/// // The main runtime -/// #[runtime::runtime] -/// // Runtime Types to be generated -/// #[runtime::derive( -/// RuntimeCall, -/// RuntimeEvent, -/// RuntimeError, -/// RuntimeOrigin, -/// RuntimeFreezeReason, -/// RuntimeHoldReason, -/// RuntimeSlashReason, -/// RuntimeLockId, -/// RuntimeTask, -/// )] -/// pub struct Runtime; -/// -/// #[runtime::pallet_index(0)] -/// pub type System = frame_system; -/// -/// #[runtime::pallet_index(1)] -/// pub type Test = path::to::test; -/// -/// // Pallet with instance. -/// #[runtime::pallet_index(2)] -/// pub type Test2_Instance1 = test2; -/// -/// // Pallet with calls disabled. -/// #[runtime::pallet_index(3)] -/// #[runtime::disable_call] -/// pub type Test3 = test3; -/// -/// // Pallet with unsigned extrinsics disabled. -/// #[runtime::pallet_index(4)] -/// #[runtime::disable_unsigned] -/// pub type Test4 = test4; -/// } -/// ``` -/// -/// # Legacy Ordering -/// -/// An optional attribute can be defined as #[frame_support::runtime(legacy_ordering)] to -/// ensure that the order of hooks is same as the order of pallets (and not based on the -/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. -/// -/// # Note -/// -/// The population of the genesis storage depends on the order of pallets. So, if one of your -/// pallets depends on another pallet, the pallet that is depended upon needs to come before -/// the pallet depending on it. -/// -/// # Type definitions +/// --- /// -/// * The macro generates a type alias for each pallet to their `Pallet`. E.g. `type System = -/// frame_system::Pallet` +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::runtime`. 
#[proc_macro_attribute] pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { runtime::runtime(attr, item) diff --git a/substrate/frame/support/procedural/src/pallet/expand/constants.rs b/substrate/frame/support/procedural/src/pallet/expand/constants.rs index d7fbb5a718973..a36df790bd298 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/constants.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/constants.rs @@ -88,7 +88,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { name: #ident_str, ty: #frame_support::__private::scale_info::meta_type::<#const_type>(), value: { #default_byte_impl }, - docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ], + docs: #frame_support::__private::vec![ #( #doc ),* ], } }) }); @@ -98,9 +98,9 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub fn pallet_constants_metadata() - -> #frame_support::__private::sp_std::vec::Vec<#frame_support::__private::metadata_ir::PalletConstantMetadataIR> + -> #frame_support::__private::Vec<#frame_support::__private::metadata_ir::PalletConstantMetadataIR> { - #frame_support::__private::sp_std::vec![ #( #consts ),* ] + #frame_support::__private::vec![ #( #consts ),* ] } } ) diff --git a/substrate/frame/support/procedural/src/pallet/expand/documentation.rs b/substrate/frame/support/procedural/src/pallet/expand/documentation.rs index ec19f889a9f20..e2c72ee921c46 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/documentation.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/documentation.rs @@ -163,9 +163,9 @@ pub fn expand_documentation(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub fn pallet_documentation_metadata() - -> #frame_support::__private::sp_std::vec::Vec<&'static str> + -> #frame_support::__private::Vec<&'static str> { - #frame_support::__private::sp_std::vec![ #( #docs ),* ] + #frame_support::__private::vec![ #( #docs ),* ] } } ) diff --git a/substrate/frame/support/procedural/src/pallet/expand/error.rs b/substrate/frame/support/procedural/src/pallet/expand/error.rs index 72fb6e9235723..05478ee390841 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/error.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/error.rs @@ -66,28 +66,30 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] #[codec(skip)] __Ignore( - #frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen)>, + core::marker::PhantomData<(#type_use_gen)>, #frame_support::Never, ) ); - let as_str_matches = error.variants.iter().map( - |VariantDef { ident: variant, field: field_ty, docs: _, cfg_attrs }| { - let variant_str = variant.to_string(); - let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); - match field_ty { - Some(VariantField { is_named: true }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) - }, - Some(VariantField { is_named: false }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) 
=> #variant_str,) - }, - None => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) - }, - } - }, - ); + let as_str_matches = + error + .variants + .iter() + .map(|VariantDef { ident: variant, field: field_ty, cfg_attrs }| { + let variant_str = variant.to_string(); + let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); + match field_ty { + Some(VariantField { is_named: true }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) + }, + Some(VariantField { is_named: false }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) => #variant_str,) + }, + None => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) + }, + } + }); let error_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; @@ -122,11 +124,11 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { } quote::quote_spanned!(error.attr_span => - impl<#type_impl_gen> #frame_support::__private::sp_std::fmt::Debug for #error_ident<#type_use_gen> + impl<#type_impl_gen> core::fmt::Debug for #error_ident<#type_use_gen> #config_where_clause { - fn fmt(&self, f: &mut #frame_support::__private::sp_std::fmt::Formatter<'_>) - -> #frame_support::__private::sp_std::fmt::Result + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) + -> core::fmt::Result { f.write_str(self.as_str()) } diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 3623b595268d0..d7b1ca14f574b 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -255,7 +255,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<#frame_support::__private::sp_std::vec::Vec, #frame_support::sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result<#frame_support::__private::Vec, #frame_support::sp_runtime::TryRuntimeError> { < Self as @@ -264,7 +264,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: #frame_support::__private::sp_std::vec::Vec) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { + fn post_upgrade(state: #frame_support::__private::Vec) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { #post_storage_version_check < diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 7cdf6bde9de87..7ebc4bb2e9dc2 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -54,7 +54,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if let Some(field) = pallet_item.fields.iter_mut().next() { if field.ty == syn::parse_quote!(_) { field.ty = syn::parse_quote!( - #frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen)> + core::marker::PhantomData<(#type_use_gen)> ); } } @@ -139,10 +139,10 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #storages_where_clauses { fn storage_info() - -> #frame_support::__private::sp_std::vec::Vec<#frame_support::traits::StorageInfo> + -> #frame_support::__private::Vec<#frame_support::traits::StorageInfo> { 
#[allow(unused_mut)] - let mut res = #frame_support::__private::sp_std::vec![]; + let mut res = #frame_support::__private::vec![]; #( #(#storage_cfg_attrs)* @@ -179,8 +179,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { let whitelisted_storage_keys_impl = quote::quote![ use #frame_support::traits::{StorageInfoTrait, TrackedStorageKey, WhitelistedStorageKeys}; impl<#type_impl_gen> WhitelistedStorageKeys for #pallet_ident<#type_use_gen> #storages_where_clauses { - fn whitelisted_storage_keys() -> #frame_support::__private::sp_std::vec::Vec { - use #frame_support::__private::sp_std::vec; + fn whitelisted_storage_keys() -> #frame_support::__private::Vec { + use #frame_support::__private::vec; vec![#( TrackedStorageKey::new(#whitelisted_storage_idents::<#type_use_gen>::hashed_key().to_vec()) ),*] @@ -272,7 +272,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn infos() -> #frame_support::__private::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { + fn infos() -> #frame_support::__private::Vec<#frame_support::traits::PalletInfoData> { use #frame_support::traits::PalletInfoAccess; let item = #frame_support::traits::PalletInfoData { index: Self::index(), @@ -280,7 +280,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - #frame_support::__private::sp_std::vec![item] + #frame_support::__private::vec![item] } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 3cc8a843e3b16..267b0f2dd3ba4 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -423,7 +423,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #(#cfg_attrs)* { <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( - #frame_support::__private::sp_std::vec![ + #frame_support::__private::vec![ #( #docs, )* ], &mut entries, @@ -853,7 +853,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #frame_support::traits::TryDecodeEntireStorage for #pallet_ident<#type_use_gen> #completed_where_clause { - fn try_decode_entire_state() -> Result> { + fn try_decode_entire_state() -> Result> { let pallet_name = <::PalletInfo as #frame_support::traits::PalletInfo> ::name::<#pallet_ident<#type_use_gen>>() .expect("Every active pallet has a name in the runtime; qed"); @@ -861,7 +861,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode pallet: {pallet_name}"); // NOTE: for now, we have to exclude storage items that are feature gated. - let mut errors = #frame_support::__private::sp_std::vec::Vec::new(); + let mut errors = #frame_support::__private::Vec::new(); let mut decoded = 0usize; #( @@ -902,7 +902,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .expect("No name found for the pallet in the runtime! 
This usually means that the pallet wasn't added to `construct_runtime!`."), entries: { #[allow(unused_mut)] - let mut entries = #frame_support::__private::sp_std::vec![]; + let mut entries = #frame_support::__private::vec![]; #( #entries_builder )* entries }, diff --git a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs index 6697e5c822a31..7201c352d92cd 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs @@ -159,7 +159,6 @@ impl ToTokens for TasksDef { let task_fn_names = self.tasks.iter().map(|task| &task.item.sig.ident); let task_arg_names = self.tasks.iter().map(|task| &task.arg_names).collect::>(); - let sp_std = quote!(#scrate::__private::sp_std); let impl_generics = &self.item_impl.generics; tokens.extend(quote! { impl #impl_generics #enum_use @@ -169,13 +168,13 @@ impl ToTokens for TasksDef { impl #impl_generics #scrate::traits::Task for #enum_use { - type Enumeration = #sp_std::vec::IntoIter<#enum_use>; + type Enumeration = #scrate::__private::IntoIter<#enum_use>; fn iter() -> Self::Enumeration { - let mut all_tasks = #sp_std::vec![]; + let mut all_tasks = #scrate::__private::vec![]; #(all_tasks .extend(#task_iters.map(|(#(#task_arg_names),*)| #enum_ident::#task_fn_idents { #(#task_arg_names: #task_arg_names.clone()),* }) - .collect::<#sp_std::vec::Vec<_>>()); + .collect::<#scrate::__private::Vec<_>>()); )* all_tasks.into_iter() } diff --git a/substrate/frame/support/procedural/src/pallet/parse/composite.rs b/substrate/frame/support/procedural/src/pallet/parse/composite.rs index c3ac74846bf7c..20fc30cd26b1f 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/composite.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/composite.rs @@ -87,8 +87,6 @@ pub mod keyword { } pub struct CompositeDef { - /// The index of the CompositeDef item in the pallet module. - pub index: usize, /// The composite keyword used (contains span). pub composite_keyword: keyword::CompositeKeyword, /// Name of the associated type. @@ -104,7 +102,6 @@ pub struct CompositeDef { impl CompositeDef { pub fn try_from( attr_span: proc_macro2::Span, - index: usize, scrate: &syn::Path, item: &mut syn::Item, ) -> syn::Result { @@ -180,7 +177,6 @@ impl CompositeDef { syn::parse2::(item.ident.to_token_stream())?; Ok(CompositeDef { - index, composite_keyword, attr_span, generics: item.generics.clone(), diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index eaeaab2475880..6febaac9ffa32 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -62,8 +62,6 @@ pub struct ConfigDef { pub has_event_type: bool, /// The where clause on trait definition but modified so `Self` is `T`. pub where_clause: Option, - /// The span of the pallet::config attribute. - pub attr_span: proc_macro2::Span, /// Whether a default sub-trait should be generated. /// /// Contains default sub-trait items (instantiated by `#[pallet::config(with_default)]`). 
@@ -325,7 +323,6 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS impl ConfigDef { pub fn try_from( frame_system: &syn::Path, - attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, enable_default: bool, @@ -484,7 +481,6 @@ impl ConfigDef { consts_metadata, has_event_type, where_clause, - attr_span, default_sub_trait, }) } diff --git a/substrate/frame/support/procedural/src/pallet/parse/error.rs b/substrate/frame/support/procedural/src/pallet/parse/error.rs index 362df8d7340ce..bc4087a0ea763 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/error.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/error.rs @@ -16,7 +16,6 @@ // limitations under the License. use super::helper; -use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use syn::{spanned::Spanned, Fields}; @@ -37,8 +36,6 @@ pub struct VariantDef { pub ident: syn::Ident, /// The variant field, if any. pub field: Option, - /// The variant doc literals. - pub docs: Vec, /// The `cfg` attributes. pub cfg_attrs: Vec, } @@ -101,12 +98,7 @@ impl ErrorDef { } let cfg_attrs: Vec = helper::get_item_cfg_attrs(&variant.attrs); - Ok(VariantDef { - ident: variant.ident.clone(), - field: field_ty, - docs: get_doc_literals(&variant.attrs), - cfg_attrs, - }) + Ok(VariantDef { ident: variant.ident.clone(), field: field_ty, cfg_attrs }) }) .collect::>()?; diff --git a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs index 2ba6c44b7d158..12a373db180c3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -37,8 +37,6 @@ pub struct ExtraConstantsDef { pub where_clause: Option, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, - /// The index of call item in pallet module. - pub index: usize, /// The extra constant defined. pub extra_constants: Vec, } @@ -77,7 +75,7 @@ impl syn::parse::Parse for ExtraConstAttr { } impl ExtraConstantsDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -150,11 +148,6 @@ impl ExtraConstantsDef { }); } - Ok(Self { - index, - instances, - where_clause: item.generics.where_clause.clone(), - extra_constants, - }) + Ok(Self { instances, where_clause: item.generics.where_clause.clone(), extra_constants }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs index d0e1d9ec998ec..bc925a21c9c8e 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Definition for pallet genesis build implementation. pub struct GenesisBuildDef { - /// The index of item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Option>, /// The where_clause used. 
@@ -31,11 +29,7 @@ pub struct GenesisBuildDef { } impl GenesisBuildDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -56,6 +50,6 @@ impl GenesisBuildDef { let instances = helper::check_genesis_builder_usage(item_trait)?.map(|instances| vec![instances]); - Ok(Self { attr_span, index, instances, where_clause: item.generics.where_clause.clone() }) + Ok(Self { attr_span, instances, where_clause: item.generics.where_clause.clone() }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs index 37d7d22f4b6bb..07b51c8b91fa8 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Implementation of the pallet hooks. pub struct HooksDef { - /// The index of item in pallet. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, /// The where_clause used. @@ -33,11 +31,7 @@ pub struct HooksDef { } impl HooksDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -77,7 +71,6 @@ impl HooksDef { Ok(Self { attr_span, - index, instances, has_runtime_upgrade, where_clause: item.generics.where_clause.clone(), diff --git a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs index d8641691a40e3..56ebe8e5df433 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs @@ -20,14 +20,12 @@ use syn::spanned::Spanned; /// The definition of the pallet inherent implementation. pub struct InherentDef { - /// The index of inherent item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. 
pub instances: Vec, } impl InherentDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -55,6 +53,6 @@ impl InherentDef { helper::check_impl_gen(&item.generics, item.impl_token.span())?, ]; - Ok(InherentDef { index, instances }) + Ok(InherentDef { instances }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 6e12774611ddf..f55b166c7917c 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -109,10 +109,9 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span, with_default)) if config.is_none() => + Some(PalletAttr::Config(_, with_default)) if config.is_none() => config = Some(config::ConfigDef::try_from( &frame_system, - span, index, item, with_default, @@ -122,7 +121,7 @@ impl Def { pallet_struct = Some(p); }, Some(PalletAttr::Hooks(span)) if hooks.is_none() => { - let m = hooks::HooksDef::try_from(span, index, item)?; + let m = hooks::HooksDef::try_from(span, item)?; hooks = Some(m); }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => @@ -162,27 +161,27 @@ impl Def { genesis_config = Some(g); }, Some(PalletAttr::GenesisBuild(span)) if genesis_build.is_none() => { - let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; + let g = genesis_build::GenesisBuildDef::try_from(span, item)?; genesis_build = Some(g); }, Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => - origin = Some(origin::OriginDef::try_from(index, item)?), + origin = Some(origin::OriginDef::try_from(item)?), Some(PalletAttr::Inherent(_)) if inherent.is_none() => - inherent = Some(inherent::InherentDef::try_from(index, item)?), + inherent = Some(inherent::InherentDef::try_from(item)?), Some(PalletAttr::Storage(span)) => storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { - let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; + let v = validate_unsigned::ValidateUnsignedDef::try_from(item)?; validate_unsigned = Some(v); }, Some(PalletAttr::TypeValue(span)) => type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), Some(PalletAttr::ExtraConstants(_)) => extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(extra_constants::ExtraConstantsDef::try_from(item)?), Some(PalletAttr::Composite(span)) => { let composite = - composite::CompositeDef::try_from(span, index, &frame_support, item)?; + composite::CompositeDef::try_from(span, &frame_support, item)?; if composites.iter().any(|def| { match (&def.composite_keyword, &composite.composite_keyword) { ( @@ -722,7 +721,6 @@ impl syn::parse::Parse for PalletAttr { #[derive(Clone)] pub struct InheritedCallWeightAttr { pub typename: syn::Type, - pub span: proc_macro2::Span, } impl syn::parse::Parse for InheritedCallWeightAttr { @@ -744,6 +742,6 @@ impl syn::parse::Parse for InheritedCallWeightAttr { return Err(lookahead.error()) }; - Ok(Self { typename: buffer.parse()?, span: input.span() }) + Ok(Self { typename: buffer.parse()? 
}) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/origin.rs b/substrate/frame/support/procedural/src/pallet/parse/origin.rs index 76e2a8841196b..11311b3d5033c 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/origin.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/origin.rs @@ -25,16 +25,13 @@ use syn::spanned::Spanned; /// * `struct Origin` /// * `enum Origin` pub struct OriginDef { - /// The index of item in pallet module. - pub index: usize, - pub has_instance: bool, pub is_generic: bool, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, } impl OriginDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item_span = item.span(); let (vis, ident, generics) = match &item { syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), @@ -46,7 +43,6 @@ impl OriginDef { }, }; - let has_instance = generics.params.len() == 2; let is_generic = !generics.params.is_empty(); let mut instances = vec![]; @@ -67,6 +63,6 @@ impl OriginDef { return Err(syn::Error::new(ident.span(), msg)) } - Ok(OriginDef { index, has_instance, is_generic, instances }) + Ok(OriginDef { is_generic, instances }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs index 6405bb415a6f1..ed860849a4db4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs @@ -34,8 +34,8 @@ use syn::{ parse2, spanned::Spanned, token::{Bracket, Paren, PathSep, Pound}, - Attribute, Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, - PathArguments, Result, TypePath, + Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, PathArguments, + Result, TypePath, }; pub mod keywords { @@ -180,7 +180,6 @@ pub struct TaskDef { pub condition_attr: TaskConditionAttr, pub list_attr: TaskListAttr, pub weight_attr: TaskWeightAttr, - pub normal_attrs: Vec, pub item: ImplItemFn, pub arg_names: Vec, } @@ -190,7 +189,7 @@ impl syn::parse::Parse for TaskDef { let item = input.parse::()?; // we only want to activate TaskAttrType parsing errors for tasks-related attributes, // so we filter them here - let (task_attrs, normal_attrs) = partition_task_attrs(&item); + let task_attrs = partition_task_attrs(&item).0; let task_attrs: Vec = task_attrs .into_iter() @@ -293,15 +292,7 @@ impl syn::parse::Parse for TaskDef { let list_attr = list_attr.try_into().expect("we check the type above; QED"); let weight_attr = weight_attr.try_into().expect("we check the type above; QED"); - Ok(TaskDef { - index_attr, - condition_attr, - list_attr, - weight_attr, - normal_attrs, - item, - arg_names, - }) + Ok(TaskDef { index_attr, condition_attr, list_attr, weight_attr, item, arg_names }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs index 9f14362840473..7df91ae777d7f 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/tests/tasks.rs @@ -124,10 +124,10 @@ fn test_parse_pallet_manual_tasks_impl_without_manual_tasks_enum() { where T: TypeInfo, { - type Enumeration = sp_std::vec::IntoIter>; + type Enumeration = alloc::vec::IntoIter>; fn iter() -> Self::Enumeration { - sp_std::vec![Task::increment, 
Task::decrement].into_iter() + alloc::vec![Task::increment, Task::decrement].into_iter() } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs index 4d9db30b3a788..b9c0635bb3f5e 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs @@ -28,12 +28,8 @@ pub struct TypeValueDef { pub ident: syn::Ident, /// The type return by Get. pub type_: Box, - /// The block returning the value to get - pub block: Box, /// If type value is generic over `T` (or `T` and `I` for instantiable pallet) pub is_generic: bool, - /// A set of usage of instance, must be check for consistency with config. - pub instances: Vec, /// The where clause of the function. pub where_clause: Option, /// The span of the pallet::type_value attribute. @@ -90,7 +86,6 @@ impl TypeValueDef { let vis = item.vis.clone(); let ident = item.sig.ident.clone(); - let block = item.block.clone(); let type_ = match item.sig.output.clone() { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { @@ -99,25 +94,11 @@ impl TypeValueDef { }, }; - let mut instances = vec![]; - if let Some(usage) = helper::check_type_value_gen(&item.sig.generics, item.sig.span())? { - instances.push(usage); - } + helper::check_type_value_gen(&item.sig.generics, item.sig.span())?; let is_generic = item.sig.generics.type_params().count() > 0; let where_clause = item.sig.generics.where_clause.clone(); - Ok(TypeValueDef { - attr_span, - index, - is_generic, - vis, - ident, - block, - type_, - instances, - where_clause, - docs, - }) + Ok(TypeValueDef { attr_span, index, is_generic, vis, ident, type_, where_clause, docs }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs index 2bf0a1b6c1886..038db0d325813 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -19,15 +19,10 @@ use super::helper; use syn::spanned::Spanned; /// The definition of the pallet validate unsigned implementation. -pub struct ValidateUnsignedDef { - /// The index of validate unsigned item in pallet module. - pub index: usize, - /// A set of usage of instance, must be check for consistency with config. 
- pub instances: Vec, -} +pub struct ValidateUnsignedDef {} impl ValidateUnsignedDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -52,11 +47,9 @@ impl ValidateUnsignedDef { return Err(syn::Error::new(item.span(), msg)) } - let instances = vec![ - helper::check_pallet_struct_usage(&item.self_ty)?, - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - ]; + helper::check_pallet_struct_usage(&item.self_ty)?; + helper::check_impl_gen(&item.generics, item.impl_token.span())?; - Ok(ValidateUnsignedDef { index, instances }) + Ok(ValidateUnsignedDef {}) } } diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 43f11896808c7..f34ab1cef5436 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -99,14 +99,20 @@ fn construct_runtime_implicit_to_explicit( for pallet in definition.pallet_decls.iter() { let pallet_path = &pallet.path; let pallet_name = &pallet.name; - let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(<#instance>)); + let runtime_param = &pallet.runtime_param; + let pallet_segment_and_instance = match (&pallet.pallet_segment, &pallet.instance) { + (Some(segment), Some(instance)) => quote::quote!(::#segment<#runtime_param, #instance>), + (Some(segment), None) => quote::quote!(::#segment<#runtime_param>), + (None, Some(instance)) => quote::quote!(<#instance>), + (None, None) => quote::quote!(), + }; expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_default_parts_v2 }] your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! { target = [{ #expansion }] - pattern = [{ #pallet_name = #pallet_path #pallet_instance }] + pattern = [{ #pallet_name = #pallet_path #pallet_segment_and_instance }] } } ); @@ -274,7 +280,7 @@ fn construct_runtime_final_expansion( #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] - fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + fn runtime_metadata(&self) -> #scrate::__private::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { Default::default() } } diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs index dd83cd0da90a2..a3d1c9417df81 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs @@ -109,7 +109,6 @@ pub enum AllPalletsDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. #[derive(Debug, Clone)] pub struct ImplicitAllPalletsDeclaration { - pub name: Ident, pub pallet_decls: Vec, pub pallet_count: usize, } @@ -123,7 +122,6 @@ pub struct ExplicitAllPalletsDeclaration { pub struct Def { pub input: TokenStream2, - pub item: syn::ItemMod, pub runtime_struct: runtime_struct::RuntimeStructDef, pub pallets: AllPalletsDeclaration, pub runtime_types: Vec, @@ -161,8 +159,8 @@ impl Def { helper::take_first_item_runtime_attr::(item)? 
{ match runtime_attr { - RuntimeAttr::Runtime(span) if runtime_struct.is_none() => { - let p = runtime_struct::RuntimeStructDef::try_from(span, item)?; + RuntimeAttr::Runtime(_) if runtime_struct.is_none() => { + let p = runtime_struct::RuntimeStructDef::try_from(item)?; runtime_struct = Some(p); }, RuntimeAttr::Derive(_, types) if runtime_types.is_none() => { @@ -189,7 +187,7 @@ impl Def { match *pallet_item.ty.clone() { syn::Type::Path(ref path) => { let pallet_decl = - PalletDeclaration::try_from(item.span(), &pallet_item, path)?; + PalletDeclaration::try_from(item.span(), &pallet_item, &path.path)?; if let Some(used_pallet) = names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) @@ -240,7 +238,6 @@ impl Def { let decl_count = pallet_decls.len(); let pallets = if decl_count > 0 { AllPalletsDeclaration::Implicit(ImplicitAllPalletsDeclaration { - name, pallet_decls, pallet_count: decl_count.saturating_add(pallets.len()), }) @@ -250,7 +247,6 @@ impl Def { let def = Def { input, - item, runtime_struct: runtime_struct.ok_or_else(|| { syn::Error::new(item_span, "Missing Runtime. Please add a struct inside the module and annotate it with `#[runtime::runtime]`" @@ -267,3 +263,24 @@ impl Def { Ok(def) } } + +#[test] +fn runtime_parsing_works() { + let def = Def::try_from(syn::parse_quote! { + #[runtime::runtime] + mod runtime { + #[runtime::derive(RuntimeCall, RuntimeEvent)] + #[runtime::runtime] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + #[runtime::pallet_index(1)] + pub type Pallet1 = pallet1; + } + }) + .expect("Failed to parse runtime definition"); + + assert_eq!(def.runtime_struct.ident, "Runtime"); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index 09f5290541d3a..ebfd0c9ccceed 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -15,10 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}; +use crate::{ + construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}, + runtime::parse::PalletDeclaration, +}; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments}; +use syn::{punctuated::Punctuated, token, Error}; impl Pallet { pub fn try_from( @@ -55,20 +58,10 @@ impl Pallet { "Invalid pallet declaration, expected a path or a trait object", ))?; - let mut instance = None; - if let Some(segment) = path.inner.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) - { - if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { - args, .. - }) = segment.arguments.clone() - { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = - Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); - segment.arguments = PathArguments::None; - } - } - } + let PalletDeclaration { path: inner, instance, .. } = + PalletDeclaration::try_from(attr_span, item, &path.inner)?; + + path = PalletPath { inner }; pallet_parts = pallet_parts .into_iter() @@ -101,3 +94,95 @@ impl Pallet { }) } } + +#[test] +fn pallet_parsing_works() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! 
{ + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn pallet_parsing_works_with_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance_and_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = + Pallet::try_from(proc_macro2::Span::call_site(), &item, index, false, false, &bounds) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs index e167d37d5f140..d34df77b7cfc1 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs @@ -15,18 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use quote::ToTokens; -use syn::{spanned::Spanned, Attribute, Ident, PathArguments}; +use syn::{Ident, PathArguments}; /// The declaration of a pallet. #[derive(Debug, Clone)] pub struct PalletDeclaration { /// The name of the pallet, e.g.`System` in `pub type System = frame_system`. pub name: Ident, - /// Optional attributes tagged right above a pallet declaration. - pub attrs: Vec, /// The path of the pallet, e.g. 
`frame_system` in `pub type System = frame_system`. pub path: syn::Path, + /// The segment of the pallet, e.g. `Pallet` in `pub type System = frame_system::Pallet`. + pub pallet_segment: Option, + /// The runtime parameter of the pallet, e.g. `Runtime` in + /// `pub type System = frame_system::Pallet`. + pub runtime_param: Option, /// The instance of the pallet, e.g. `Instance1` in `pub type Council = /// pallet_collective`. pub instance: Option, @@ -36,26 +38,135 @@ impl PalletDeclaration { pub fn try_from( _attr_span: proc_macro2::Span, item: &syn::ItemType, - path: &syn::TypePath, + path: &syn::Path, ) -> syn::Result { let name = item.ident.clone(); - let mut path = path.path.clone(); + let mut path = path.clone(); + let mut pallet_segment = None; + let mut runtime_param = None; let mut instance = None; if let Some(segment) = path.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) { if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { args, .. }) = segment.arguments.clone() { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = - Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); + if segment.ident == "Pallet" { + let mut segment = segment.clone(); segment.arguments = PathArguments::None; + pallet_segment = Some(segment.clone()); + } + let mut args_iter = args.iter(); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + let ident = arg_path.path.require_ident()?.clone(); + if segment.ident == "Pallet" { + runtime_param = Some(ident); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + instance = Some(arg_path.path.require_ident()?.clone()); + } + } else { + instance = Some(ident); + segment.arguments = PathArguments::None; + } } } } - Ok(Self { name, path, instance, attrs: item.attrs.clone() }) + if pallet_segment.is_some() { + path = syn::Path { + leading_colon: None, + segments: path + .segments + .iter() + .filter(|seg| seg.arguments.is_empty()) + .cloned() + .collect(), + }; + } + + Ok(Self { name, path, pallet_segment, runtime_param, instance }) } } + +#[test] +fn declaration_works() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn declaration_works_with_pallet() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! 
{ frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = + syn::PathSegment { ident: parse_quote! { Pallet }, arguments: PathArguments::None }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_pallet_and_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! { frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = + syn::PathSegment { ident: parse_quote! { Pallet }, arguments: PathArguments::None }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs index 8fa746ee80727..33c845ee946b5 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs @@ -18,11 +18,10 @@ use syn::spanned::Spanned; pub struct RuntimeStructDef { pub ident: syn::Ident, - pub attr_span: proc_macro2::Span, } impl RuntimeStructDef { - pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Struct(item) = item { item } else { @@ -30,6 +29,6 @@ impl RuntimeStructDef { return Err(syn::Error::new(item.span(), msg)) }; - Ok(Self { ident: item.ident.clone(), attr_span }) + Ok(Self { ident: item.ident.clone() }) } } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index a75307aca79b6..f672740e57e85 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -proc-macro-crate = "3.0.0" -proc-macro2 = "1.0.56" +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "full", "visit"], workspace = true } -frame-support-procedural-tools-derive = { path = "derive" } +frame-support-procedural-tools-derive = { workspace = true, default-features = true } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index b39d99a822fb7..2292c2a7c7247 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } quote = { features = ["proc-macro"], workspace = true } syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 
4a313551aca63..351ba3a15efcc 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -20,6 +20,7 @@ use crate::traits::UnfilteredDispatchable; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::fmt; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -28,7 +29,6 @@ use sp_runtime::{ traits::SignedExtension, DispatchError, RuntimeDebug, }; -use sp_std::fmt; use sp_weights::Weight; /// The return type of a `Dispatchable` in frame. When returned explicitly from @@ -220,14 +220,14 @@ pub trait OneOrMany { } impl OneOrMany for DispatchClass { - type Iter = sp_std::iter::Once; + type Iter = core::iter::Once; fn into_iter(self) -> Self::Iter { - sp_std::iter::once(self) + core::iter::once(self) } } impl<'a> OneOrMany for &'a [DispatchClass] { - type Iter = sp_std::iter::Cloned>; + type Iter = core::iter::Cloned>; fn into_iter(self) -> Self::Iter { self.iter().cloned() } diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs index 254302c8f14d2..b34c6bdada3d4 100644 --- a/substrate/frame/support/src/dispatch_context.rs +++ b/substrate/frame/support/src/dispatch_context.rs @@ -81,11 +81,11 @@ //! In your pallet you will only have to use [`with_context`], because as described above //! [`run_in_context`] will be handled by FRAME for you. -use sp_std::{ - any::{Any, TypeId}, +use alloc::{ boxed::Box, collections::btree_map::{BTreeMap, Entry}, }; +use core::any::{Any, TypeId}; environmental::environmental!(DISPATCH_CONTEXT: BTreeMap>); @@ -158,7 +158,7 @@ pub fn with_context(callback: impl FnOnce(&mut Value) -> R) -> if value.is_none() { log::error!( "Failed to downcast value for type {} in dispatch context!", - sp_std::any::type_name::(), + core::any::type_name::(), ); } diff --git a/substrate/frame/support/src/genesis_builder_helper.rs b/substrate/frame/support/src/genesis_builder_helper.rs index 7389c5a787d76..662ea2cb1862d 100644 --- a/substrate/frame/support/src/genesis_builder_helper.rs +++ b/substrate/frame/support/src/genesis_builder_helper.rs @@ -41,7 +41,7 @@ pub fn build_state(json: Vec) -> BuildResult { /// to [`sp_genesis_builder::GenesisBuilder::get_preset`]. pub fn get_preset( name: &Option, - preset_for_name: impl FnOnce(&sp_genesis_builder::PresetId) -> Option>, + preset_for_name: impl FnOnce(&sp_genesis_builder::PresetId) -> Option>, ) -> Option> where GC: BuildGenesisConfig + Default, diff --git a/substrate/frame/support/src/hash.rs b/substrate/frame/support/src/hash.rs index 9c48f4b187ad3..a09890560c644 100644 --- a/substrate/frame/support/src/hash.rs +++ b/substrate/frame/support/src/hash.rs @@ -17,10 +17,10 @@ //! Hash utilities. +use alloc::vec::Vec; use codec::{Codec, MaxEncodedLen}; use sp_io::hashing::{blake2_128, blake2_256, twox_128, twox_256, twox_64}; use sp_metadata_ir as metadata_ir; -use sp_std::prelude::Vec; // This trait must be kept coherent with frame-support-procedural HasherKind usage pub trait Hashable: Sized { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 8ae1f56b4d686..3ad8c5fabaa2c 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -33,11 +33,20 @@ #[doc(hidden)] extern crate self as frame_support; +#[doc(hidden)] +extern crate alloc; + /// Private exports that are being used by macros. /// /// The exports are not stable and should not be relied on. 
#[doc(hidden)] pub mod __private { + pub use alloc::{ + boxed::Box, + rc::Rc, + vec, + vec::{IntoIter, Vec}, + }; pub use codec; pub use frame_metadata as metadata; pub use log; @@ -261,7 +270,7 @@ macro_rules! parameter_types { ) => ( $( #[ $attr ] )* $vis struct $name $( - < $($ty_params),* >( $($crate::__private::sp_std::marker::PhantomData<$ty_params>),* ) + < $($ty_params),* >( $(core::marker::PhantomData<$ty_params>),* ) )?; $crate::parameter_types!(IMPL_CONST $name , $type , $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); @@ -273,7 +282,7 @@ macro_rules! parameter_types { ) => ( $( #[ $attr ] )* $vis struct $name $( - < $($ty_params),* >( $($crate::__private::sp_std::marker::PhantomData<$ty_params>),* ) + < $($ty_params),* >( $(core::marker::PhantomData<$ty_params>),* ) )?; $crate::parameter_types!(IMPL $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); @@ -285,7 +294,7 @@ macro_rules! parameter_types { ) => ( $( #[ $attr ] )* $vis struct $name $( - < $($ty_params),* >( $($crate::__private::sp_std::marker::PhantomData<$ty_params>),* ) + < $($ty_params),* >( $(core::marker::PhantomData<$ty_params>),* ) )?; $crate::parameter_types!(IMPL_STORAGE $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); @@ -468,7 +477,7 @@ macro_rules! ord_parameter_types { (IMPL $name:ident , $type:ty , $value:expr) => { impl $crate::traits::SortedMembers<$type> for $name { fn contains(t: &$type) -> bool { &$value == t } - fn sorted_members() -> $crate::__private::sp_std::prelude::Vec<$type> { vec![$value] } + fn sorted_members() -> $crate::__private::Vec<$type> { vec![$value] } fn count() -> usize { 1 } #[cfg(feature = "runtime-benchmarks")] fn add(_: &$type) {} @@ -499,7 +508,7 @@ macro_rules! runtime_print { } /// Print out the debuggable type. -pub fn debug(data: &impl sp_std::fmt::Debug) { +pub fn debug(data: &impl core::fmt::Debug) { runtime_print!("{:?}", data); } @@ -508,6 +517,29 @@ pub use frame_support_procedural::{ construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; +/// Construct a runtime, with the given name and the given pallets. +/// +/// # Example: +#[doc = docify::embed!("src/tests/runtime.rs", runtime_macro)] +/// +/// # Supported Attributes: +/// +/// ## Legacy Ordering +/// +/// An optional attribute can be defined as #[frame_support::runtime(legacy_ordering)] to +/// ensure that the order of hooks is same as the order of pallets (and not based on the +/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. +/// +/// # Note +/// +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to their `Pallet`. E.g. 
`type System = +/// frame_system::Pallet` pub use frame_support_procedural::runtime; #[doc(hidden)] @@ -903,6 +935,7 @@ pub mod pallet_prelude { pub use codec::{Decode, Encode, MaxEncodedLen}; pub use frame_support::pallet_macros::*; + pub use core::marker::PhantomData; /// The optional attribute `#[inject_runtime_type]` can be attached to `RuntimeCall`, /// `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo` in an impl statement that has /// `#[register_default_impl]` attached to indicate that this item is generated by @@ -931,7 +964,6 @@ pub mod pallet_prelude { }, DispatchError, RuntimeDebug, MAX_MODULE_ERROR_ENCODED_SIZE, }; - pub use sp_std::marker::PhantomData; pub use sp_weights::Weight; } @@ -1262,7 +1294,7 @@ pub mod pallet_macros { /// # use frame_support::pallet_prelude::*; /// # use frame_support::inherent::IsFatalError; /// # use sp_timestamp::InherentError; - /// # use sp_std::result; + /// # use core::result; /// # /// // Example inherent identifier /// pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; @@ -1888,7 +1920,7 @@ pub mod pallet_macros { /// Field types in enum variants must also implement [`frame_support::PalletError`], /// otherwise the pallet will fail to compile. Rust primitive types have already /// implemented the [`frame_support::PalletError`] trait along with some commonly used - /// stdlib types such as [`Option`] and [`sp_std::marker::PhantomData`], and hence + /// stdlib types such as [`Option`] and [`core::marker::PhantomData`], and hence /// in most use cases, a manual implementation is not necessary and is discouraged. /// /// The generic `T` must not bound anything and a `where` clause is not allowed. That said, @@ -2274,6 +2306,18 @@ pub mod pallet_macros { /// } /// ``` /// + /// ### Value Trait Bounds + /// + /// To use a type as the value of a storage type, be it `StorageValue`, `StorageMap` or + /// anything else, you need to meet a number of trait bound constraints. + /// + /// See: . + /// + /// Notably, all value types need to implement `Encode`, `Decode`, `MaxEncodedLen` and + /// `TypeInfo`, and possibly `Default`, if + /// [`ValueQuery`](frame_support::storage::types::ValueQuery) is used, explained in the + /// next section. + /// /// ### QueryKind /// /// Every storage type mentioned above has a generic type called diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 968639e02d35b..7f74614695640 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -17,20 +17,21 @@ use crate::{ defensive, - storage::transactional::with_transaction_opaque_err, + storage::{storage_prefix, transactional::with_transaction_opaque_err}, traits::{ Defensive, GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, SafeMode, StorageVersion, }, weights::{RuntimeDbWeight, Weight, WeightMeter}, }; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; +use core::marker::PhantomData; use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::Bounded; use sp_core::Get; use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; use sp_runtime::traits::Zero; -use sp_std::{marker::PhantomData, vec::Vec}; /// Handles storage migration pallet versioning. 
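// Editor's illustrative sketch (not part of the diff): the "Value Trait Bounds" note added
// above in practice means a storage value type derives the SCALE and metadata traits, plus
// `Default` when it is used with `ValueQuery`. `Counter` is a hypothetical example type.
use frame_support::pallet_prelude::*;

#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Default)]
pub struct Counter {
	pub value: u32,
}

// Inside a `#[frame_support::pallet]` module it could then back a storage item such as
// `#[pallet::storage] pub type StoredCounter<T> = StorageValue<_, Counter, ValueQuery>;`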
/// @@ -71,7 +72,7 @@ use sp_std::{marker::PhantomData, vec::Vec}; /// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 /// mod version_unchecked { /// use super::*; -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); +/// pub struct VersionUncheckedMigrateV5ToV6(core::marker::PhantomData); /// impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { /// // `UncheckedOnRuntimeUpgrade` implementation... /// } @@ -102,7 +103,7 @@ pub struct VersionedMigration), + MigrationExecuted(alloc::vec::Vec), /// This migration is a noop, do not run post_upgrade checks. Noop, } @@ -125,7 +126,7 @@ impl< /// [`VersionedPostUpgradeData`] before passing them to post_upgrade, so it knows whether the /// migration ran or not. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let on_chain_version = Pallet::on_chain_storage_version(); if on_chain_version == FROM { Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) @@ -175,7 +176,7 @@ impl< /// the migration ran, and [`VersionedPostUpgradeData::Noop`] otherwise. #[cfg(feature = "try-runtime")] fn post_upgrade( - versioned_post_upgrade_data_bytes: sp_std::vec::Vec, + versioned_post_upgrade_data_bytes: alloc::vec::Vec, ) -> Result<(), sp_runtime::TryRuntimeError> { use codec::DecodeAll; match ::decode_all(&mut &versioned_post_upgrade_data_bytes[..]) @@ -339,7 +340,7 @@ impl, DbWeight: Get> frame_support::traits } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { use crate::storage::unhashed::contains_prefixed_key; let hashed_prefix = twox_128(P::get().as_bytes()); @@ -350,11 +351,11 @@ impl, DbWeight: Get> frame_support::traits P::get() ), }; - Ok(sp_std::vec::Vec::new()) + Ok(alloc::vec::Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: sp_std::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { + fn post_upgrade(_state: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { use crate::storage::unhashed::contains_prefixed_key; let hashed_prefix = twox_128(P::get().as_bytes()); @@ -369,6 +370,118 @@ impl, DbWeight: Get> frame_support::traits } } +/// `RemoveStorage` is a utility struct used to remove a storage item from a specific pallet. +/// +/// This struct is generic over three parameters: +/// - `P` is a type that implements the [`Get`] trait for a static string, representing the pallet's +/// name. +/// - `S` is a type that implements the [`Get`] trait for a static string, representing the storage +/// name. +/// - `DbWeight` is a type that implements the [`Get`] trait for [`RuntimeDbWeight`], providing the +/// weight for database operations. +/// +/// On runtime upgrade, the `on_runtime_upgrade` function will clear the storage from the specified +/// storage, logging the number of keys removed. If the `try-runtime` feature is enabled, the +/// `pre_upgrade` and `post_upgrade` functions can be used to verify the storage removal before and +/// after the upgrade. +/// +/// # Examples: +/// ```ignore +/// construct_runtime! { +/// pub enum Runtime +/// { +/// System: frame_system = 0, +/// +/// SomePallet: pallet_something = 1, +/// +/// YourOtherPallets... +/// } +/// }; +/// +/// parameter_types! 
{ +/// pub const SomePallet: &'static str = "SomePallet"; +/// pub const StorageAccounts: &'static str = "Accounts"; +/// pub const StorageAccountCount: &'static str = "AccountCount"; +/// } +/// +/// pub type Migrations = ( +/// RemoveStorage, +/// RemoveStorage, +/// AnyOtherMigrations... +/// ); +/// +/// pub type Executive = frame_executive::Executive< +/// Runtime, +/// Block, +/// frame_system::ChainContext, +/// Runtime, +/// Migrations +/// >; +/// ``` +/// +/// WARNING: `RemoveStorage` has no guard rails preventing it from bricking the chain if the +/// operation of removing storage for the given pallet would exceed the block weight limit. +/// +/// If your storage has too many keys to be removed in a single block, it is advised to wait for +/// a multi-block scheduler currently under development which will allow for removal of storage +/// items (and performing other heavy migrations) over multiple blocks +/// (see ). +pub struct RemoveStorage, S: Get<&'static str>, DbWeight: Get>( + PhantomData<(P, S, DbWeight)>, +); +impl, S: Get<&'static str>, DbWeight: Get> + frame_support::traits::OnRuntimeUpgrade for RemoveStorage +{ + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + let keys_removed = match clear_prefix(&hashed_prefix, None) { + KillStorageResult::AllRemoved(value) => value, + KillStorageResult::SomeRemaining(value) => { + log::error!( + "`clear_prefix` failed to remove all keys for storage `{}` from pallet `{}`. THIS SHOULD NEVER HAPPEN! 🚨", + S::get(), P::get() + ); + value + }, + } as u64; + + log::info!("Removed `{}` `{}` `{}` keys 🧹", keys_removed, P::get(), S::get()); + + DbWeight::get().reads_writes(keys_removed + 1, keys_removed) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => log::info!("Found `{}` `{}` keys pre-removal 👀", P::get(), S::get()), + false => log::warn!( + "Migration RemoveStorage<{}, {}> can be removed (no keys found pre-removal).", + P::get(), + S::get() + ), + }; + Ok(Default::default()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use crate::storage::unhashed::contains_prefixed_key; + + let hashed_prefix = storage_prefix(P::get().as_bytes(), S::get().as_bytes()); + match contains_prefixed_key(&hashed_prefix) { + true => { + log::error!("`{}` `{}` has keys remaining post-removal ❗", P::get(), S::get()); + return Err("Keys remaining post-removal, this should never happen 🚨".into()) + }, + false => log::info!("No `{}` `{}` keys found post-removal 🎉", P::get(), S::get()), + }; + Ok(()) + } +} + /// A migration that can proceed in multiple steps. pub trait SteppedMigration { /// The cursor type that stores the progress (aka. state) of this migration.
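// Editor's illustrative sketch (not part of the diff): wiring a bare `UncheckedOnRuntimeUpgrade`
// implementation (like the `VersionUncheckedMigrateV5ToV6` named in the docs above) into
// `VersionedMigration`, which adds the version bookkeeping. `InnerMigrateV5ToV6` and the pallet
// parameter `P` are hypothetical; the generic order (FROM, TO, Inner, Pallet, Weight) follows the
// `VersionedMigration` struct shown earlier in this file.
use core::marker::PhantomData;
use frame_support::{
	migrations::VersionedMigration, traits::UncheckedOnRuntimeUpgrade, weights::Weight,
};

/// Hypothetical inner migration: only the storage transformation, no version handling.
pub struct InnerMigrateV5ToV6<T>(PhantomData<T>);

impl<T: frame_system::Config> UncheckedOnRuntimeUpgrade for InnerMigrateV5ToV6<T> {
	fn on_runtime_upgrade() -> Weight {
		// ... transform the pallet's storage from the V5 layout to the V6 layout here ...
		Weight::zero()
	}
}

/// Runs the inner migration only when the on-chain `StorageVersion` of pallet `P` is 5,
/// and bumps it to 6 afterwards.
pub type MigrateV5ToV6<T, P> =
	VersionedMigration<5, 6, InnerMigrateV5ToV6<T>, P, <T as frame_system::Config>::DbWeight>;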
diff --git a/substrate/frame/support/src/storage/bounded_btree_map.rs b/substrate/frame/support/src/storage/bounded_btree_map.rs index 91196be9e802c..d3560dd5f0d4d 100644 --- a/substrate/frame/support/src/storage/bounded_btree_map.rs +++ b/substrate/frame/support/src/storage/bounded_btree_map.rs @@ -26,9 +26,9 @@ impl StorageDecodeLength for BoundedBTreeMap {} pub mod test { use super::*; use crate::Twox128; + use alloc::collections::btree_map::BTreeMap; use frame_support::traits::{ConstU32, Get}; use sp_io::TestExternalities; - use sp_std::collections::btree_map::BTreeMap; #[crate::storage_alias] type Foo = StorageValue>>; diff --git a/substrate/frame/support/src/storage/bounded_btree_set.rs b/substrate/frame/support/src/storage/bounded_btree_set.rs index cf801eb47874f..70ab2304cab01 100644 --- a/substrate/frame/support/src/storage/bounded_btree_set.rs +++ b/substrate/frame/support/src/storage/bounded_btree_set.rs @@ -26,9 +26,9 @@ impl StorageDecodeNonDedupLength for BoundedBTreeSet {} pub mod test { use super::*; use crate::Twox128; + use alloc::collections::btree_set::BTreeSet; use frame_support::traits::{ConstU32, Get}; use sp_io::TestExternalities; - use sp_std::collections::btree_set::BTreeSet; #[crate::storage_alias] type Foo = StorageValue>>; diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs index 76e6f4ee4023e..5ebba26936585 100644 --- a/substrate/frame/support/src/storage/child.rs +++ b/substrate/frame/support/src/storage/child.rs @@ -21,10 +21,10 @@ // NOTE: could replace unhashed by having only one kind of storage (top trie being the child info // of null length parent storage key). +use alloc::vec::Vec; use codec::{Codec, Decode, Encode}; pub use sp_core::storage::{ChildInfo, ChildType, StateVersion}; pub use sp_io::{KillStorageResult, MultiRemovalResults}; -use sp_std::prelude::*; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(child_info: &ChildInfo, key: &[u8]) -> Option { diff --git a/substrate/frame/support/src/storage/generator/double_map.rs b/substrate/frame/support/src/storage/generator/double_map.rs index a4c1f58203e3c..b68f3fa495ff9 100644 --- a/substrate/frame/support/src/storage/generator/double_map.rs +++ b/substrate/frame/support/src/storage/generator/double_map.rs @@ -20,8 +20,8 @@ use crate::{ storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, Never, }; +use alloc::vec::Vec; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; -use sp_std::prelude::*; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -516,6 +516,7 @@ mod test_iterators { unhashed, }, }; + use alloc::vec; use codec::Encode; #[test] diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs index 257aa7e7bcf9a..b41f9c7171670 100644 --- a/substrate/frame/support/src/storage/generator/map.rs +++ b/substrate/frame/support/src/storage/generator/map.rs @@ -20,9 +20,8 @@ use crate::{ storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, Never, }; +use alloc::vec::Vec; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; -#[cfg(not(feature = "std"))] -use sp_std::prelude::*; /// Generator for `StorageMap` used by `decl_storage`. 
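// Editor's illustrative sketch (not part of the diff): the `child` module touched above reads and
// writes a child trie identified by a `ChildInfo`. The trie name, key and value below are
// hypothetical, and the calls need externalities (e.g. `TestExternalities`) to actually execute.
use frame_support::storage::child::{self, ChildInfo};

fn child_trie_roundtrip() -> Option<u32> {
	let info = ChildInfo::new_default(b"example_child_trie");
	// Encodes the value and stores it under `key` in the child trie.
	child::put(&info, b"key", &42u32);
	// Reads it back, returning `None` if the key is absent.
	child::get::<u32>(&info, b"key")
}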
/// @@ -80,7 +79,7 @@ pub struct StorageMapIterator { prefix: Vec, previous_key: Vec, drain: bool, - _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, + _phantom: ::core::marker::PhantomData<(K, V, Hasher)>, } impl Iterator @@ -370,6 +369,7 @@ mod test_iterators { unhashed, }, }; + use alloc::vec; use codec::Encode; #[test] diff --git a/substrate/frame/support/src/storage/generator/mod.rs b/substrate/frame/support/src/storage/generator/mod.rs index dd6d622852db1..b0b1bda24bb74 100644 --- a/substrate/frame/support/src/storage/generator/mod.rs +++ b/substrate/frame/support/src/storage/generator/mod.rs @@ -36,6 +36,7 @@ pub use value::StorageValue; #[cfg(test)] mod tests { + use alloc::vec::Vec; use codec::Encode; use sp_io::TestExternalities; use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; diff --git a/substrate/frame/support/src/storage/generator/nmap.rs b/substrate/frame/support/src/storage/generator/nmap.rs index 4b49ad3eb38d4..0466583a27955 100755 --- a/substrate/frame/support/src/storage/generator/nmap.rs +++ b/substrate/frame/support/src/storage/generator/nmap.rs @@ -40,9 +40,8 @@ use crate::{ }, Never, }; +use alloc::vec::Vec; use codec::{Decode, Encode, EncodeLike, FullCodec}; -#[cfg(not(feature = "std"))] -use sp_std::prelude::*; /// Generator for `StorageNMap` used by `decl_storage` and storage types. /// @@ -464,6 +463,7 @@ mod test_iterators { unhashed, }, }; + use alloc::vec; use codec::Encode; #[test] diff --git a/substrate/frame/support/src/storage/hashed.rs b/substrate/frame/support/src/storage/hashed.rs index 6633adce8ff65..d823eb9887994 100644 --- a/substrate/frame/support/src/storage/hashed.rs +++ b/substrate/frame/support/src/storage/hashed.rs @@ -18,8 +18,8 @@ //! Operation on runtime storage using hashed keys. use super::unhashed; +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::prelude::*; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. 
pub fn get(hash: &HashFn, key: &[u8]) -> Option diff --git a/substrate/frame/support/src/storage/migration.rs b/substrate/frame/support/src/storage/migration.rs index 252625cf4f7d0..1dd690c3ed67b 100644 --- a/substrate/frame/support/src/storage/migration.rs +++ b/substrate/frame/support/src/storage/migration.rs @@ -22,8 +22,8 @@ use crate::{ storage::{storage_prefix, unhashed}, StorageHasher, Twox128, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; -use sp_std::prelude::*; use super::PrefixIterator; @@ -32,7 +32,7 @@ pub struct StorageIterator { prefix: Vec, previous_key: Vec, drain: bool, - _phantom: ::sp_std::marker::PhantomData, + _phantom: ::core::marker::PhantomData, } impl StorageIterator { @@ -95,7 +95,7 @@ pub struct StorageKeyIterator { prefix: Vec, previous_key: Vec, drain: bool, - _phantom: ::sp_std::marker::PhantomData<(K, T, H)>, + _phantom: ::core::marker::PhantomData<(K, T, H)>, } impl StorageKeyIterator { diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index f7d7447482d06..7fb991d377923 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -24,10 +24,11 @@ use crate::{ ReversibleKeyGenerator, TupleToEncodedIter, }, }; +use alloc::{collections::btree_set::BTreeSet, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use core::marker::PhantomData; use sp_core::storage::ChildInfo; use sp_runtime::generic::{Digest, DigestItem}; -use sp_std::{collections::btree_set::BTreeSet, marker::PhantomData, prelude::*}; pub use self::{ stream_iter::StorageStreamIter, diff --git a/substrate/frame/support/src/storage/storage_noop_guard.rs b/substrate/frame/support/src/storage/storage_noop_guard.rs index c4d40fa99a35c..23201e58db5b7 100644 --- a/substrate/frame/support/src/storage/storage_noop_guard.rs +++ b/substrate/frame/support/src/storage/storage_noop_guard.rs @@ -38,7 +38,7 @@ /// ``` #[must_use] pub struct StorageNoopGuard<'a> { - storage_root: sp_std::vec::Vec, + storage_root: alloc::vec::Vec, error_message: &'a str, } @@ -71,7 +71,8 @@ impl<'a> StorageNoopGuard<'a> { impl<'a> Drop for StorageNoopGuard<'a> { fn drop(&mut self) { // No need to double panic, eg. inside a test assertion failure. - if sp_std::thread::panicking() { + #[cfg(feature = "std")] + if std::thread::panicking() { return } assert_eq!( @@ -85,9 +86,10 @@ impl<'a> Drop for StorageNoopGuard<'a> { #[cfg(test)] mod tests { - use super::*; use sp_io::TestExternalities; + use super::*; + #[test] #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] fn storage_noop_guard_panics_on_changed() { @@ -112,7 +114,7 @@ mod tests { TestExternalities::default().execute_with(|| { let guard = StorageNoopGuard::default(); frame_support::storage::unhashed::put(b"key", b"value"); - sp_std::mem::drop(guard); + std::mem::drop(guard); frame_support::storage::unhashed::kill(b"key"); }); } @@ -122,7 +124,7 @@ mod tests { TestExternalities::default().execute_with(|| { let guard = StorageNoopGuard::default(); frame_support::storage::unhashed::put(b"key", b"value"); - sp_std::mem::forget(guard); + std::mem::forget(guard); }); } diff --git a/substrate/frame/support/src/storage/stream_iter.rs b/substrate/frame/support/src/storage/stream_iter.rs index 529b2f387c71c..0d1e5582f841b 100644 --- a/substrate/frame/support/src/storage/stream_iter.rs +++ b/substrate/frame/support/src/storage/stream_iter.rs @@ -16,8 +16,8 @@ // limitations under the License. 
use crate::{BoundedBTreeMap, BoundedBTreeSet, BoundedVec, WeakBoundedVec}; +use alloc::vec::Vec; use codec::Decode; -use sp_std::vec::Vec; /// Provides the sealed trait `StreamIter`. mod private { @@ -26,7 +26,7 @@ mod private { /// Used as marker trait for types that support stream iteration. pub trait StreamIter { /// The actual iterator implementation. - type Iterator: sp_std::iter::Iterator; + type Iterator: core::iter::Iterator; /// Create the stream iterator for the value found at `key`. fn stream_iter(key: Vec) -> Self::Iterator; @@ -40,7 +40,7 @@ mod private { } } - impl StreamIter for sp_std::collections::btree_set::BTreeSet { + impl StreamIter for alloc::collections::btree_set::BTreeSet { type Iterator = ScaleContainerStreamIter; fn stream_iter(key: Vec) -> Self::Iterator { @@ -49,7 +49,7 @@ mod private { } impl StreamIter - for sp_std::collections::btree_map::BTreeMap + for alloc::collections::btree_map::BTreeMap { type Iterator = ScaleContainerStreamIter<(K, V)>; @@ -116,14 +116,14 @@ impl(len) ++ data`. -/// This type provides an [`Iterator`](sp_std::iter::Iterator) implementation that decodes +/// This type provides an [`Iterator`](core::iter::Iterator) implementation that decodes /// one item after another with each call to [`next`](Self::next). The bytes representing /// the container are also not read at once into memory and instead being read in chunks. As long /// as individual items are smaller than these chunks the memory usage of this iterator should /// be constant. On decoding errors [`next`](Self::next) will return `None` to signal that the /// iterator is finished. pub struct ScaleContainerStreamIter { - marker: sp_std::marker::PhantomData, + marker: core::marker::PhantomData, input: StorageInput, length: u32, read: u32, @@ -156,7 +156,7 @@ impl ScaleContainerStreamIter { 0 }; - Self { marker: sp_std::marker::PhantomData, input, length, read: 0 } + Self { marker: core::marker::PhantomData, input, length, read: 0 } } /// Creates a new instance of the stream iterator. @@ -168,11 +168,11 @@ impl ScaleContainerStreamIter { let mut input = StorageInput::new(key); let length = if input.exists() { codec::Compact::::decode(&mut input)?.0 } else { 0 }; - Ok(Self { marker: sp_std::marker::PhantomData, input, length, read: 0 }) + Ok(Self { marker: core::marker::PhantomData, input, length, read: 0 }) } } -impl sp_std::iter::Iterator for ScaleContainerStreamIter { +impl core::iter::Iterator for ScaleContainerStreamIter { type Item = T; fn next(&mut self) -> Option { @@ -235,7 +235,7 @@ impl StorageInput { /// /// - `key`: The storage key of the storage item that this input will read. 
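// Editor's illustrative sketch (not part of the diff): consuming a large SCALE-encoded collection
// through the stream iterator described above, so elements are decoded one at a time from chunked
// storage reads instead of decoding the whole `Vec` into memory at once. `Prefix` and `BigList`
// are hypothetical; the alias form mirrors the storage_alias tests elsewhere in this diff, and the
// read needs externalities to run.
use frame_support::{storage::StorageStreamIter, storage_alias};

#[storage_alias]
type BigList = StorageValue<Prefix, Vec<u32>>;

fn sum_big_list() -> u64 {
	// Each `next()` decodes a single `u32`; a decode error simply ends the iteration.
	BigList::stream_iter().map(u64::from).sum()
}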
fn new(key: Vec) -> Self { - let mut buffer = sp_std::vec![0; STORAGE_INPUT_BUFFER_CAPACITY]; + let mut buffer = alloc::vec![0; STORAGE_INPUT_BUFFER_CAPACITY]; unsafe { buffer.set_len(buffer.capacity()); } @@ -270,7 +270,7 @@ impl StorageInput { sp_io::storage::read(&self.key, &mut self.buffer[present_bytes..], self.offset) { let bytes_read = - sp_std::cmp::min(length_minus_offset as usize, self.buffer.len() - present_bytes); + core::cmp::min(length_minus_offset as usize, self.buffer.len() - present_bytes); let buffer_len = present_bytes + bytes_read; unsafe { self.buffer.set_len(buffer_len); diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 0444e269928ab..9adcb33ae0743 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -29,11 +29,11 @@ use crate::{ traits::{Get, GetDefault, StorageInfo, StorageInfoTrait, StorageInstance}, Never, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; use sp_io::MultiRemovalResults; use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; -use sp_std::prelude::*; /// A wrapper around a [`StorageMap`] and a [`StorageValue`] (with the value being `u32`) to keep /// track of how many items are in a map, without needing to iterate all the values. diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index 51cde93f28c01..13c1b10be39c4 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -28,10 +28,10 @@ use crate::{ traits::{Get, GetDefault, StorageInfo, StorageInstance}, Never, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; use sp_metadata_ir::StorageEntryMetadataIR; use sp_runtime::traits::Saturating; -use sp_std::prelude::*; /// A wrapper around a [`StorageNMap`] and a [`StorageValue`] (with the value being `u32`) to keep /// track of how many items are in a map, without needing to iterate all the values. @@ -683,6 +683,7 @@ mod test { hash::{StorageHasher as _, *}, storage::types::{Key as NMapKey, ValueQuery}, }; + use alloc::boxed::Box; use sp_io::{hashing::twox_128, TestExternalities}; use sp_metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR}; diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index 2a7af7a984633..3d227feb902f3 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -26,11 +26,11 @@ use crate::{ traits::{Get, GetDefault, StorageInfo, StorageInstance}, StorageHasher, Twox128, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; -use sp_std::prelude::*; /// A type representing a *double map* in storage. This structure associates a pair of keys with a /// value of a specified type stored on-chain. 
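For context on the `ScaleContainerStreamIter` documentation above: SCALE encodes a container as `Compact(len) ++ items`, which is what allows elements to be decoded one at a time instead of materialising the whole collection. A self-contained sketch of that idea with `parity-scale-codec`, decoding from a byte slice here, whereas the real iterator reads storage in chunks:

```rust
use codec::{Compact, Decode, Encode};

fn main() {
    // A container is SCALE-encoded as `Compact(len) ++ items`.
    let encoded = vec![1u32, 2, 3].encode();
    let mut input = &encoded[..];

    // Read the compact length prefix first ...
    let len = Compact::<u32>::decode(&mut input).expect("valid length prefix").0;

    // ... then decode one item per call, mirroring how the stream iterator
    // yields elements without decoding the whole container up front.
    for _ in 0..len {
        let item = u32::decode(&mut input).expect("valid item");
        println!("{item}");
    }
}
```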
diff --git a/substrate/frame/support/src/storage/types/key.rs b/substrate/frame/support/src/storage/types/key.rs index 90cf09dd1d341..b9b497b61d949 100755 --- a/substrate/frame/support/src/storage/types/key.rs +++ b/substrate/frame/support/src/storage/types/key.rs @@ -18,10 +18,10 @@ //! Storage key type. use crate::hash::{ReversibleStorageHasher, StorageHasher}; +use alloc::{boxed::Box, vec::Vec}; use codec::{Encode, EncodeLike, FullCodec, MaxEncodedLen}; use paste::paste; use scale_info::StaticTypeInfo; -use sp_std::prelude::*; /// A type used exclusively by storage maps as their key type. /// @@ -203,19 +203,19 @@ impl<'a, T: EncodeLike + EncodeLikeTuple, U: Encode> EncodeLikeTuple /// Trait to indicate that a tuple can be converted into an iterator of a vector of encoded bytes. pub trait TupleToEncodedIter { - fn to_encoded_iter(&self) -> sp_std::vec::IntoIter>; + fn to_encoded_iter(&self) -> alloc::vec::IntoIter>; } #[impl_trait_for_tuples::impl_for_tuples(1, 18)] #[tuple_types_custom_trait_bound(Encode)] impl TupleToEncodedIter for Tuple { - fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + fn to_encoded_iter(&self) -> alloc::vec::IntoIter> { [for_tuples!( #(self.Tuple.encode()),* )].to_vec().into_iter() } } impl TupleToEncodedIter for &T { - fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + fn to_encoded_iter(&self) -> alloc::vec::IntoIter> { (*self).to_encoded_iter() } } @@ -223,7 +223,7 @@ impl TupleToEncodedIter for &T { impl<'a, T: EncodeLike + TupleToEncodedIter, U: Encode> TupleToEncodedIter for codec::Ref<'a, T, U> { - fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + fn to_encoded_iter(&self) -> alloc::vec::IntoIter> { use core::ops::Deref as _; self.deref().to_encoded_iter() } diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index b79a6ae9b8482..b70026eea50e1 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -26,11 +26,11 @@ use crate::{ traits::{Get, GetDefault, StorageInfo, StorageInstance}, StorageHasher, Twox128, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; -use sp_std::prelude::*; /// A type representing a *map* in storage. A *storage map* is a mapping of keys to values of a /// given type stored on-chain. diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index 631410f425d17..b063e11621d61 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -18,9 +18,9 @@ //! Storage types to build abstraction on storage, they implements storage traits such as //! StorageMap and others. +use alloc::vec::Vec; use codec::FullCodec; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryModifierIR}; -use sp_std::prelude::*; mod counted_map; mod counted_nmap; @@ -93,7 +93,7 @@ where } /// Implements [`QueryKindTrait`] with `Query` type being `Result`. 
-pub struct ResultQuery(sp_std::marker::PhantomData); +pub struct ResultQuery(core::marker::PhantomData); impl QueryKindTrait for ResultQuery where Value: FullCodec + 'static, diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 253f02a14f079..c3dfd5b3e48c9 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -28,10 +28,10 @@ use crate::{ }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; use sp_runtime::SaturatedConversion; -use sp_std::prelude::*; /// A type representing an *NMap* in storage. This structure associates an arbitrary number of keys /// with a value of a specified type stored on-chain. @@ -655,6 +655,7 @@ mod test { hash::{StorageHasher as _, *}, storage::types::{Key as NMapKey, ValueQuery}, }; + use alloc::boxed::Box; use sp_io::{hashing::twox_128, TestExternalities}; use sp_metadata_ir::{StorageEntryModifierIR, StorageHasherIR}; diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index a2d93a6a165ff..9cc985b36d8c6 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -25,11 +25,11 @@ use crate::{ }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::storage::StorageDecodeNonDedupLength; use sp_arithmetic::traits::SaturatedConversion; use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; -use sp_std::prelude::*; /// A type representing a *value* in storage. A *storage value* is a single value of a given type /// stored on-chain. diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs index 776c7d0f3c3a8..7f9bc93d7d818 100644 --- a/substrate/frame/support/src/storage/unhashed.rs +++ b/substrate/frame/support/src/storage/unhashed.rs @@ -17,8 +17,8 @@ //! Operation on unhashed runtime storage. +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::prelude::*; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. 
pub fn get(key: &[u8]) -> Option { diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 88afa243f0932..34652231e3bce 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -27,6 +27,7 @@ use sp_runtime::{generic, traits::BlakeTwo256, BuildStorage}; pub use self::frame_system::{pallet_prelude::*, Config, Pallet}; mod inject_runtime_type; +mod runtime; mod storage_alias; mod tasks; @@ -220,12 +221,25 @@ type Header = generic::Header; type UncheckedExtrinsic = generic::UncheckedExtrinsic; type Block = generic::Block; -crate::construct_runtime!( - pub enum Runtime - { - System: self::frame_system, - } -); +#[crate::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = self::frame_system; +} #[crate::derive_impl(self::frame_system::config_preludes::TestDefaultConfig as self::frame_system::DefaultConfig)] impl Config for Runtime { diff --git a/substrate/frame/support/src/tests/runtime.rs b/substrate/frame/support/src/tests/runtime.rs new file mode 100644 index 0000000000000..a9d9281f50da3 --- /dev/null +++ b/substrate/frame/support/src/tests/runtime.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{frame_system, Block}; +use crate::derive_impl; + +#[crate::pallet(dev_mode)] +mod pallet_basic { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_basic::Config for Runtime {} + +#[crate::pallet(dev_mode)] +mod pallet_with_disabled_call { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_with_disabled_call::Config for Runtime {} + +#[crate::pallet(dev_mode)] +mod pallet_with_disabled_unsigned { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +impl pallet_with_disabled_unsigned::Config for Runtime {} + +#[crate::pallet] +mod pallet_with_instance { + use super::frame_system; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +#[allow(unused)] +type Instance1 = pallet_with_instance::Pallet; + +impl pallet_with_instance::Config for Runtime {} + +#[allow(unused)] +type Instance2 = pallet_with_instance::Pallet; + +impl pallet_with_instance::Config for Runtime {} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +#[docify::export(runtime_macro)] +#[crate::runtime] +mod runtime { + // The main runtime + #[runtime::runtime] + // Runtime Types to be generated + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + // Use the concrete pallet type + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + // Use path to the pallet + #[runtime::pallet_index(1)] + pub type Basic = pallet_basic; + + // Use the concrete pallet type with instance + #[runtime::pallet_index(2)] + pub type PalletWithInstance1 = pallet_with_instance::Pallet; + + // Use path to the pallet with instance + #[runtime::pallet_index(3)] + pub type PalletWithInstance2 = pallet_with_instance; + + // Ensure that the runtime does not export the calls from the pallet + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type PalletWithDisabledCall = pallet_with_disabled_call::Pallet; + + // Ensure that the runtime does not export the unsigned calls from the pallet + #[runtime::pallet_index(5)] + #[runtime::disable_unsigned] + pub type PalletWithDisabledUnsigned = pallet_with_disabled_unsigned::Pallet; +} diff --git a/substrate/frame/support/src/traits/dynamic_params.rs b/substrate/frame/support/src/traits/dynamic_params.rs index 32dae6799eaf7..3ef298fc5a5a0 100644 --- a/substrate/frame/support/src/traits/dynamic_params.rs +++ b/substrate/frame/support/src/traits/dynamic_params.rs @@ -85,7 +85,7 @@ impl AggregatedKeyValue for () { /// /// This concretization is useful when configuring pallets, since a pallet will require a parameter /// store for its own KV type and not the aggregated runtime-wide KV type. 
-pub struct ParameterStoreAdapter(sp_std::marker::PhantomData<(PS, KV)>); +pub struct ParameterStoreAdapter(core::marker::PhantomData<(PS, KV)>); impl ParameterStore for ParameterStoreAdapter where diff --git a/substrate/frame/support/src/traits/filter.rs b/substrate/frame/support/src/traits/filter.rs index 44f9f136cfc2a..ff62449847d25 100644 --- a/substrate/frame/support/src/traits/filter.rs +++ b/substrate/frame/support/src/traits/filter.rs @@ -18,7 +18,7 @@ //! Traits and associated utilities for dealing with abstract constraint filters. pub use super::members::Contains; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Trait to add a constraint onto the filter. pub trait FilterStack: Contains { @@ -103,7 +103,7 @@ macro_rules! impl_filter_stack { mod $module { #[allow(unused_imports)] use super::*; - use $crate::__private::sp_std::{boxed::Box, cell::RefCell, mem::{swap, take}, vec::Vec}; + use std::{boxed::Box, cell::RefCell, mem::{swap, take}, vec::Vec}; use $crate::traits::filter::{Contains, FilterStack}; thread_local! { diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index ccccc50632866..012a74d0ae92f 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -24,9 +24,10 @@ use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; -use sp_std::prelude::*; use sp_weights::WeightMeter; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -351,6 +352,7 @@ pub trait IntegrityTest { /// - [`crate::traits::misc::OffchainWorker`] /// - [`OnIdle`] /// - [`IntegrityTest`] +/// - [`OnPoll`] /// /// ## Ordering /// @@ -363,34 +365,32 @@ pub trait IntegrityTest { /// /// ```mermaid /// graph LR -/// Optional --> BeforeExtrinsics -/// BeforeExtrinsics --> Extrinsics -/// Extrinsics --> AfterExtrinsics -/// subgraph Optional +/// Optional --> Mandatory +/// Mandatory --> ExtrinsicsMandatory +/// ExtrinsicsMandatory --> Poll +/// Poll --> Extrinsics +/// Extrinsics --> AfterMandatory +/// AfterMandatory --> onIdle +/// +/// subgraph Optional /// OnRuntimeUpgrade /// end /// -/// subgraph BeforeExtrinsics +/// subgraph Mandatory /// OnInitialize /// end /// +/// subgraph ExtrinsicsMandatory +/// Inherent1 --> Inherent2 +/// end +/// /// subgraph Extrinsics /// direction TB -/// Inherent1 -/// Inherent2 -/// Extrinsic1 -/// Extrinsic2 -/// -/// Inherent1 --> Inherent2 -/// Inherent2 --> Extrinsic1 /// Extrinsic1 --> Extrinsic2 /// end /// -/// subgraph AfterExtrinsics -/// OnIdle +/// subgraph AfterMandatory /// OnFinalize -/// -/// OnIdle --> OnFinalize /// end /// ``` /// @@ -466,6 +466,8 @@ pub trait Hooks { /// /// Is not guaranteed to execute in a block and should therefore only be used in no-deadline /// scenarios. + /// + /// This is the non-mandatory version of [`Hooks::on_initialize`]. fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {} /// Hook executed when a code change (aka. 
a "runtime upgrade") is detected by the FRAME @@ -706,7 +708,7 @@ mod tests { #[test] fn on_idle_round_robin_works() { - static mut ON_IDLE_INVOCATION_ORDER: sp_std::vec::Vec<&str> = sp_std::vec::Vec::new(); + static mut ON_IDLE_INVOCATION_ORDER: alloc::vec::Vec<&str> = alloc::vec::Vec::new(); struct Test1; struct Test2; diff --git a/substrate/frame/support/src/traits/members.rs b/substrate/frame/support/src/traits/members.rs index 53de84ab22455..89b6b6cdfad95 100644 --- a/substrate/frame/support/src/traits/members.rs +++ b/substrate/frame/support/src/traits/members.rs @@ -17,10 +17,11 @@ //! Traits for dealing with the idea of membership. +use alloc::vec::Vec; +use core::marker::PhantomData; use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::AtLeast16BitUnsigned; use sp_runtime::DispatchResult; -use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. pub trait Contains { diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 2eec606b6d18b..d28716237119e 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -19,15 +19,15 @@ use super::storage::Footprint; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use core::{fmt::Debug, marker::PhantomData}; use scale_info::TypeInfo; use sp_core::{ConstU32, Get, TypedGet}; use sp_runtime::{traits::Convert, BoundedSlice, RuntimeDebug}; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use sp_weights::{Weight, WeightMeter}; /// Errors that can happen when attempting to process a message with /// [`ProcessMessage::process_message()`]. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)] pub enum ProcessMessageError { /// The message data format is unknown (e.g. unrecognised header) BadFormat, diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index 8bda4186bc967..1e46470a3911f 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -17,10 +17,11 @@ //! Traits for managing information attached to pallets and their constituents. +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; +use core::ops::Add; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; -use sp_std::{ops::Add, prelude::*}; /// Provides information about the pallet itself and its setup in the runtime. 
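Relating to the `Hooks` documentation changes above: `on_poll` is described as optional, weight-metered service work with no execution guarantee. A dependency-free sketch of that contract, using a stand-in meter rather than the real `sp_weights::WeightMeter`:

```rust
/// Stand-in for a weight meter: tracks how much of a budget is left.
struct Meter {
    remaining: u64,
}

impl Meter {
    /// Consume `weight` if the budget allows it; report whether it fit.
    fn try_consume(&mut self, weight: u64) -> bool {
        if self.remaining >= weight {
            self.remaining -= weight;
            true
        } else {
            false
        }
    }
}

/// Best-effort servicing in the spirit of `on_poll`: drain as many queued
/// items as the leftover block weight allows and stop once it runs out.
fn on_poll(queue: &mut Vec<u32>, meter: &mut Meter) {
    const WEIGHT_PER_ITEM: u64 = 10;
    while !queue.is_empty() && meter.try_consume(WEIGHT_PER_ITEM) {
        let item = queue.remove(0);
        println!("serviced item {item}");
    }
}

fn main() {
    let mut queue = vec![1, 2, 3, 4, 5];
    let mut meter = Meter { remaining: 25 };
    on_poll(&mut queue, &mut meter);
    // Only two items fit into the leftover weight; the rest wait for a later block.
    assert_eq!(queue, vec![3, 4, 5]);
}
```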
/// @@ -146,16 +147,16 @@ impl CrateVersion { } } -impl sp_std::cmp::Ord for CrateVersion { - fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { +impl Ord for CrateVersion { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { self.major .cmp(&other.major) .then_with(|| self.minor.cmp(&other.minor).then_with(|| self.patch.cmp(&other.patch))) } } -impl sp_std::cmp::PartialOrd for CrateVersion { - fn partial_cmp(&self, other: &Self) -> Option { +impl PartialOrd for CrateVersion { + fn partial_cmp(&self, other: &Self) -> Option { Some(::cmp(self, other)) } } @@ -248,7 +249,7 @@ impl PartialEq for StorageVersion { } impl PartialOrd for StorageVersion { - fn partial_cmp(&self, other: &u16) -> Option { + fn partial_cmp(&self, other: &u16) -> Option { Some(self.0.cmp(other)) } } diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index bc7407a7be624..7c8c22d1ae5a3 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -18,19 +18,20 @@ //! Smaller traits used in FRAME which don't need their own file. use crate::dispatch::{DispatchResult, Parameter}; +use alloc::{vec, vec::Vec}; use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, One, Saturating}; use sp_core::bounded::bounded_vec::TruncateFrom; +use core::cmp::Ordering; #[doc(hidden)] pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, }; use sp_runtime::{traits::Block as BlockT, DispatchError}; -use sp_std::{cmp::Ordering, prelude::*}; #[doc(hidden)] pub const DEFENSIVE_OP_PUBLIC_ERROR: &str = "a defensive failure has been triggered; please report the block number at https://github.com/paritytech/substrate/issues"; @@ -48,7 +49,7 @@ impl VariantCount for () { } /// Adapter for `Get` to access `VARIANT_COUNT` from `trait pub trait VariantCount {`. -pub struct VariantCountOf(sp_std::marker::PhantomData); +pub struct VariantCountOf(core::marker::PhantomData); impl Get for VariantCountOf { fn get() -> u32 { T::VARIANT_COUNT @@ -190,10 +191,10 @@ pub trait DefensiveOption { /// Defensively transform this option to a result, mapping `None` to the return value of an /// error closure. - fn defensive_ok_or_else E>(self, err: F) -> Result; + fn defensive_ok_or_else E>(self, err: F) -> Result; /// Defensively transform this option to a result, mapping `None` to a default value. - fn defensive_ok_or(self, err: E) -> Result; + fn defensive_ok_or(self, err: E) -> Result; /// Exactly the same as `map`, but it prints the appropriate warnings if the value being mapped /// is `None`. 
@@ -252,7 +253,7 @@ impl Defensive for Option { } } -impl Defensive for Result { +impl Defensive for Result { fn defensive_unwrap_or(self, or: T) -> T { match self { Ok(inner) => inner, @@ -307,7 +308,7 @@ impl Defensive for Result { } } -impl DefensiveResult for Result { +impl DefensiveResult for Result { fn defensive_map_err F>(self, o: O) -> Result { self.map_err(|e| { defensive!(e); @@ -357,7 +358,7 @@ impl DefensiveOption for Option { ) } - fn defensive_ok_or_else E>(self, err: F) -> Result { + fn defensive_ok_or_else E>(self, err: F) -> Result { self.ok_or_else(|| { let err_value = err(); defensive!(err_value); @@ -365,7 +366,7 @@ impl DefensiveOption for Option { }) } - fn defensive_ok_or(self, err: E) -> Result { + fn defensive_ok_or(self, err: E) -> Result { self.ok_or_else(|| { defensive!(err); err @@ -416,11 +417,11 @@ impl DefensiveSatura } fn defensive_saturating_accrue(&mut self, other: Self) { // Use `replace` here since `take` would require `T: Default`. - *self = sp_std::mem::replace(self, One::one()).defensive_saturating_add(other); + *self = core::mem::replace(self, One::one()).defensive_saturating_add(other); } fn defensive_saturating_reduce(&mut self, other: Self) { // Use `replace` here since `take` would require `T: Default`. - *self = sp_std::mem::replace(self, One::one()).defensive_saturating_sub(other); + *self = core::mem::replace(self, One::one()).defensive_saturating_sub(other); } fn defensive_saturating_inc(&mut self) { self.defensive_saturating_accrue(One::one()); @@ -510,7 +511,7 @@ pub trait DefensiveMin { impl DefensiveMin for T where - T: sp_std::cmp::PartialOrd, + T: PartialOrd, { fn defensive_min(self, other: T) -> Self { if self <= other { @@ -574,7 +575,7 @@ pub trait DefensiveMax { impl DefensiveMax for T where - T: sp_std::cmp::PartialOrd, + T: PartialOrd, { fn defensive_max(self, other: T) -> Self { if self >= other { @@ -1050,7 +1051,7 @@ impl TypeInfo for WrapperOpaque { #[derive(Debug, Eq, PartialEq, Default, Clone)] pub struct WrapperKeepOpaque { data: Vec, - _phantom: sp_std::marker::PhantomData, + _phantom: core::marker::PhantomData, } impl WrapperKeepOpaque { @@ -1073,7 +1074,7 @@ impl WrapperKeepOpaque { /// Create from the given encoded `data`. pub fn from_encoded(data: Vec) -> Self { - Self { data, _phantom: sp_std::marker::PhantomData } + Self { data, _phantom: core::marker::PhantomData } } } @@ -1100,7 +1101,7 @@ impl Encode for WrapperKeepOpaque { impl Decode for WrapperKeepOpaque { fn decode(input: &mut I) -> Result { - Ok(Self { data: Vec::::decode(input)?, _phantom: sp_std::marker::PhantomData }) + Ok(Self { data: Vec::::decode(input)?, _phantom: core::marker::PhantomData }) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -1212,8 +1213,8 @@ pub trait AccountTouch { #[cfg(test)] mod test { use super::*; + use core::marker::PhantomData; use sp_core::bounded::{BoundedSlice, BoundedVec}; - use sp_std::marker::PhantomData; #[test] fn defensive_assert_works() { diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs index 647af029c16dc..80020d8d00809 100644 --- a/substrate/frame/support/src/traits/preimages.rs +++ b/substrate/frame/support/src/traits/preimages.rs @@ -17,6 +17,7 @@ //! Stuff for dealing with hashed preimages. 
+use alloc::borrow::Cow; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::RuntimeDebug; @@ -24,7 +25,6 @@ use sp_runtime::{ traits::{ConstU32, Hash}, DispatchError, }; -use sp_std::borrow::Cow; pub type BoundedInline = crate::BoundedVec>; @@ -37,7 +37,7 @@ pub enum Bounded { /// A hash with no preimage length. We do not support creation of this except /// for transitioning from legacy state. In the future we will make this a pure /// `Dummy` item storing only the final `dummy` field. - Legacy { hash: H::Output, dummy: sp_std::marker::PhantomData }, + Legacy { hash: H::Output, dummy: core::marker::PhantomData }, /// A an bounded `Call`. Its encoding must be at most 128 bytes. Inline(BoundedInline), /// A hash of the call together with an upper limit for its size.` @@ -61,7 +61,7 @@ impl Bounded { { use Bounded::*; match self { - Legacy { hash, .. } => Legacy { hash, dummy: sp_std::marker::PhantomData }, + Legacy { hash, .. } => Legacy { hash, dummy: core::marker::PhantomData }, Inline(x) => Inline(x), Lookup { hash, len } => Lookup { hash, len }, } @@ -123,7 +123,7 @@ impl Bounded { /// Constructs a `Legacy` bounded item. #[deprecated = "This API is only for transitioning to Scheduler v3 API"] pub fn from_legacy_hash(hash: impl Into) -> Self { - Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData } + Self::Legacy { hash: hash.into(), dummy: core::marker::PhantomData } } } diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index f41c73fe69a88..a302e28d4ce24 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -19,10 +19,11 @@ #[allow(deprecated)] use super::PreimageProvider; +use alloc::vec::Vec; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::{fmt::Debug, result::Result}; use scale_info::TypeInfo; use sp_runtime::{traits::Saturating, DispatchError, RuntimeDebug}; -use sp_std::{fmt::Debug, prelude::*, result::Result}; /// Information relating to the period of a scheduled task. First item is the length of the /// period and the second is the number of times it should be executed in total before the task @@ -182,7 +183,7 @@ pub mod v1 { /// A type that can be used as a scheduler. pub trait Named { /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; + type Address: Codec + Clone + Eq + EncodeLike + core::fmt::Debug + MaxEncodedLen; /// Schedule a dispatch to happen at the beginning of some block in the future. /// @@ -353,7 +354,7 @@ pub mod v2 { /// A type that can be used as a scheduler. pub trait Named { /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; + type Address: Codec + Clone + Eq + EncodeLike + core::fmt::Debug + MaxEncodedLen; /// A means of expressing a call by the hash of its encoded data. type Hash; @@ -448,7 +449,7 @@ pub mod v3 { /// A type that can be used as a scheduler. pub trait Named { /// An address which can be used for removing a scheduled task. - type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + core::fmt::Debug; /// The hasher used in the runtime. 
type Hasher: sp_runtime::traits::Hash; diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs index 9e467aea4220d..22fb28e4c0e79 100644 --- a/substrate/frame/support/src/traits/storage.rs +++ b/substrate/frame/support/src/traits/storage.rs @@ -17,6 +17,7 @@ //! Traits for encoding data related to pallet's storage items. +use alloc::{collections::btree_set::BTreeSet, vec, vec::Vec}; use codec::{Encode, FullCodec, MaxEncodedLen}; use core::marker::PhantomData; use impl_trait_for_tuples::impl_for_tuples; @@ -27,7 +28,6 @@ use sp_runtime::{ traits::{Convert, Member, Saturating}, DispatchError, RuntimeDebug, }; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; /// An instance of a pallet in the storage. /// @@ -194,7 +194,7 @@ where } /// Some sort of cost taken from account temporarily in order to offset the cost to the chain of -/// holding some data [`Footprint`] in state. +/// holding some data `Footprint` (e.g. [`Footprint`]) in state. /// /// The cost may be increased, reduced or dropped entirely as the footprint changes. /// @@ -206,16 +206,20 @@ where /// treated as one*. Don't type to duplicate it, and remember to drop it when you're done with /// it. #[must_use] -pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLen { +pub trait Consideration: + Member + FullCodec + TypeInfo + MaxEncodedLen +{ /// Create a ticket for the `new` footprint attributable to `who`. This ticket *must* ultimately - /// be consumed through `update` or `drop` once the footprint changes or is removed. - fn new(who: &AccountId, new: Footprint) -> Result; + /// be consumed through `update` or `drop` once the footprint changes or is removed. `None` + /// implies no cost for a given footprint. + fn new(who: &AccountId, new: Footprint) -> Result, DispatchError>; /// Optionally consume an old ticket and alter the footprint, enforcing the new cost to `who` - /// and returning the new ticket (or an error if there was an issue). + /// and returning the new ticket (or an error if there was an issue). `None` implies no cost for + /// a given footprint. /// /// For creating tickets and dropping them, you can use the simpler `new` and `drop` instead. - fn update(self, who: &AccountId, new: Footprint) -> Result; + fn update(self, who: &AccountId, new: Footprint) -> Result, DispatchError>; /// Consume a ticket for some `old` footprint attributable to `who` which should now been freed. fn drop(self, who: &AccountId) -> Result<(), DispatchError>; @@ -230,12 +234,12 @@ pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLe } } -impl Consideration for () { - fn new(_: &A, _: Footprint) -> Result { - Ok(()) +impl Consideration for () { + fn new(_: &A, _: F) -> Result, DispatchError> { + Ok(Some(())) } - fn update(self, _: &A, _: Footprint) -> Result<(), DispatchError> { - Ok(()) + fn update(self, _: &A, _: F) -> Result, DispatchError> { + Ok(Some(())) } fn drop(self, _: &A) -> Result<(), DispatchError> { Ok(()) diff --git a/substrate/frame/support/src/traits/tasks.rs b/substrate/frame/support/src/traits/tasks.rs index 42b837e55970d..0b5d0c082509d 100644 --- a/substrate/frame/support/src/traits/tasks.rs +++ b/substrate/frame/support/src/traits/tasks.rs @@ -18,20 +18,22 @@ //! Contains the [`Task`] trait, which defines a general-purpose way for defining and executing //! service work, and supporting types. 
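On the reworked `Consideration` trait above: `new` and `update` now return `Option<Self>`, where `None` means the footprint carries no cost and therefore no ticket needs to be stored. A simplified, dependency-free sketch of the caller-side flow (the `Ticket` type and cost function are illustrative, not the frame-support implementation):

```rust
// Illustrative only: a toy "consideration" mirroring the Option<Ticket> contract.
#[derive(Debug, PartialEq)]
struct Ticket {
    amount: u64,
}

/// Cost attributed to a footprint; zero cost means "no ticket needed".
fn cost_of(footprint_bytes: u64) -> u64 {
    footprint_bytes / 10
}

fn new_ticket(footprint_bytes: u64) -> Option<Ticket> {
    let amount = cost_of(footprint_bytes);
    // `None` signals that nothing was charged for this footprint.
    (amount > 0).then_some(Ticket { amount })
}

fn update_ticket(old: Option<Ticket>, footprint_bytes: u64) -> Option<Ticket> {
    // The old ticket (if any) is consumed and replaced by one matching the
    // new footprint; shrinking to zero cost drops the charge entirely.
    drop(old);
    new_ticket(footprint_bytes)
}

fn main() {
    // A caller keeps an `Option<Ticket>` in sync with the footprint it covers.
    let ticket = new_ticket(5); // too small to be charged
    assert_eq!(ticket, None);

    let ticket = update_ticket(ticket, 120);
    assert_eq!(ticket, Some(Ticket { amount: 12 }));

    let ticket = update_ticket(ticket, 0);
    assert_eq!(ticket, None);
}
```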
+use alloc::{vec, vec::IntoIter}; use codec::FullCodec; +use core::{fmt::Debug, iter::Iterator}; use scale_info::TypeInfo; use sp_runtime::DispatchError; -use sp_std::{fmt::Debug, iter::Iterator, vec, vec::IntoIter}; use sp_weights::Weight; /// Contain's re-exports of all the supporting types for the [`Task`] trait. Used in the macro /// expansion of `RuntimeTask`. #[doc(hidden)] pub mod __private { + pub use alloc::{vec, vec::IntoIter}; pub use codec::FullCodec; + pub use core::{fmt::Debug, iter::Iterator}; pub use scale_info::TypeInfo; pub use sp_runtime::DispatchError; - pub use sp_std::{fmt::Debug, iter::Iterator, vec, vec::IntoIter}; pub use sp_weights::Weight; } diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs index 8842b20580181..138703cf1d135 100644 --- a/substrate/frame/support/src/traits/tokens.rs +++ b/substrate/frame/support/src/traits/tokens.rs @@ -30,8 +30,8 @@ pub use imbalance::Imbalance; pub mod pay; pub use misc::{ AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance, - ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision, - Preservation, Provenance, Restriction, UnityAssetBalanceConversion, UnityOrOuterConversion, - WithdrawConsequence, WithdrawReasons, + ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, IdAmount, Locker, + Precision, Preservation, Provenance, Restriction, UnityAssetBalanceConversion, + UnityOrOuterConversion, WithdrawConsequence, WithdrawReasons, }; pub use pay::{Pay, PayFromAccount, PaymentStatus}; diff --git a/substrate/frame/support/src/traits/tokens/currency/reservable.rs b/substrate/frame/support/src/traits/tokens/currency/reservable.rs index ff8b0c6eea838..60ea9a71805fc 100644 --- a/substrate/frame/support/src/traits/tokens/currency/reservable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/reservable.rs @@ -242,7 +242,7 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// /// All "anonymous" operations are then implemented as their named counterparts with the given `Id`. pub struct WithName( - sp_std::marker::PhantomData<(NamedReservable, Id, AccountId)>, + core::marker::PhantomData<(NamedReservable, Id, AccountId)>, ); impl< NamedReservable: NamedReservableCurrency, diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index 020dffe28c85b..41907b2aa009d 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -26,9 +26,9 @@ use crate::traits::{ misc::{SameOrOther, TryDrop}, tokens::{AssetId, Balance}, }; +use core::marker::PhantomData; use frame_support_procedural::{EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use sp_runtime::traits::Zero; -use sp_std::marker::PhantomData; /// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or /// debt (positive) imbalance. @@ -93,7 +93,7 @@ impl, OppositeOnDrop: HandleImbalance /// Forget the imbalance without invoking the on-drop handler. 
pub(crate) fn forget(imbalance: Self) { - sp_std::mem::forget(imbalance); + core::mem::forget(imbalance); } } @@ -108,7 +108,7 @@ impl, OppositeOnDrop: HandleImbalance fn drop_zero(self) -> Result<(), Self> { if self.amount.is_zero() { - sp_std::mem::forget(self); + core::mem::forget(self); Ok(()) } else { Err(self) @@ -118,7 +118,7 @@ impl, OppositeOnDrop: HandleImbalance fn split(self, amount: B) -> (Self, Self) { let first = self.amount.min(amount); let second = self.amount - first; - sp_std::mem::forget(self); + core::mem::forget(self); (Imbalance::new(first), Imbalance::new(second)) } @@ -130,19 +130,19 @@ impl, OppositeOnDrop: HandleImbalance fn merge(mut self, other: Self) -> Self { self.amount = self.amount.saturating_add(other.amount); - sp_std::mem::forget(other); + core::mem::forget(other); self } fn subsume(&mut self, other: Self) { self.amount = self.amount.saturating_add(other.amount); - sp_std::mem::forget(other); + core::mem::forget(other); } fn offset( self, other: Imbalance, ) -> SameOrOther> { let (a, b) = (self.amount, other.amount); - sp_std::mem::forget((self, other)); + core::mem::forget((self, other)); if a == b { SameOrOther::None diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 2aa53d622dbff..c9f366911a8b6 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -39,7 +39,7 @@ pub struct ItemOf< F: fungibles::Inspect, A: Get<>::AssetId>, AccountId, ->(sp_std::marker::PhantomData<(F, A, AccountId)>); +>(core::marker::PhantomData<(F, A, AccountId)>); impl< F: fungibles::Inspect, @@ -361,7 +361,7 @@ impl< } pub struct ConvertImbalanceDropHandler( - sp_std::marker::PhantomData<(AccountId, Balance, AssetIdType, AssetId, Handler)>, + core::marker::PhantomData<(AccountId, Balance, AssetIdType, AssetId, Handler)>, ); impl< diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index 01c3b9dfe46a5..f40e494b930d5 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -58,14 +58,19 @@ //! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at //! which point the holds will start to come into play. //! -//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a -//! specified block number. +//! - **Frozen Balance**: A freeze on a specified amount of an account's balance. Tokens that are +//! frozen cannot be transferred. //! //! Multiple freezes always operate over the same funds, so they "overlay" rather than //! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its //! funds for any reason down to 100 units, at which point the freezes will start to come into //! play. //! +//! It's important to note that the frozen balance can exceed the total balance of the account. +//! This is useful, eg, in cases where you want to prevent a user from transferring any fund. In +//! such a case, setting the frozen balance to `Balance::MAX` would serve that purpose +//! effectively. +//! //! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to //! create or keep an account open. This is to prevent "dust accounts" from filling storage. When //! the free plus the held balance (i.e. 
the total balance) falls below this, then the account is @@ -156,9 +161,9 @@ mod regular; mod union_of; use codec::{Decode, Encode, MaxEncodedLen}; +use core::marker::PhantomData; use frame_support_procedural::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::TypeInfo; -use sp_std::marker::PhantomData; use super::{ Fortitude::{Force, Polite}, @@ -198,31 +203,40 @@ use crate::{ MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, F, R, D))] +#[scale_info(skip_type_params(A, F, R, D, Fp))] #[codec(mel_bound())] -pub struct FreezeConsideration(F::Balance, PhantomData (A, R, D)>) +pub struct FreezeConsideration(F::Balance, PhantomData (A, R, D, Fp)>) where F: MutateFreeze; impl< A: 'static, F: 'static + MutateFreeze, R: 'static + Get, - D: 'static + Convert, - > Consideration for FreezeConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for FreezeConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); - F::increase_frozen(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + F::increase_frozen(&R::get(), who, new)?; + Ok(Some(Self(new, PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); if self.0 > new { F::decrease_frozen(&R::get(), who, self.0 - new)?; } else if new > self.0 { F::increase_frozen(&R::get(), who, new - self.0)?; } - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(new, PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::decrease_frozen(&R::get(), who, self.0).map(|_| ()) @@ -240,31 +254,43 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, F, R, D))] +#[scale_info(skip_type_params(A, F, R, D, Fp))] #[codec(mel_bound())] -pub struct HoldConsideration(F::Balance, PhantomData (A, R, D)>) +pub struct HoldConsideration( + F::Balance, + PhantomData (A, R, D, Fp)>, +) where F: MutateHold; impl< A: 'static, F: 'static + MutateHold, R: 'static + Get, - D: 'static + Convert, - > Consideration for HoldConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for HoldConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); - F::hold(&R::get(), who, new)?; - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + F::hold(&R::get(), who, new)?; + Ok(Some(Self(new, PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { let new = D::convert(footprint); if self.0 > new { F::release(&R::get(), who, self.0 - new, BestEffort)?; } else if new > self.0 { F::hold(&R::get(), who, new - self.0)?; } - Ok(Self(new, PhantomData)) + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(new, PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release(&R::get(), who, self.0, BestEffort).map(|_| ()) @@ -291,22 +317,34 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, Fx, Rx, D))] +#[scale_info(skip_type_params(A, Fx, Rx, D, Fp))] #[codec(mel_bound())] -pub struct LoneFreezeConsideration(PhantomData (A, Fx, Rx, D)>); +pub struct LoneFreezeConsideration(PhantomData (A, Fx, Rx, D, Fp)>); 
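Tying back to the frozen-balance notes added to `fungible/mod.rs` above: freezes overlay rather than stack, so only the largest freeze constrains the account, and it may legitimately exceed the total balance. A small sketch of that arithmetic, ignoring existential deposit and holds:

```rust
/// Freezes overlay: the untouchable amount is the single largest freeze,
/// not the sum of all freezes. A freeze above `total` simply locks everything.
fn spendable(total: u64, freezes: &[u64]) -> u64 {
    let frozen = freezes.iter().copied().max().unwrap_or(0);
    total.saturating_sub(frozen)
}

fn main() {
    // Three freezes of 100 units leave 100 units untouchable, not 300.
    assert_eq!(spendable(400, &[100, 100, 100]), 300);

    // A freeze may exceed the total balance, e.g. `u64::MAX` to block all transfers.
    assert_eq!(spendable(400, &[u64::MAX]), 0);
}
```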
impl< A: 'static, Fx: 'static + MutateFreeze, Rx: 'static + Get, - D: 'static + Convert, - > Consideration for LoneFreezeConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for LoneFreezeConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { ensure!(Fx::balance_frozen(&Rx::get(), who).is_zero(), DispatchError::Unavailable); - Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) + let new = D::convert(footprint); + if new.is_zero() { + Ok(None) + } else { + Fx::set_frozen(&Rx::get(), who, new, Polite).map(|_| Some(Self(PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { - Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + let new = D::convert(footprint); + let _ = Fx::set_frozen(&Rx::get(), who, new, Polite)?; + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { Fx::thaw(&Rx::get(), who).map(|_| ()) @@ -330,22 +368,34 @@ impl< MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(A, Fx, Rx, D))] +#[scale_info(skip_type_params(A, Fx, Rx, D, Fp))] #[codec(mel_bound())] -pub struct LoneHoldConsideration(PhantomData (A, Fx, Rx, D)>); +pub struct LoneHoldConsideration(PhantomData (A, Fx, Rx, D, Fp)>); impl< A: 'static, F: 'static + MutateHold, R: 'static + Get, - D: 'static + Convert, - > Consideration for LoneHoldConsideration + D: 'static + Convert, + Fp: 'static, + > Consideration for LoneHoldConsideration { - fn new(who: &A, footprint: Footprint) -> Result { + fn new(who: &A, footprint: Fp) -> Result, DispatchError> { ensure!(F::balance_on_hold(&R::get(), who).is_zero(), DispatchError::Unavailable); - F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) + let new = D::convert(footprint); + if new.is_zero() { + Ok(None) + } else { + F::set_on_hold(&R::get(), who, new).map(|_| Some(Self(PhantomData))) + } } - fn update(self, who: &A, footprint: Footprint) -> Result { - F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) + fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + let new = D::convert(footprint); + let _ = F::set_on_hold(&R::get(), who, new)?; + if new.is_zero() { + Ok(None) + } else { + Ok(Some(Self(PhantomData))) + } } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release_all(&R::get(), who, BestEffort).map(|_| ()) diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index c46614be4734c..54a04444649d2 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -36,9 +36,9 @@ use crate::{ SameOrOther, TryDrop, }, }; +use core::marker::PhantomData; use sp_arithmetic::traits::{CheckedAdd, CheckedSub, One}; use sp_runtime::{traits::Saturating, ArithmeticError, DispatchError, TokenError}; -use sp_std::marker::PhantomData; use super::{Credit, Debt, HandleImbalanceDrop, Imbalance}; diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index 63791b0522370..3adbbdda31431 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ 
b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -21,6 +21,7 @@ //! See the [`crate::traits::fungible`] doc for more information about fungible traits. use codec::{Decode, Encode, MaxEncodedLen}; +use core::cmp::Ordering; use frame_support::traits::{ fungible::imbalance, tokens::{ @@ -36,7 +37,6 @@ use sp_runtime::{ Either::{Left, Right}, RuntimeDebug, }; -use sp_std::cmp::Ordering; /// The `NativeOrWithId` enum classifies an asset as either `Native` to the current chain or as an /// asset with a specific ID. @@ -101,7 +101,7 @@ impl Convert, Either<(), AssetId>> for Nat /// - `AssetKind` is a superset type encompassing asset kinds from `Left` and `Right` sets. /// - `AccountId` is an account identifier type. pub struct UnionOf( - sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, + core::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, ); impl< @@ -664,7 +664,7 @@ pub struct ConvertImbalanceDropHandler< Balance, AssetId, AccountId, ->(sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, Balance, AssetId, AccountId)>); +>(core::marker::PhantomData<(Left, Right, Criterion, AssetKind, Balance, AssetId, AccountId)>); impl< Left: fungible::HandleImbalanceDrop, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index bb0d83721a481..c3b213cc8fc86 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -26,9 +26,9 @@ use crate::traits::{ misc::{SameOrOther, TryDrop}, tokens::{imbalance::Imbalance as ImbalanceT, AssetId, Balance}, }; +use core::marker::PhantomData; use frame_support_procedural::{EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use sp_runtime::traits::Zero; -use sp_std::marker::PhantomData; /// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or /// debt (positive) imbalance. @@ -98,12 +98,12 @@ impl< /// Forget the imbalance without invoking the on-drop handler. 
pub(crate) fn forget(imbalance: Self) { - sp_std::mem::forget(imbalance); + core::mem::forget(imbalance); } pub fn drop_zero(self) -> Result<(), Self> { if self.amount.is_zero() { - sp_std::mem::forget(self); + core::mem::forget(self); Ok(()) } else { Err(self) @@ -114,7 +114,7 @@ impl< let first = self.amount.min(amount); let second = self.amount - first; let asset = self.asset.clone(); - sp_std::mem::forget(self); + core::mem::forget(self); (Imbalance::new(asset.clone(), first), Imbalance::new(asset, second)) } @@ -129,7 +129,7 @@ impl< pub fn merge(mut self, other: Self) -> Result { if self.asset == other.asset { self.amount = self.amount.saturating_add(other.amount); - sp_std::mem::forget(other); + core::mem::forget(other); Ok(self) } else { Err((self, other)) @@ -138,7 +138,7 @@ impl< pub fn subsume(&mut self, other: Self) -> Result<(), Self> { if self.asset == other.asset { self.amount = self.amount.saturating_add(other.amount); - sp_std::mem::forget(other); + core::mem::forget(other); Ok(()) } else { Err(other) @@ -154,7 +154,7 @@ impl< if self.asset == other.asset { let (a, b) = (self.amount, other.amount); let asset = self.asset.clone(); - sp_std::mem::forget((self, other)); + core::mem::forget((self, other)); if a == b { Ok(SameOrOther::None) diff --git a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs index ab722426dadf6..27f663e575095 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -20,7 +20,7 @@ //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; -use sp_std::vec::Vec; +use alloc::vec::Vec; pub trait Inspect: super::Inspect { // Get name for an AssetId. diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index 946c4756cff60..3985da7856d75 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -19,7 +19,7 @@ //! //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. -use sp_std::marker::PhantomData; +use core::marker::PhantomData; use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index f4259a78f0a25..77047150e00ce 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -41,7 +41,7 @@ use sp_runtime::{ /// - `AssetKind` is a superset type encompassing asset kinds from `Left` and `Right` sets. /// - `AccountId` is an account identifier type. 
pub struct UnionOf( - sp_std::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, + core::marker::PhantomData<(Left, Right, Criterion, AssetKind, AccountId)>, ); impl< @@ -622,7 +622,7 @@ pub struct ConvertImbalanceDropHandler< Balance, AccountId, >( - sp_std::marker::PhantomData<( + core::marker::PhantomData<( Left, Right, LeftAssetId, diff --git a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs index ecb8de8841f91..4bf9af3fbb186 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs @@ -17,9 +17,9 @@ //! Trait for handling imbalances. +use core::marker::PhantomData; use frame_support::traits::{fungible, fungibles, misc::TryDrop}; use sp_core::TypedGet; -use sp_std::marker::PhantomData; /// Handler for when some currency "account" decreased in balance for /// some reason. diff --git a/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs index 03e821b161b69..eec892cc31154 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -20,8 +20,8 @@ use super::super::imbalance::Imbalance; use crate::traits::misc::SameOrOther; use codec::FullCodec; +use core::fmt::Debug; use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; -use sp_std::fmt::Debug; /// Either a positive or a negative imbalance. pub enum SignedImbalance> { diff --git a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs index 59a582389ba61..d79ae562ec676 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -18,8 +18,8 @@ //! Means for splitting an imbalance into two and handling them differently. use super::super::imbalance::{Imbalance, OnUnbalanced}; +use core::{marker::PhantomData, ops::Div}; use sp_runtime::traits::Saturating; -use sp_std::{marker::PhantomData, ops::Div}; /// Split an unbalanced amount two ways between a common divisor. pub struct SplitTwoWays( diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index 424acb1d550b1..9fa1df862097f 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -17,15 +17,15 @@ //! Miscellaneous types. -use crate::traits::Contains; +use crate::{traits::Contains, TypeInfo}; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use core::fmt::Debug; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; use sp_runtime::{ traits::{Convert, MaybeSerializeDeserialize}, ArithmeticError, DispatchError, TokenError, }; -use sp_std::fmt::Debug; /// The origin of funds to be used for a deposit operation. #[derive(Copy, Clone, RuntimeDebug, Eq, PartialEq)] @@ -351,9 +351,18 @@ pub trait GetSalary { } /// Adapter for a rank-to-salary `Convert` implementation into a `GetSalary` implementation. -pub struct ConvertRank(sp_std::marker::PhantomData); +pub struct ConvertRank(core::marker::PhantomData); impl> GetSalary for ConvertRank { fn get_salary(rank: R, _: &A) -> B { C::convert(rank) } } + +/// An identifier and balance. 
+#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct IdAmount { + /// An identifier for this item. + pub id: Id, + /// Some amount for this item. + pub amount: Balance, +} diff --git a/substrate/frame/support/src/traits/tokens/nonfungible.rs b/substrate/frame/support/src/traits/tokens/nonfungible.rs index e3fc84f1d57b2..249f84b227593 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungible.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungible.rs @@ -26,9 +26,9 @@ use super::nonfungibles; use crate::{dispatch::DispatchResult, traits::Get}; +use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_runtime::TokenError; -use sp_std::prelude::*; /// Trait for providing an interface to a read-only NFT-like set of items. pub trait Inspect { @@ -125,7 +125,7 @@ pub struct ItemOf< F: nonfungibles::Inspect, A: Get<>::CollectionId>, AccountId, ->(sp_std::marker::PhantomData<(F, A, AccountId)>); +>(core::marker::PhantomData<(F, A, AccountId)>); impl< F: nonfungibles::Inspect, diff --git a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs index 05f76e2859d2e..5775162e34ed0 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungible_v2.rs @@ -29,9 +29,9 @@ use crate::{ dispatch::{DispatchResult, Parameter}, traits::Get, }; +use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_runtime::TokenError; -use sp_std::prelude::*; /// Trait for providing an interface to a read-only NFT-like item. pub trait Inspect { @@ -207,7 +207,7 @@ pub struct ItemOf< F: nonfungibles::Inspect, A: Get<>::CollectionId>, AccountId, ->(sp_std::marker::PhantomData<(F, A, AccountId)>); +>(core::marker::PhantomData<(F, A, AccountId)>); impl< F: nonfungibles::Inspect, diff --git a/substrate/frame/support/src/traits/tokens/nonfungibles.rs b/substrate/frame/support/src/traits/tokens/nonfungibles.rs index 615e79c29c85f..22358cf806fb1 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungibles.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungibles.rs @@ -28,9 +28,9 @@ //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. use crate::dispatch::DispatchResult; +use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_runtime::{DispatchError, TokenError}; -use sp_std::prelude::*; /// Trait for providing an interface to many read-only NFT-like sets of items. pub trait Inspect { diff --git a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs index c0209b6d5123d..edf1c2b8023df 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungibles_v2.rs @@ -28,9 +28,9 @@ //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. use crate::dispatch::{DispatchResult, Parameter}; +use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_runtime::{DispatchError, TokenError}; -use sp_std::prelude::*; /// Trait for providing an interface to many read-only NFT-like sets of items. pub trait Inspect { diff --git a/substrate/frame/support/src/traits/tokens/pay.rs b/substrate/frame/support/src/traits/tokens/pay.rs index 62d7a056a3f1b..5a7ed4d6aa130 100644 --- a/substrate/frame/support/src/traits/tokens/pay.rs +++ b/substrate/frame/support/src/traits/tokens/pay.rs @@ -18,10 +18,10 @@ //! The Pay trait and associated types. 
use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use core::fmt::Debug; use scale_info::TypeInfo; use sp_core::{RuntimeDebug, TypedGet}; use sp_runtime::DispatchError; -use sp_std::fmt::Debug; use super::{fungible, fungibles, Balance, Preservation::Expendable}; diff --git a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs index d5dc93fcf28fe..8dbeecd8e860e 100644 --- a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs +++ b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs @@ -26,10 +26,10 @@ use crate::{ traits::{PartialStorageInfoTrait, StorageInfo}, StorageHasher, }; +use alloc::{vec, vec::Vec}; use codec::{Decode, DecodeAll, FullCodec}; use impl_trait_for_tuples::impl_for_tuples; use sp_core::Get; -use sp_std::prelude::*; /// Decode the entire data under the given storage type. /// @@ -82,8 +82,8 @@ impl core::fmt::Display for TryDecodeEntireStorageError { write!( f, "`{}::{}` key `{}` is undecodable", - &sp_std::str::from_utf8(&self.info.pallet_name).unwrap_or(""), - &sp_std::str::from_utf8(&self.info.storage_name).unwrap_or(""), + &alloc::str::from_utf8(&self.info.pallet_name).unwrap_or(""), + &alloc::str::from_utf8(&self.info.storage_name).unwrap_or(""), array_bytes::bytes2hex("0x", &self.key) ) } diff --git a/substrate/frame/support/src/traits/try_runtime/mod.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs index c1bf1feb19e54..09c33c0144067 100644 --- a/substrate/frame/support/src/traits/try_runtime/mod.rs +++ b/substrate/frame/support/src/traits/try_runtime/mod.rs @@ -22,10 +22,10 @@ pub use decode_entire_state::{TryDecodeEntireStorage, TryDecodeEntireStorageErro use super::StorageInstance; +use alloc::vec::Vec; use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_runtime::TryRuntimeError; -use sp_std::prelude::*; /// Which state tests to execute. #[derive(codec::Encode, codec::Decode, Clone, scale_info::TypeInfo)] @@ -55,15 +55,15 @@ impl Default for Select { } } -impl sp_std::fmt::Debug for Select { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for Select { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Select::RoundRobin(x) => write!(f, "RoundRobin({})", x), Select::Only(x) => write!( f, "Only({:?})", x.iter() - .map(|x| sp_std::str::from_utf8(x).unwrap_or("")) + .map(|x| alloc::str::from_utf8(x).unwrap_or("")) .collect::>(), ), Select::All => write!(f, "All"), @@ -73,7 +73,7 @@ impl sp_std::fmt::Debug for Select { } #[cfg(feature = "std")] -impl sp_std::str::FromStr for Select { +impl std::str::FromStr for Select { type Err = &'static str; fn from_str(s: &str) -> Result { match s { @@ -153,9 +153,7 @@ pub trait TryState { #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(all(feature = "tuples-128"), impl_for_tuples(128))] -impl TryState - for Tuple -{ +impl TryState for Tuple { for_tuples!( where #( Tuple: crate::traits::PalletInfoAccess )* ); fn try_state(n: BlockNumber, targets: Select) -> Result<(), TryRuntimeError> { match targets { @@ -221,7 +219,7 @@ impl TryState { /// Initializes a new tally. 
@@ -74,7 +74,7 @@ impl PollStatus { } } -pub struct ClassCountOf(sp_std::marker::PhantomData<(P, T)>); +pub struct ClassCountOf(core::marker::PhantomData<(P, T)>); impl> sp_runtime::traits::Get for ClassCountOf { fn get() -> u32 { P::classes().len() as u32 diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 6e861ad769cf7..82ac1d2c7475b 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -15,29 +15,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -static_assertions = "1.1.0" +static_assertions = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-state-machine = { path = "../../../primitives/state-machine", optional = true } -frame-support = { path = "..", default-features = false, features = ["experimental"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false } -sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false } -trybuild = { version = "1.0.88", features = ["diff"] } -pretty_assertions = "1.3.0" -rustversion = "1.0.6" -frame-system = { path = "../../system", default-features = false } -frame-executive = { path = "../../executive", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-metadata = { features = ["current"], workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-state-machine = { optional = true, workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true } +frame-benchmarking = { workspace = true } +sp-runtime = { workspace = true } +sp-core = { workspace = true } +sp-version = { workspace = true } +sp-metadata-ir = { workspace = true } +trybuild = { features = ["diff"], workspace = true } +pretty_assertions = { workspace = true } +rustversion = { workspace = true } +frame-system = { workspace = true } +frame-executive = { workspace = true } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message -test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false } +test-pallet = { workspace = true } [features] default = ["std"] @@ -57,7 +56,6 @@ std = [ "sp-metadata-ir/std", "sp-runtime/std", "sp-state-machine/std", - "sp-std/std", "sp-version/std", "test-pallet/std", ] diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index d6e0c66261a99..60896ca2a10f2 100644 
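Editor's note: most of the Rust hunks in this patch follow one mechanical pattern — items previously re-exported by the `sp_std` facade are now taken directly from `core` and `alloc`. A minimal before/after sketch of that pattern is below; the `Example<T>` type and the helper functions are made up for illustration and do not appear in the patch.

```rust
// Before (removed throughout this patch):
//     use sp_std::{fmt::Debug, marker::PhantomData, prelude::*};
//
// After: the same items come straight from `core` and `alloc`, keeping the
// crate `no_std`-compatible without the `sp-std` facade.
#![cfg_attr(not(feature = "std"), no_std)]

extern crate alloc;

use alloc::vec::Vec;
use core::marker::PhantomData;

/// Hypothetical marker-carrying type, standing in for the many
/// `PhantomData` wrapper structs touched by this patch.
pub struct Example<T>(PhantomData<T>);

impl<T> Example<T> {
    pub fn new() -> Self {
        // `sp_std::marker::PhantomData` becomes `core::marker::PhantomData`.
        Self(PhantomData)
    }
}

impl<T> core::fmt::Debug for Example<T> {
    // `sp_std::fmt::Formatter` / `Result` become their `core::fmt` equivalents.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "Example")
    }
}

/// `sp_std::prelude::*` was mostly providing `Vec`; that now comes from
/// `alloc::vec::Vec`, with `extern crate alloc;` added where it was missing.
pub fn lengths(items: &[&[u8]]) -> Vec<usize> {
    items.iter().map(|i| i.len()).collect()
}
```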
--- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-support = { path = "../..", default-features = false } -frame-system = { path = "../../../system", default-features = false } -sp-core = { path = "../../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } -sp-version = { path = "../../../../primitives/version", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-version = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 8607339a2b054..cee0eac6f1bcd 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } -frame-support = { path = "../..", default-features = false } -frame-system = { path = "../../../system", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/src/lib.rs b/substrate/frame/support/test/src/lib.rs index a8a723375033a..b080740b0a4b1 100644 --- a/substrate/frame/support/test/src/lib.rs +++ b/substrate/frame/support/test/src/lib.rs @@ -127,7 +127,7 @@ pub mod pallet_prelude { /// Provides an implementation of [`frame_support::traits::Randomness`] that should only be used in /// tests! 
-pub struct TestRandomness(sp_std::marker::PhantomData); +pub struct TestRandomness(core::marker::PhantomData); impl frame_support::traits::Randomness> for TestRandomness diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 5b97db60c00bb..8447cc12ef20c 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -15,9 +15,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +frame = { features = ["experimental", "runtime"], workspace = true } +scale-info = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/frame/support/test/tests/issue2219.rs b/substrate/frame/support/test/tests/issue2219.rs index 20c2773406ff1..7a2138d056a09 100644 --- a/substrate/frame/support/test/tests/issue2219.rs +++ b/substrate/frame/support/test/tests/issue2219.rs @@ -139,7 +139,7 @@ mod module { pub enable_storage_role: bool, pub request_life_time: u64, #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] diff --git a/substrate/frame/support/test/tests/origin.rs b/substrate/frame/support/test/tests/origin.rs index 4f14bda184c86..e6dd0cfc0e315 100644 --- a/substrate/frame/support/test/tests/origin.rs +++ b/substrate/frame/support/test/tests/origin.rs @@ -65,7 +65,7 @@ mod nested { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -135,7 +135,7 @@ pub mod module { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index c441d4c371af0..6f8af949cc313 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -446,7 +446,7 @@ pub mod pallet { T::AccountId: From + SomeAssociation1 + From, { #[serde(skip)] - _config: sp_std::marker::PhantomData, + _config: core::marker::PhantomData, _myfield: u32, } diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index dfe4caa476d3b..09a49617044da 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use core::any::TypeId; use frame_support::{ derive_impl, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, @@ -33,7 +34,6 @@ use sp_io::{ TestExternalities, }; use sp_runtime::{DispatchError, ModuleError}; -use sp_std::any::TypeId; #[frame_support::pallet(dev_mode)] pub mod pallet { @@ -194,7 +194,7 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { #[serde(skip)] - _config: sp_std::marker::PhantomData<(T, I)>, + _config: core::marker::PhantomData<(T, I)>, _myfield: u32, } diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 1f4d9110a24fc..06c2b5b7071c1 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -169,7 +169,7 @@ mod nested { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -251,7 +251,7 @@ pub mod module3 { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -871,7 +871,7 @@ fn test_metadata() { PalletMetadata { name: "Module3", storage: Some(PalletStorageMetadata { - prefix: "Module3", + prefix: "Module3", entries: vec![ StorageEntryMetadata { name: "Storage", diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 5b74cc172c6eb..4233db21e2031 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -169,7 +169,7 @@ mod nested { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -251,7 +251,7 @@ pub mod module3 { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -340,7 +340,7 @@ mod runtime { pub type Module1_9 = module1; } -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; @@ -808,7 +808,7 @@ fn test_metadata() { PalletMetadata { name: "Module3", storage: Some(PalletStorageMetadata { - prefix: "Module3", + prefix: "Module3", entries: vec![ StorageEntryMetadata { name: "Storage", diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index c83dd6b71de9b..58c9e4ce93b58 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -51,7 +51,7 @@ mod dummy_pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { #[serde(skip)] - _config: sp_std::marker::PhantomData, + _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -90,7 +90,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities { /// A dummy migration for testing the `VersionedMigration` trait. /// Sets SomeStorage to S. 
-struct SomeUnversionedMigration(sp_std::marker::PhantomData); +struct SomeUnversionedMigration(core::marker::PhantomData); parameter_types! { const UpgradeReads: u64 = 4; diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index a2a8970814b0a..3d056c894b92f 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -16,24 +16,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -cfg-if = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +cfg-if = { workspace = true } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +scale-info = { features = ["derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -frame-support = { path = "../support", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false } -sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } -sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.8" +frame-support = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-std = { workspace = true } +sp-version = { features = ["serde"], workspace = true } +sp-weights = { features = ["serde"], workspace = true } +docify = { workspace = true } [dev-dependencies] -criterion = "0.5.1" -sp-externalities = { path = "../../primitives/externalities" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +criterion = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 022f0ffce6b5e..dec68d20b6995 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -16,19 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "..", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-io = 
{ path = "../../../primitives/io" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-version = { path = "../../../primitives/version" } +sp-io = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } [features] default = ["std"] @@ -42,7 +41,6 @@ std = [ "sp-externalities/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-version/std", ] diff --git a/substrate/frame/system/benchmarking/src/inner.rs b/substrate/frame/system/benchmarking/src/inner.rs index c1631b0a2e334..0fb592f3dbba7 100644 --- a/substrate/frame/system/benchmarking/src/inner.rs +++ b/substrate/frame/system/benchmarking/src/inner.rs @@ -17,13 +17,13 @@ //! Frame System benchmarks. +use alloc::{vec, vec::Vec}; use codec::Encode; use frame_benchmarking::v2::*; use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; -use sp_std::{prelude::*, vec}; pub struct Pallet(System); pub trait Config: frame_system::Config { diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index e55038aeb9551..f66d20ac8aed9 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 8b71ca2a13952..901a035b6476c 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false } -docify = "0.2.0" +codec = { workspace = true } +sp-api = { workspace = true } +docify = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 76a711a823e7d..000ec56da64f3 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -31,16 +31,16 @@ use sp_runtime::{ /// the extension does not affect any other fields of `TransactionValidity` directly. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckGenesis(sp_std::marker::PhantomData); +pub struct CheckGenesis(core::marker::PhantomData); -impl sp_std::fmt::Debug for CheckGenesis { +impl core::fmt::Debug for CheckGenesis { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckGenesis") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -48,7 +48,7 @@ impl sp_std::fmt::Debug for CheckGenesis { impl CheckGenesis { /// Creates new `SignedExtension` to check genesis hash. 
pub fn new() -> Self { - Self(sp_std::marker::PhantomData) + Self(core::marker::PhantomData) } } diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 148dfd4aad471..6666c4812fbc3 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -28,28 +28,31 @@ use sp_runtime::{ /// Check for transaction mortality. /// +/// The extension adds [`Era`] to every signed extrinsic. It also contributes to the signed data, by +/// including the hash of the block at [`Era::birth`]. +/// /// # Transaction Validity /// /// The extension affects `longevity` of the transaction according to the [`Era`] definition. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckMortality(pub Era, sp_std::marker::PhantomData); +pub struct CheckMortality(pub Era, core::marker::PhantomData); impl CheckMortality { /// utility constructor. Used only in client/factory code. pub fn from(era: Era) -> Self { - Self(era, sp_std::marker::PhantomData) + Self(era, core::marker::PhantomData) } } -impl sp_std::fmt::Debug for CheckMortality { +impl core::fmt::Debug for CheckMortality { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckMortality({:?})", self.0) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 92eed60fc66b5..06dc2bf177ac9 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -17,6 +17,7 @@ use crate::Config; use codec::{Decode, Encode}; +use core::marker::PhantomData; use frame_support::{dispatch::DispatchInfo, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ @@ -25,21 +26,20 @@ use sp_runtime::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, }; -use sp_std::{marker::PhantomData, prelude::*}; /// Check to ensure that the sender is not the zero address. #[derive(Encode, Decode, DefaultNoBound, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckNonZeroSender(PhantomData); -impl sp_std::fmt::Debug for CheckNonZeroSender { +impl core::fmt::Debug for CheckNonZeroSender { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckNonZeroSender") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -47,7 +47,7 @@ impl sp_std::fmt::Debug for CheckNonZeroSender { impl CheckNonZeroSender { /// Create new `SignedExtension` to check runtime version. 
pub fn new() -> Self { - Self(sp_std::marker::PhantomData) + Self(core::marker::PhantomData) } } @@ -61,7 +61,7 @@ where type Pre = (); const IDENTIFIER: &'static str = "CheckNonZeroSender"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 894ab72eb593b..3535870d1b595 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::Config; +use alloc::vec; use codec::{Decode, Encode}; use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; @@ -26,7 +27,6 @@ use sp_runtime::{ ValidTransaction, }, }; -use sp_std::vec; /// Nonce check and increment to give replay protection for transactions. /// @@ -46,14 +46,14 @@ impl CheckNonce { } } -impl sp_std::fmt::Debug for CheckNonce { +impl core::fmt::Debug for CheckNonce { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckNonce({})", self.0) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -68,7 +68,7 @@ where type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index 24d5ef9cafb17..ee7e6f2efd001 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs +++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -31,16 +31,16 @@ use sp_runtime::{ /// is not affected in any other way. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckSpecVersion(sp_std::marker::PhantomData); +pub struct CheckSpecVersion(core::marker::PhantomData); -impl sp_std::fmt::Debug for CheckSpecVersion { +impl core::fmt::Debug for CheckSpecVersion { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckSpecVersion") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -48,7 +48,7 @@ impl sp_std::fmt::Debug for CheckSpecVersion { impl CheckSpecVersion { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { - Self(sp_std::marker::PhantomData) + Self(core::marker::PhantomData) } } diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 3f9d6a1903fe1..15983c2cd088b 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -31,16 +31,16 @@ use sp_runtime::{ /// is not affected in any other way. 
#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckTxVersion(sp_std::marker::PhantomData); +pub struct CheckTxVersion(core::marker::PhantomData); -impl sp_std::fmt::Debug for CheckTxVersion { +impl core::fmt::Debug for CheckTxVersion { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckTxVersion") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -48,7 +48,7 @@ impl sp_std::fmt::Debug for CheckTxVersion { impl CheckTxVersion { /// Create new `SignedExtension` to check transaction version. pub fn new() -> Self { - Self(sp_std::marker::PhantomData) + Self(core::marker::PhantomData) } } diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 5d6c68989ed53..22da2a5b98725 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{limits::BlockWeights, Config, DispatchClass, Pallet, LOG_TARGET}; +use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, @@ -37,7 +37,7 @@ use sp_weights::Weight; /// transaction is valid. #[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct CheckWeight(sp_std::marker::PhantomData); +pub struct CheckWeight(core::marker::PhantomData); impl CheckWeight where @@ -106,8 +106,7 @@ where let all_weight = Pallet::::block_weight(); let maximum_weight = T::BlockWeights::get(); let next_weight = - calculate_consumed_weight::(&maximum_weight, all_weight, info)?; - check_combined_proof_size::(info, &maximum_weight, next_len, &next_weight)?; + calculate_consumed_weight::(&maximum_weight, all_weight, info, len)?; Self::check_extrinsic_weight(info)?; crate::AllExtrinsicsLen::::put(next_len); @@ -130,36 +129,6 @@ where } } -/// Check that the combined extrinsic length and proof size together do not exceed the PoV limit. -pub fn check_combined_proof_size( - info: &DispatchInfoOf, - maximum_weight: &BlockWeights, - next_len: u32, - next_weight: &crate::ConsumedWeight, -) -> Result<(), TransactionValidityError> -where - Call: Dispatchable, -{ - // This extra check ensures that the extrinsic length does not push the - // PoV over the limit. - let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64); - if total_pov_size > maximum_weight.max_block.proof_size() { - log::debug!( - target: LOG_TARGET, - "Extrinsic exceeds total pov size. Still including if mandatory. size: {}kb, limit: {}kb, is_mandatory: {}", - total_pov_size as f64/1024.0, - maximum_weight.max_block.proof_size() as f64/1024.0, - info.class == DispatchClass::Mandatory - ); - return match info.class { - // Allow mandatory extrinsics - DispatchClass::Mandatory => Ok(()), - _ => Err(InvalidTransaction::ExhaustsResources.into()), - }; - } - Ok(()) -} - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. /// /// Upon successes, it returns the new block weight as a `Result`. 
@@ -167,12 +136,16 @@ pub fn calculate_consumed_weight( maximum_weight: &BlockWeights, mut all_weight: crate::ConsumedWeight, info: &DispatchInfoOf, + len: usize, ) -> Result where Call: Dispatchable, { - let extrinsic_weight = - info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic); + // Also Consider extrinsic length as proof weight. + let extrinsic_weight = info + .weight + .saturating_add(maximum_weight.get(info.class).base_extrinsic) + .saturating_add(Weight::from_parts(0, len as u64)); let limit_per_class = maximum_weight.get(info.class); // add the weight. If class is unlimited, use saturating add instead of checked one. @@ -238,7 +211,7 @@ where type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } @@ -308,14 +281,14 @@ where } } -impl sp_std::fmt::Debug for CheckWeight { +impl core::fmt::Debug for CheckWeight { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "CheckWeight") } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -327,8 +300,8 @@ mod tests { mock::{new_test_ext, System, Test, CALL}, AllExtrinsicsLen, BlockWeight, DispatchClass, }; + use core::marker::PhantomData; use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; - use sp_std::marker::PhantomData; fn block_weights() -> crate::limits::BlockWeights { ::BlockWeights::get() @@ -772,168 +745,115 @@ mod tests { &maximum_weight, all_weight.clone(), &mandatory1, + 0 )); assert_err!( calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, all_weight, &mandatory2, + 0 ), InvalidTransaction::ExhaustsResources ); } #[test] - fn maximum_proof_size_includes_length() { + fn proof_size_includes_length() { let maximum_weight = BlockWeights::builder() .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_parts(20, 10)); + w.max_total = Some(Weight::from_parts(20, 1000)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_parts(5, 10)); - w.max_total = None; + w.max_total = Some(Weight::from_parts(20, 1000)); }) .build_or_panic(); + let all_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(5, 0), + DispatchClass::Operational => Weight::from_parts(5, 0), + DispatchClass::Mandatory => Weight::from_parts(0, 0), + }); - assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + let normal = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Normal, + ..Default::default() + }; - let info = DispatchInfo { class: DispatchClass::Normal, ..Default::default() }; - let mandatory = DispatchInfo { class: DispatchClass::Mandatory, ..Default::default() }; - // We have 10 reftime and 5 proof size left over. 
- let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); + let mandatory = DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Mandatory, + ..Default::default() + }; - // Simple checks for the length - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using 0 length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, + all_weight.clone(), + &normal, 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + ) + .unwrap(); + + assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.weight); + + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 6, - &next_weight - )); + 0, + ) + .unwrap(); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.weight); - // We have 10 reftime and 0 proof size left over. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 10), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using non zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 1, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + normal.weight.add_proof_size(100) ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, - &maximum_weight, - 1, - &next_weight - )); - // We have 10 reftime and 2 proof size left over. - // Used weight is spread across dispatch classes this time. - let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(10, 5), - DispatchClass::Operational => Weight::from_parts(0, 3), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 2, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 3, - &next_weight - ), - InvalidTransaction::ExhaustsResources - ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( + all_weight.clone(), &mandatory, - &maximum_weight, - 3, - &next_weight - )); + 100, + ) + .unwrap(); + // Must account for the len in the proof size + assert_eq!( + consumed.total().saturating_sub(all_weight.total()), + mandatory.weight.add_proof_size(100) + ); - // Ref time is over the limit. Should not happen, but we should make sure that it is - // ignored. 
- let next_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => Weight::from_parts(30, 5), - DispatchClass::Operational => Weight::from_parts(0, 0), - DispatchClass::Mandatory => Weight::zero(), - }); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, + // Using oversized zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 0, - &next_weight - )); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 5, - &next_weight - )); - assert_err!( - check_combined_proof_size::<::RuntimeCall>( - &info, - &maximum_weight, - 6, - &next_weight - ), - InvalidTransaction::ExhaustsResources + all_weight.clone(), + &normal, + 2000, ); - assert_ok!(check_combined_proof_size::<::RuntimeCall>( - &mandatory, + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); + + // Using oversized zero length extrinsics. + let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, - 6, - &next_weight - )); + all_weight.clone(), + &mandatory, + 2000, + ); + // errors out + assert_eq!(consumed, Err(InvalidTransaction::ExhaustsResources.into())); } } diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 84d00a1e917ec..0c6ff2cb8ddba 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -97,6 +97,10 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec}; +use core::{fmt::Debug, marker::PhantomData}; use pallet_prelude::{BlockNumberFor, HeaderFor}; #[cfg(feature = "std")] use serde::Serialize; @@ -118,7 +122,6 @@ use sp_runtime::{ }; #[cfg(any(feature = "std", test))] use sp_std::map; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use sp_version::RuntimeVersion; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; @@ -269,7 +272,7 @@ pub mod pallet { /// /// NOTE: Avoids overriding `BlockHashCount` when using `mocking::{MockBlock, MockBlockU32, /// MockBlockU128}`. - pub struct TestBlockHashCount>(sp_std::marker::PhantomData); + pub struct TestBlockHashCount>(core::marker::PhantomData); impl, C: Get> Get for TestBlockHashCount { fn get() -> I { C::get().into() @@ -511,7 +514,7 @@ pub mod pallet { + Default + Copy + CheckEqual - + sp_std::hash::Hash + + core::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaxEncodedLen; @@ -1011,7 +1014,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } #[pallet::genesis_build] @@ -1153,7 +1156,7 @@ impl From for LastRuntimeUpgradeInfo { } /// Ensure the origin is Root. -pub struct EnsureRoot(sp_std::marker::PhantomData); +pub struct EnsureRoot(core::marker::PhantomData); impl, O>> + From>, AccountId> EnsureOrigin for EnsureRoot { @@ -1179,7 +1182,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { /// Ensure the origin is Root and return the provided `Success` value. pub struct EnsureRootWithSuccess( - sp_std::marker::PhantomData<(AccountId, Success)>, + core::marker::PhantomData<(AccountId, Success)>, ); impl< O: Into, O>> + From>, @@ -1209,7 +1212,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { /// Ensure the origin is provided `Ensure` origin and return the provided `Success` value. 
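Editor's note: the functional change in `check_weight.rs` above is that the separate `check_combined_proof_size` check is gone and `calculate_consumed_weight` now takes the encoded extrinsic length and charges it as proof size. The sketch below is a much-simplified model of that accounting, not the pallet's actual types: the toy `Weight` struct and the `consume` function are invented, and the numbers mirror the new `proof_size_includes_length` test (block limit 20 ref-time / 1000 proof-size, a 5-weight extrinsic, lengths 100 and 2000).

```rust
/// Toy two-dimensional weight, standing in for `sp_weights::Weight`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    fn saturating_add(self, other: Weight) -> Weight {
        Weight {
            ref_time: self.ref_time.saturating_add(other.ref_time),
            proof_size: self.proof_size.saturating_add(other.proof_size),
        }
    }
}

/// Hypothetical stand-in for the new `calculate_consumed_weight`: charge the
/// dispatch weight plus the extrinsic length (as proof size) and fail if the
/// block limit would be exceeded.
fn consume(
    max_block: Weight,
    already_consumed: Weight,
    extrinsic_weight: Weight,
    len: usize,
) -> Result<Weight, &'static str> {
    let charged =
        extrinsic_weight.saturating_add(Weight { ref_time: 0, proof_size: len as u64 });
    let next = already_consumed.saturating_add(charged);
    if next.ref_time > max_block.ref_time || next.proof_size > max_block.proof_size {
        Err("ExhaustsResources")
    } else {
        Ok(next)
    }
}

fn main() {
    let max = Weight { ref_time: 20, proof_size: 1000 };
    let used = Weight { ref_time: 5, proof_size: 0 };
    let xt = Weight { ref_time: 5, proof_size: 0 };

    // A 100-byte extrinsic is accounted as 100 bytes of proof size.
    assert_eq!(consume(max, used, xt, 100).unwrap().proof_size, 100);
    // An oversized extrinsic now fails in this same code path.
    assert!(consume(max, used, xt, 2000).is_err());
}
```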
pub struct EnsureWithSuccess( - sp_std::marker::PhantomData<(Ensure, AccountId, Success)>, + core::marker::PhantomData<(Ensure, AccountId, Success)>, ); impl< @@ -1232,7 +1235,7 @@ impl< } /// Ensure the origin is any `Signed` origin. -pub struct EnsureSigned(sp_std::marker::PhantomData); +pub struct EnsureSigned(core::marker::PhantomData); impl, O>> + From>, AccountId: Decode> EnsureOrigin for EnsureSigned { @@ -1259,7 +1262,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { } /// Ensure the origin is `Signed` origin from the given `AccountId`. -pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); +pub struct EnsureSignedBy(core::marker::PhantomData<(Who, AccountId)>); impl< O: Into, O>> + From>, Who: SortedMembers, @@ -1291,7 +1294,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { } /// Ensure the origin is `None`. i.e. unsigned transaction. -pub struct EnsureNone(sp_std::marker::PhantomData); +pub struct EnsureNone(core::marker::PhantomData); impl, O>> + From>, AccountId> EnsureOrigin for EnsureNone { @@ -1316,7 +1319,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { } /// Always fail. -pub struct EnsureNever(sp_std::marker::PhantomData); +pub struct EnsureNever(core::marker::PhantomData); impl EnsureOrigin for EnsureNever { type Success = Success; fn try_origin(o: O) -> Result { @@ -1906,7 +1909,7 @@ impl Pallet { /// Should only be called if you know what you are doing and outside of the runtime block /// execution else it can have a large impact on the PoV size of a block. pub fn read_events_no_consensus( - ) -> impl sp_std::iter::Iterator>> { + ) -> impl Iterator>> { Events::::stream_iter() } diff --git a/substrate/frame/system/src/migrations/mod.rs b/substrate/frame/system/src/migrations/mod.rs index 945bbc5395525..7c69843d73f10 100644 --- a/substrate/frame/system/src/migrations/mod.rs +++ b/substrate/frame/system/src/migrations/mod.rs @@ -24,7 +24,6 @@ use frame_support::{ pallet_prelude::ValueQuery, traits::PalletInfoAccess, weights::Weight, Blake2_128Concat, }; use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; /// Type used to encode the number of references an account has. type RefCount = u32; diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index a64b326196403..1f72ea2d37452 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -56,13 +56,13 @@ #![warn(missing_docs)] +use alloc::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; use codec::Encode; use sp_runtime::{ app_crypto::RuntimeAppPublic, traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, RuntimeDebug, }; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; /// Marker struct used to flag using all supported keys to sign a payload. pub struct ForAll {} @@ -76,7 +76,7 @@ pub struct ForAny {} /// utility function can be used. However, this struct is used by `Signer` /// to submit a signed transactions providing the signature along with the call. 
pub struct SubmitTransaction, OverarchingCall> { - _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)>, + _phantom: core::marker::PhantomData<(T, OverarchingCall)>, } impl SubmitTransaction @@ -115,7 +115,7 @@ where #[derive(RuntimeDebug)] pub struct Signer, X = ForAny> { accounts: Option>, - _phantom: sp_std::marker::PhantomData<(X, C)>, + _phantom: core::marker::PhantomData<(X, C)>, } impl, X> Default for Signer { diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 93ce09611b55d..25aecea7b79ea 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -17,24 +17,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false, optional = true } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-storage = { path = "../../primitives/storage", default-features = false } -sp-timestamp = { path = "../../primitives/timestamp", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { optional = true, workspace = true } +sp-runtime = { workspace = true } +sp-storage = { workspace = true } +sp-timestamp = { workspace = true } -docify = "0.2.8" +docify = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,7 +48,6 @@ std = [ "sp-inherents/std", "sp-io?/std", "sp-runtime/std", - "sp-std/std", "sp-storage/std", "sp-timestamp/std", ] diff --git a/substrate/frame/timestamp/src/benchmarking.rs b/substrate/frame/timestamp/src/benchmarking.rs index 82dfdfa8b3120..d8c27b4967af9 100644 --- a/substrate/frame/timestamp/src/benchmarking.rs +++ b/substrate/frame/timestamp/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_support::{ensure, traits::OnFinalize}; use frame_system::RawOrigin; use sp_storage::TrackedStorageKey; -use crate::Pallet as Timestamp; +use crate::{Now, Pallet as Timestamp}; const MAX_TIME: u32 = 100; @@ -42,7 +42,7 @@ benchmarks! 
{ }); }: _(RawOrigin::None, t.into()) verify { - ensure!(Timestamp::::now() == t.into(), "Time was not set."); + ensure!(Now::::get() == t.into(), "Time was not set."); } on_finalize { diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 5269f17eca6b2..ca495c5e24efb 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -133,9 +133,9 @@ mod mock; mod tests; pub mod weights; +use core::{cmp, result}; use frame_support::traits::{OnTimestampSet, Time, UnixTime}; use sp_runtime::traits::{AtLeast32Bit, SaturatedConversion, Scale, Zero}; -use sp_std::{cmp, result}; use sp_timestamp::{InherentError, InherentType, INHERENT_IDENTIFIER}; pub use weights::WeightInfo; @@ -202,7 +202,6 @@ pub mod pallet { /// The current time for the current block. #[pallet::storage] - #[pallet::getter(fn now)] pub type Now = StorageValue<_, T::Moment, ValueQuery>; /// Whether the timestamp has been updated in this block. @@ -261,7 +260,7 @@ pub mod pallet { pub fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { ensure_none(origin)?; assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); - let prev = Self::now(); + let prev = Now::::get(); assert!( prev.is_zero() || now >= prev + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" @@ -296,7 +295,7 @@ pub mod pallet { .expect("Timestamp inherent data must be provided"); let data = (*inherent_data).saturated_into::(); - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + let next_time = cmp::max(data, Now::::get() + T::MinimumPeriod::get()); Some(Call::set { now: next_time }) } @@ -317,7 +316,7 @@ pub mod pallet { .expect("Timestamp inherent data not correctly encoded") .expect("Timestamp inherent data must be provided"); - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + let minimum = (Now::::get() + T::MinimumPeriod::get()).saturated_into::(); if t > *(data + MAX_TIMESTAMP_DRIFT_MILLIS) { Err(InherentError::TooFarInFuture) } else if t < minimum { @@ -339,7 +338,7 @@ impl Pallet { /// NOTE: if this function is called prior to setting the timestamp, /// it will return the timestamp of the previous block. pub fn get() -> T::Moment { - Self::now() + Now::::get() } /// Set the timestamp to something in particular. Only used for tests. @@ -356,7 +355,7 @@ impl Time for Pallet { type Moment = T::Moment; fn now() -> Self::Moment { - Self::now() + Now::::get() } } @@ -367,15 +366,13 @@ impl UnixTime for Pallet { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. - let now = Self::now(); - sp_std::if_std! 
{ - if now == T::Moment::zero() { - log::error!( - target: "runtime::timestamp", - "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0", - ); - } - } + let now = Now::::get(); + + log::error!( + target: "runtime::timestamp", + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0", + ); + core::time::Duration::from_millis(now.saturated_into::()) } } diff --git a/substrate/frame/timestamp/src/tests.rs b/substrate/frame/timestamp/src/tests.rs index cc49d8a3296e8..a83855561889f 100644 --- a/substrate/frame/timestamp/src/tests.rs +++ b/substrate/frame/timestamp/src/tests.rs @@ -25,7 +25,7 @@ fn timestamp_works() { new_test_ext().execute_with(|| { crate::Now::::put(46); assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); - assert_eq!(Timestamp::now(), 69); + assert_eq!(crate::Now::::get(), 69); assert_eq!(Some(69), get_captured_moment()); }); } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index bcd54461406ea..bec2ce0a47b8b 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-treasury = { path = "../treasury", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-storage = { path = "../../primitives/storage" } +pallet-balances = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-storage/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 8c360fb57d724..67bcdfa0685e5 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -60,12 +60,14 @@ mod tests; pub mod migrations; pub mod weights; +extern crate alloc; + use sp_runtime::{ traits::{AccountIdConversion, BadOrigin, Hash, StaticLookup, TrailingZeroInput, Zero}, Percent, RuntimeDebug, }; -use sp_std::prelude::*; +use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ ensure, @@ -169,6 +171,9 @@ pub mod pallet { /// update weights file when altering this method. 
type Tippers: SortedMembers + ContainsLengthBound; + /// Handler for the unbalanced decrease when slashing for a removed tip. + type OnSlash: OnUnbalanced>; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -177,7 +182,6 @@ pub mod pallet { /// This has the insecure enumerable hash function since the key itself is already /// guaranteed to be a secure hash. #[pallet::storage] - #[pallet::getter(fn tips)] pub type Tips, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -189,7 +193,6 @@ pub mod pallet { /// Simple preimage lookup from the reason's hash to the original data. Again, has an /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. #[pallet::storage] - #[pallet::getter(fn reasons)] pub type Reasons, I: 'static = ()> = StorageMap<_, Identity, T::Hash, Vec, OptionQuery>; @@ -489,6 +492,18 @@ pub mod pallet { impl, I: 'static> Pallet { // Add public immutables and private mutables. + /// Access tips storage from outside + pub fn tips( + hash: T::Hash, + ) -> Option, BlockNumberFor, T::Hash>> { + Tips::::get(hash) + } + + /// Access reasons storage from outside + pub fn reasons(hash: T::Hash) -> Option> { + Reasons::::get(hash) + } + /// The account ID of the treasury pot. /// /// This actually does computation. If you need to keep using it, then make sure you cache the diff --git a/substrate/frame/tips/src/migrations/unreserve_deposits.rs b/substrate/frame/tips/src/migrations/unreserve_deposits.rs index 16cb1a80e812b..afc424309bf4d 100644 --- a/substrate/frame/tips/src/migrations/unreserve_deposits.rs +++ b/substrate/frame/tips/src/migrations/unreserve_deposits.rs @@ -18,6 +18,7 @@ //! A migration that unreserves all deposit and unlocks all stake held in the context of this //! pallet. +use alloc::collections::btree_map::BTreeMap; use core::iter::Sum; use frame_support::{ pallet_prelude::OptionQuery, @@ -27,7 +28,6 @@ use frame_support::{ Parameter, Twox64Concat, }; use sp_runtime::{traits::Zero, Saturating}; -use sp_std::collections::btree_map::BTreeMap; #[cfg(feature = "try-runtime")] const LOG_TARGET: &str = "runtime::tips::migrations::unreserve_deposits"; @@ -85,7 +85,7 @@ type Tips, I: 'static> = StorageMap< /// The pallet should be made inoperable before or immediately after this migration is run. /// /// (See also the `RemovePallet` migration in `frame/support/src/migrations.rs`) -pub struct UnreserveDeposits, I: 'static>(sp_std::marker::PhantomData<(T, I)>); +pub struct UnreserveDeposits, I: 'static>(core::marker::PhantomData<(T, I)>); impl, I: 'static> UnreserveDeposits { /// Calculates and returns the total amount reserved by each account by this pallet from open @@ -133,7 +133,7 @@ where /// Fails with a `TryRuntimeError` if somehow the amount reserved by this pallet is greater than /// the actual total reserved amount for any accounts. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { use codec::Encode; use frame_support::ensure; @@ -189,7 +189,7 @@ where /// Verifies that the account reserved balances were reduced by the actual expected amounts. 
#[cfg(feature = "try-runtime")] fn post_upgrade( - account_reserved_before_bytes: sp_std::vec::Vec, + account_reserved_before_bytes: alloc::vec::Vec, ) -> Result<(), sp_runtime::TryRuntimeError> { use codec::Decode; diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 78df3736815a1..7e4a9368ad0c6 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -65,20 +65,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; @@ -105,7 +94,6 @@ impl ContainsLengthBound for TenToFourteen { } } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const TreasuryPalletId2: PalletId = PalletId(*b"py/trsr2"); @@ -116,13 +104,8 @@ parameter_types! { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. @@ -143,13 +126,8 @@ impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId2; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. 
@@ -180,6 +158,7 @@ impl Config for Test { type DataDepositPerByte = ConstU64<1>; type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; + type OnSlash = (); type WeightInfo = (); } @@ -192,6 +171,7 @@ impl Config for Test { type DataDepositPerByte = ConstU64<1>; type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; + type OnSlash = (); type WeightInfo = (); } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 4f7da9ae46fab..c405ea1e94390 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -16,21 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -pallet-balances = { path = "../balances" } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 177621d9adbd1..b87d780b96721 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -17,21 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } -pallet-transaction-payment = { path = "..", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-runtime = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-transaction-payment = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } [dev-dependencies] -sp-core = { path = 
"../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-storage = { path = "../../../primitives/storage", default-features = false } -pallet-assets = { path = "../../assets" } -pallet-balances = { path = "../../balances" } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-storage = { workspace = true } +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -47,7 +46,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-storage/std", ] try-runtime = [ diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index ed0ed56e6e074..538d88bfacfaa 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -42,7 +42,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; +extern crate alloc; use codec::{Decode, Encode}; use frame_support::{ @@ -214,13 +214,13 @@ where } } -impl sp_std::fmt::Debug for ChargeAssetTxPayment { +impl core::fmt::Debug for ChargeAssetTxPayment { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "ChargeAssetTxPayment<{:?}, {:?}>", self.tip, self.asset_id.encode()) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -252,7 +252,7 @@ where Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index cc43cffd7deba..3f8c7bc0ea34d 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -98,20 +98,10 @@ parameter_types! 
{ pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs index f2f2c57bb376d..0ef3fb1111439 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs @@ -17,6 +17,8 @@ use super::*; use crate::Config; +use alloc::vec; +use core::marker::PhantomData; use frame_support::{ ensure, traits::{fungible::Inspect, tokens::Balance}, @@ -28,7 +30,6 @@ use sp_runtime::{ transaction_validity::InvalidTransaction, Saturating, }; -use sp_std::marker::PhantomData; /// Handle withdrawing, refunding and depositing of transaction fees. pub trait OnChargeAssetTransaction { diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index a4a8efad869c8..ec8fb38dda390 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -17,29 +17,28 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-core = { path = "../../../primitives/core", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } -pallet-transaction-payment = { path = "..", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } -sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-storage = { workspace = true } -pallet-assets = { path = "../../assets" } -pallet-authorship = { path = "../../authorship" } -pallet-balances = { path = "../../balances" } +pallet-assets = { workspace = true, default-features = true } +pallet-authorship = { workspace = true, default-features = 
true } +pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] @@ -57,7 +56,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-storage/std", ] runtime-benchmarks = [ diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 753fae747a37e..97f1116993fc1 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -35,8 +35,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; - use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, @@ -198,13 +196,13 @@ where } } -impl sp_std::fmt::Debug for ChargeAssetTxPayment { +impl core::fmt::Debug for ChargeAssetTxPayment { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "ChargeAssetTxPayment<{:?}, {:?}>", self.tip, self.asset_id.encode()) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -232,7 +230,7 @@ where Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index fce712c3eba31..e84df1e4eb91b 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -81,20 +81,10 @@ parameter_types! { pub const ExistentialDeposit: u64 = 10; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs index 717114ab6bd03..2486474bad45b 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -18,6 +18,7 @@ use super::*; use crate::Config; use codec::FullCodec; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ traits::{ fungibles::{Balanced, Credit, Inspect}, @@ -33,7 +34,6 @@ use sp_runtime::{ traits::{DispatchInfoOf, MaybeSerializeDeserialize, One, PostDispatchInfoOf}, transaction_validity::InvalidTransaction, }; -use sp_std::{fmt::Debug, marker::PhantomData}; /// Handle withdrawing, refunding and depositing of transaction fees. 
pub trait OnChargeAssetTransaction { diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 2c9f814460f7c..d2fb92a6bf345 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -16,12 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } -pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-rpc = { path = "../../../primitives/rpc" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-weights = { path = "../../../primitives/weights" } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 6c0241ec5c03c..1a384c74b31c2 100644 --- a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -pallet-transaction-payment = { path = "../..", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false } -sp-runtime = { path = "../../../../primitives/runtime", default-features = false } -sp-weights = { path = "../../../../primitives/weights", default-features = false } +codec = { features = ["derive"], workspace = true } +pallet-transaction-payment = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml index 4d32a5123cf3f..b5bc7719def60 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -15,15 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { workspace = true } -frame-support = { path = "../../support", default-features = false } -frame-system = { path = "../../system", default-features = false } +frame-support = { workspace = true } +frame-system = { workspace = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, 
features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } [features] default = ["std"] @@ -33,7 +32,6 @@ std = [ "frame-system/std", "scale-info/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 00391d79478c7..3ab38743bafdd 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -16,8 +16,8 @@ //! # Skip Feeless Payment Pallet //! //! This pallet allows runtimes that include it to skip payment of transaction fees for -//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@ -//! frame_support::pallet_prelude::feeless_if`). +//! dispatchables marked by +//! [`#[pallet::feeless_if]`](frame_support::pallet_prelude::feeless_if). //! //! ## Overview //! @@ -30,8 +30,9 @@ //! ## Integration //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets -//! in your `construct_runtime` macro and include this pallet's -//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. +//! in your [`construct_runtime`](frame_support::construct_runtime) macro and +//! include this pallet's [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the +//! existing one as an argument. #![cfg_attr(not(feature = "std"), no_std)] @@ -76,7 +77,7 @@ pub mod pallet { /// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. 
#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); +pub struct SkipCheckIfFeeless(pub S, core::marker::PhantomData); // Make this extension "invisible" from the outside (ie metadata type information) impl TypeInfo for SkipCheckIfFeeless { @@ -86,20 +87,20 @@ impl TypeInfo for SkipCheckIfFeeless { } } -impl sp_std::fmt::Debug for SkipCheckIfFeeless { +impl core::fmt::Debug for SkipCheckIfFeeless { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode()) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } impl From for SkipCheckIfFeeless { fn from(s: S) -> Self { - Self(s, sp_std::marker::PhantomData) + Self(s, core::marker::PhantomData) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index 4ddeae11fcab4..d6d600f24e77c 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -45,7 +45,7 @@ impl SignedExtension for DummyExtension { type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "DummyExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 0e440ee4e9ff5..69fad6e0e3248 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -69,7 +69,6 @@ use sp_runtime::{ }, FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; -use sp_std::prelude::*; pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; #[cfg(test)] @@ -137,7 +136,7 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction -pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M, X)>); +pub struct TargetedFeeAdjustment(core::marker::PhantomData<(T, S, V, M, X)>); /// Something that can convert the current multiplier to the next one. 
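As the skip-feeless-payment documentation above says, `SkipCheckIfFeeless` wraps an existing fee-charging extension rather than replacing it. A hedged sketch of how a runtime's signed-extension tuple might wire it up; `Runtime` and the other extensions shown are placeholders for whatever the runtime already declares.

```rust
// Sketch of a runtime's signed-extension tuple; not part of the diff itself.
pub type SignedExtra = (
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    // Dispatchables marked `#[pallet::feeless_if]` skip the wrapped
    // `ChargeTransactionPayment`; all other calls pay fees as before.
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);
```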
pub trait MultiplierUpdate: Convert { @@ -264,7 +263,7 @@ where } /// A struct to make the fee multiplier a constant -pub struct ConstFeeMultiplier>(sp_std::marker::PhantomData); +pub struct ConstFeeMultiplier>(core::marker::PhantomData); impl> MultiplierUpdate for ConstFeeMultiplier { fn min() -> Multiplier { @@ -406,7 +405,7 @@ pub mod pallet { pub struct GenesisConfig { pub multiplier: Multiplier, #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: core::marker::PhantomData, } impl Default for GenesisConfig { @@ -806,13 +805,13 @@ where } } -impl sp_std::fmt::Debug for ChargeTransactionPayment { +impl core::fmt::Debug for ChargeTransactionPayment { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "ChargeTransactionPayment<{:?}>", self.0) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -834,7 +833,7 @@ where // imbalance resulting from withdrawing the fee <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 7b731eeb82501..fa61572e9831f 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -21,7 +21,7 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{fungible, ConstU64, Imbalance, OnUnbalanced}, + traits::{fungible, Imbalance, OnUnbalanced}, weights::{Weight, WeightToFee as WeightToFeeT}, }; use frame_system as system; @@ -73,20 +73,9 @@ impl frame_system::Config for Runtime { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl WeightToFeeT for WeightToFee { diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs index 25cecc58a63ab..67c7311d0cab5 100644 --- a/substrate/frame/transaction-payment/src/types.rs +++ b/substrate/frame/transaction-payment/src/types.rs @@ -24,7 +24,6 @@ use serde::{Deserialize, Serialize}; use scale_info::TypeInfo; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; -use sp_std::prelude::*; use frame_support::dispatch::DispatchClass; diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index bf647ca13ec1c..e57ee1e729c4d 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -16,24 +16,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info 
= { version = "2.11.1", default-features = false, features = ["derive"] } +array-bytes = { optional = true, workspace = true, default-features = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-transaction-storage-proof = { workspace = true } log = { workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false } -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true } +sp-core = { workspace = true } +sp-transaction-storage-proof = { default-features = true, workspace = true } [features] default = ["std"] @@ -58,7 +57,6 @@ std = [ "sp-inherents/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", "sp-transaction-storage-proof/std", ] try-runtime = [ diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index 8d485d9f3cac2..f360e9847a1e1 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -20,11 +20,11 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; use frame_support::traits::{Get, OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, One, Zero}; -use sp_std::*; use sp_transaction_storage_proof::TransactionStorageProof; use crate::Pallet as TransactionStorage; diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index 398cb350c501e..68f24526300d8 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -28,7 +28,11 @@ mod mock; #[cfg(test)] mod tests; +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; +use core::result; use frame_support::{ dispatch::GetDispatchInfo, traits::{ @@ -38,7 +42,6 @@ use frame_support::{ }, }; use sp_runtime::traits::{BlakeTwo256, Dispatchable, Hash, One, Saturating, Zero}; -use sp_std::{prelude::*, result}; use sp_transaction_storage_proof::{ encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE, INHERENT_IDENTIFIER, @@ -159,11 +162,11 @@ pub mod pallet { fn on_initialize(n: BlockNumberFor) -> Weight { // Drop obsolete roots. 
The proof for `obsolete` will be checked later // in this block, so we drop `obsolete` - 1. - let period = >::get(); + let period = StoragePeriod::::get(); let obsolete = n.saturating_sub(period.saturating_add(One::one())); if obsolete > Zero::zero() { - >::remove(obsolete); - >::remove(obsolete); + Transactions::::remove(obsolete); + ChunkCount::::remove(obsolete); } // 2 writes in `on_initialize` and 2 writes + 2 reads in `on_finalize` T::DbWeight::get().reads_writes(2, 4) @@ -171,21 +174,21 @@ pub mod pallet { fn on_finalize(n: BlockNumberFor) { assert!( - >::take() || { + ProofChecked::::take() || { // Proof is not required for early or empty blocks. - let number = >::block_number(); - let period = >::get(); + let number = frame_system::Pallet::::block_number(); + let period = StoragePeriod::::get(); let target_number = number.saturating_sub(period); - target_number.is_zero() || >::get(target_number) == 0 + target_number.is_zero() || ChunkCount::::get(target_number) == 0 }, "Storage proof must be checked once in the block" ); // Insert new transactions - let transactions = >::take(); + let transactions = BlockTransactions::::take(); let total_chunks = transactions.last().map_or(0, |t| t.block_chunks); if total_chunks != 0 { - >::insert(n, total_chunks); - >::insert(n, transactions); + ChunkCount::::insert(n, total_chunks); + Transactions::::insert(n, transactions); } } } @@ -215,11 +218,11 @@ pub mod pallet { let content_hash = sp_io::hashing::blake2_256(&data); let extrinsic_index = - >::extrinsic_index().ok_or(Error::::BadContext)?; + frame_system::Pallet::::extrinsic_index().ok_or(Error::::BadContext)?; sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash); let mut index = 0; - >::mutate(|transactions| { + BlockTransactions::::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { return Err(Error::::TooManyTransactions) } @@ -253,17 +256,17 @@ pub mod pallet { index: u32, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; - let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; + let transactions = Transactions::::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; let extrinsic_index = - >::extrinsic_index().ok_or(Error::::BadContext)?; + frame_system::Pallet::::extrinsic_index().ok_or(Error::::BadContext)?; Self::apply_fee(sender, info.size)?; sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; - >::mutate(|transactions| { + BlockTransactions::::mutate(|transactions| { if transactions.len() + 1 > T::MaxBlockTransactions::get() as usize { return Err(Error::::TooManyTransactions) } @@ -297,15 +300,15 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { ensure_none(origin)?; ensure!(!ProofChecked::::get(), Error::::DoubleCheck); - let number = >::block_number(); - let period = >::get(); + let number = frame_system::Pallet::::block_number(); + let period = StoragePeriod::::get(); let target_number = number.saturating_sub(period); ensure!(!target_number.is_zero(), Error::::UnexpectedProof); - let total_chunks = >::get(target_number); + let total_chunks = ChunkCount::::get(target_number); ensure!(total_chunks != 0, Error::::UnexpectedProof); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks); - let (info, chunk_index) = match >::get(target_number) { + let 
(info, chunk_index) = match Transactions::::get(target_number) { Some(infos) => { let index = match infos .binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) @@ -349,8 +352,7 @@ pub mod pallet { /// Collection of transaction metadata by block number. #[pallet::storage] - #[pallet::getter(fn transaction_roots)] - pub(super) type Transactions = StorageMap< + pub type Transactions = StorageMap< _, Blake2_128Concat, BlockNumberFor, @@ -360,32 +362,30 @@ pub mod pallet { /// Count indexed chunks for each block. #[pallet::storage] - pub(super) type ChunkCount = + pub type ChunkCount = StorageMap<_, Blake2_128Concat, BlockNumberFor, u32, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn byte_fee)] /// Storage fee per byte. - pub(super) type ByteFee = StorageValue<_, BalanceOf>; + pub type ByteFee = StorageValue<_, BalanceOf>; #[pallet::storage] - #[pallet::getter(fn entry_fee)] /// Storage fee per transaction. - pub(super) type EntryFee = StorageValue<_, BalanceOf>; + pub type EntryFee = StorageValue<_, BalanceOf>; /// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD` /// for block authoring. #[pallet::storage] - pub(super) type StoragePeriod = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type StoragePeriod = StorageValue<_, BlockNumberFor, ValueQuery>; // Intermediates #[pallet::storage] - pub(super) type BlockTransactions = + pub type BlockTransactions = StorageValue<_, BoundedVec, ValueQuery>; /// Was the proof checked in this block? #[pallet::storage] - pub(super) type ProofChecked = StorageValue<_, bool, ValueQuery>; + pub type ProofChecked = StorageValue<_, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -407,9 +407,9 @@ pub mod pallet { #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - >::put(&self.byte_fee); - >::put(&self.entry_fee); - >::put(&self.storage_period); + ByteFee::::put(&self.byte_fee); + EntryFee::::put(&self.entry_fee); + StoragePeriod::::put(&self.storage_period); } } @@ -439,6 +439,21 @@ pub mod pallet { } impl Pallet { + /// Get transaction storage information from outside of this pallet. + pub fn transaction_roots( + block: BlockNumberFor, + ) -> Option> { + Transactions::::get(block) + } + /// Get ByteFee storage information from outside of this pallet. + pub fn byte_fee() -> Option> { + ByteFee::::get() + } + /// Get EntryFee storage information from outside of this pallet. 
+ pub fn entry_fee() -> Option> { + EntryFee::::get() + } + fn apply_fee(sender: T::AccountId, size: u32) -> DispatchResult { let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index 621f74804ecca..b725990e6e121 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -40,9 +40,9 @@ fn discards_data() { vec![0u8; 2000 as usize] )); let proof_provider = || { - let block_num = >::block_number(); + let block_num = frame_system::Pallet::::block_number(); if block_num == 11 { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); Some( build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]]) .unwrap(), @@ -92,7 +92,7 @@ fn checks_proof() { vec![0u8; MAX_DATA_SIZE as usize] )); run_to_block(10, || None); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); assert_noop!( @@ -100,7 +100,7 @@ fn checks_proof() { Error::::UnexpectedProof, ); run_to_block(11, || None); - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap(); assert_noop!( @@ -132,9 +132,9 @@ fn renews_data() { )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2); let proof_provider = || { - let block_num = >::block_number(); + let block_num = frame_system::Pallet::::block_number(); if block_num == 11 || block_num == 16 { - let parent_hash = >::parent_hash(); + let parent_hash = frame_system::Pallet::::parent_hash(); Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap()) } else { None diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index c93272af11d46..2727ab809320c 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -16,26 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -docify = "0.2.8" -impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +docify = { workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -pallet-balances = { path = "../balances", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } 
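The transaction-storage pallet follows the same getter-removal pattern as tips: its storage items are now `pub`, and the `transaction_roots`/`byte_fee`/`entry_fee` helpers added above keep external call sites working. A rough sketch of both access paths after this change, generic over any runtime implementing `pallet_transaction_storage::Config`; the function names are illustrative.

```rust
use frame_system::pallet_prelude::BlockNumberFor;
use pallet_transaction_storage::{ByteFee, EntryFee, Pallet as TransactionStorage, Transactions};

// Either go through the helper functions added in this change...
fn fees_via_helpers<T: pallet_transaction_storage::Config>() {
    let _byte_fee = TransactionStorage::<T>::byte_fee();
    let _entry_fee = TransactionStorage::<T>::entry_fee();
}

// ...or read the now-public storage items directly.
fn fees_via_storage<T: pallet_transaction_storage::Config>(block: BlockNumberFor<T>) {
    let _byte_fee = ByteFee::<T>::get();
    let _entry_fee = EntryFee::<T>::get();
    let _roots = Transactions::<T>::get(block);
}
```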
+sp-runtime = { workspace = true } +sp-core = { optional = true, workspace = true } [dev-dependencies] -sp-io = { path = "../../primitives/io" } -pallet-utility = { path = "../utility" } -sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "sp-core?/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "dep:sp-core", diff --git a/substrate/frame/treasury/README.md b/substrate/frame/treasury/README.md index 4945d79d14296..2bd58a9817aab 100644 --- a/substrate/frame/treasury/README.md +++ b/substrate/frame/treasury/README.md @@ -26,6 +26,14 @@ and use the funds to pay developers. ### Dispatchable Functions General spending/proposal protocol: -- `propose_spend` - Make a spending proposal and stake the required deposit. -- `reject_proposal` - Reject a proposal, slashing the deposit. -- `approve_proposal` - Accept the proposal, returning the deposit. +- `spend_local` - Propose and approve a spend of treasury funds, enables the + creation of spends using the native currency of the chain, utilizing the funds + stored in the pot +- `spend` - Propose and approve a spend of treasury funds, allows spending any + asset kind managed by the treasury +- `remove_approval` - Force a previously approved proposal to be removed from + the approval queue +- `payout` - Claim a spend +- `check_status` - Check the status of the spend and remove it from the storage + if processed +- `void_spend` - Void previously approved spend diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index 0b9999e37fbea..63978c94e682f 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -59,12 +59,12 @@ where const SEED: u32 = 0; -// Create the pre-requisite information needed to create a treasury `propose_spend`. +// Create the pre-requisite information needed to create a treasury `spend_local`. fn setup_proposal, I: 'static>( u: u32, ) -> (T::AccountId, BalanceOf, AccountIdLookupOf) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); + let value: BalanceOf = T::Currency::minimum_balance() * 100u32.into(); let _ = T::Currency::make_free_balance_be(&caller, value); let beneficiary = account("beneficiary", u, SEED); let beneficiary_lookup = T::Lookup::unlookup(beneficiary); @@ -73,12 +73,10 @@ fn setup_proposal, I: 'static>( // Create proposals that are approved for use in `on_initialize`. 
fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { + let origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; for i in 0..n { - let (caller, value, lookup) = setup_proposal::(i); - #[allow(deprecated)] - Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; - let proposal_id = >::get() - 1; - Approvals::::try_append(proposal_id).unwrap(); + let (_, value, lookup) = setup_proposal::(i); + Treasury::::spend_local(origin.clone(), value, lookup)?; } ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) @@ -126,71 +124,13 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn propose_spend() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), value, beneficiary_lookup); - - Ok(()) - } - - #[benchmark] - fn reject_proposal() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - let reject_origin = - T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - - #[extrinsic_call] - _(reject_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - - #[benchmark] - fn approve_proposal( - p: Linear<0, { T::MaxApprovals::get() - 1 }>, - ) -> Result<(), BenchmarkError> { - let approve_origin = - T::ApproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - create_approved_proposals::(p)?; - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; - let proposal_id = Treasury::::proposal_count() - 1; - - #[extrinsic_call] - _(approve_origin as T::RuntimeOrigin, proposal_id); - - Ok(()) - } - #[benchmark] fn remove_approval() -> Result<(), BenchmarkError> { - let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); - #[allow(deprecated)] - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup, - )?; + let origin = + T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let (_, value, beneficiary_lookup) = setup_proposal::(SEED); + Treasury::::spend_local(origin, value, beneficiary_lookup)?; let proposal_id = Treasury::::proposal_count() - 1; - Approvals::::try_append(proposal_id).unwrap(); let reject_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 1ccd845664323..3954489a2d156 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -81,14 +81,16 @@ use core::marker::PhantomData; #[cfg(feature = "runtime-benchmarks")] pub use benchmarking::ArgumentsFactory; +extern crate alloc; + use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use alloc::{boxed::Box, collections::btree_map::BTreeMap}; use sp_runtime::{ traits::{AccountIdConversion, CheckedAdd, Saturating, StaticLookup, Zero}, Permill, RuntimeDebug, }; -use 
sp_std::{collections::btree_map::BTreeMap, prelude::*}; use frame_support::{ dispatch::{DispatchResult, DispatchResultWithPostInfo}, @@ -205,9 +207,6 @@ pub mod pallet { /// The staking balance. type Currency: Currency + ReservableCurrency; - /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; - /// Origin from which rejections must come. type RejectOrigin: EnsureOrigin; @@ -215,22 +214,6 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. - type OnSlash: OnUnbalanced>; - - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - #[pallet::constant] - type ProposalBond: Get; - - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMinimum: Get>; - - /// Maximum amount of funds that should be placed in a deposit for making a proposal. - #[pallet::constant] - type ProposalBondMaximum: Get>>; - /// Period between successive spends. #[pallet::constant] type SpendPeriod: Get>; @@ -345,7 +328,7 @@ pub mod pallet { #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { #[serde(skip)] - _config: sp_std::marker::PhantomData<(T, I)>, + _config: core::marker::PhantomData<(T, I)>, } #[pallet::genesis_build] @@ -363,14 +346,10 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// New proposal. - Proposed { proposal_index: ProposalIndex }, /// We have ended a spend period and will now allocate funds. Spending { budget_remaining: BalanceOf }, /// Some funds have been allocated. Awarded { proposal_index: ProposalIndex, award: BalanceOf, account: T::AccountId }, - /// A proposal was rejected; funds were slashed. - Rejected { proposal_index: ProposalIndex, slashed: BalanceOf }, /// Some of our funds have been burnt. Burnt { burnt_funds: BalanceOf }, /// Spending has finished; this is the amount that rolls over until next spend. @@ -408,8 +387,6 @@ pub mod pallet { /// Error for the treasury pallet. #[pallet::error] pub enum Error { - /// Proposer's balance is too low. - InsufficientProposersBalance, /// No proposal, bounty or spend at that index. InvalidIndex, /// Too many approvals in the queue. @@ -476,123 +453,6 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - /// Put forward a suggestion for spending. - /// - /// ## Dispatch Origin - /// - /// Must be signed. - /// - /// ## Details - /// A deposit proportional to the value is reserved and slashed if the proposal is rejected. - /// It is returned once the proposal is awarded. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Proposed`] if successful. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::propose_spend())] - #[allow(deprecated)] - #[deprecated( - note = "`propose_spend` will be removed in February 2024. Use `spend` instead." 
- )] - pub fn propose_spend( - origin: OriginFor, - #[pallet::compact] value: BalanceOf, - beneficiary: AccountIdLookupOf, - ) -> DispatchResult { - let proposer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - let bond = Self::calculate_bond(value); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - let c = Self::proposal_count(); - >::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); - - Self::deposit_event(Event::Proposed { proposal_index: c }); - Ok(()) - } - - /// Reject a proposed spend. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::RejectOrigin`]. - /// - /// ## Details - /// The original deposit will be slashed. - /// - /// ### Complexity - /// - O(1) - /// - /// ## Events - /// - /// Emits [`Event::Rejected`] if successful. - #[pallet::call_index(1)] - #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`reject_proposal` will be removed in February 2024. Use `spend` instead." - )] - pub fn reject_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::RejectOrigin::ensure_origin(origin)?; - - let proposal = - >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; - let value = proposal.bond; - let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - - Self::deposit_event(Event::::Rejected { - proposal_index: proposal_id, - slashed: value, - }); - Ok(()) - } - - /// Approve a proposal. - /// - /// ## Dispatch Origin - /// - /// Must be [`Config::ApproveOrigin`]. - /// - /// ## Details - /// - /// At a later time, the proposal will be allocated to the beneficiary and the original - /// deposit will be returned. - /// - /// ### Complexity - /// - O(1). - /// - /// ## Events - /// - /// No events are emitted from this dispatch. - #[pallet::call_index(2)] - #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] - #[allow(deprecated)] - #[deprecated( - note = "`approve_proposal` will be removed in February 2024. Use `spend` instead." - )] - pub fn approve_proposal( - origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex, - ) -> DispatchResult { - T::ApproveOrigin::ensure_origin(origin)?; - - ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::try_append(proposal_id) - .map_err(|_| Error::::TooManyApprovals)?; - Ok(()) - } - /// Propose and approve a spend of treasury funds. /// /// ## Dispatch Origin @@ -794,7 +654,7 @@ pub mod pallet { /// /// ## Dispatch Origin /// - /// Must be signed. + /// Must be signed /// /// ## Details /// @@ -934,15 +794,6 @@ impl, I: 'static> Pallet { T::PalletId::get().into_account_truncating() } - /// The needed bond for a proposal whose spend is `value`. - fn calculate_bond(value: BalanceOf) -> BalanceOf { - let mut r = T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value); - if let Some(m) = T::ProposalBondMaximum::get() { - r = r.min(m); - } - r - } - /// Spend some money! returns number of approvals before spend. 
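With `propose_spend`, `reject_proposal` and `approve_proposal` removed above, the flow described in the updated treasury README goes through `spend_local`/`spend` plus the payout lifecycle calls. A sketch of the replacement flow in the style of the tests below; `Treasury`, `RuntimeOrigin`, the signed account `14` (which the mock maps to a large `SpendOrigin` allowance) and beneficiary `3` all come from the surrounding mock runtime.

```rust
use frame_support::assert_ok;

// Sketch only: mirrors the updated tests rather than defining new behaviour.
fn local_spend_flow() {
    // `spend_local` proposes *and* approves in one call; the payout happens on
    // the next spend period via `spend_funds`.
    assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3));

    // An approval that has not been paid out yet can still be dropped by the
    // reject origin.
    assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0));
}
```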
pub fn spend_funds() -> Weight { let mut total_weight = Weight::zero(); diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 67d81cb5c3022..97b735928192b 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -60,20 +60,10 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; } + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_utility::Config for Test { @@ -136,7 +126,6 @@ impl Pay for TestPay { } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub TreasuryAccount: u128 = Treasury::account_id(); @@ -152,6 +141,7 @@ impl frame_support::traits::EnsureOrigin for TestSpendOrigin { frame_system::RawOrigin::Signed(11) => Ok(10), frame_system::RawOrigin::Signed(12) => Ok(20), frame_system::RawOrigin::Signed(13) => Ok(50), + frame_system::RawOrigin::Signed(14) => Ok(500), r => Err(RuntimeOrigin::from(r)), }) } @@ -174,13 +164,8 @@ impl> ConversionFromAssetBalance for MulBy { impl Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ConstU64<1>; - type ProposalBondMaximum = (); type SpendPeriod = ConstU64<2>; type Burn = Burn; type BurnDestination = (); // Just gets burned. 
@@ -295,56 +280,12 @@ fn minting_works() { }); } -#[test] -fn spend_proposal_takes_min_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); -} - -#[test] -fn spend_proposal_takes_proportional_deposit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); -} - -#[test] -fn spend_proposal_fails_when_proposer_poor() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3) - }, - Error::::InsufficientProposersBalance, - ); - }); -} - #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -365,112 +306,13 @@ fn unused_pot_should_diminish() { }); } -#[test] -fn rejected_spend_proposal_ignored_on_spend_period() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); -} - -#[test] -fn reject_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn reject_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_non_existent_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - ); - }); -} - -#[test] -fn accept_already_rejected_spend_proposal_fails() { - ExtBuilder::default().build().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::reject_proposal(RuntimeOrigin::root(), 0) - }); - assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, - Error::::InvalidIndex - 
); - }); -} - #[test] fn accepted_spend_proposal_enacted_on_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -484,14 +326,7 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 150, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -512,26 +347,12 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), Treasury::pot(), 3)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -554,22 +375,9 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 1) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 99, 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -611,26 +419,12 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); } // One too many will fail - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); assert_noop!( - { - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }, + 
Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3), Error::::TooManyApprovals ); }); @@ -641,14 +435,8 @@ fn remove_already_removed_approval_fails() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3) - }); - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + assert_eq!(Treasury::approvals(), vec![0]); assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); @@ -982,11 +770,9 @@ fn check_status_works() { fn try_state_proposals_invariant_1_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); assert_eq!(ProposalCount::::get(), 1); // Check invariant 1 holds @@ -1005,12 +791,11 @@ fn try_state_proposals_invariant_1_works() { fn try_state_proposals_invariant_2_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + assert_eq!(Proposals::::iter().count(), 1); + assert_eq!(Approvals::::get().len(), 1); let current_proposal_count = ProposalCount::::get(); assert_eq!(current_proposal_count, 1); // Check invariant 2 holds @@ -1035,17 +820,10 @@ fn try_state_proposals_invariant_2_works() { fn try_state_proposals_invariant_3_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal using `propose_spend` - assert_ok!({ - #[allow(deprecated)] - Treasury::propose_spend(RuntimeOrigin::signed(0), 10, 3) - }); + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 10, 3)); + assert_eq!(Proposals::::iter().count(), 1); - // Approve the proposal - assert_ok!({ - #[allow(deprecated)] - Treasury::approve_proposal(RuntimeOrigin::root(), 0) - }); assert_eq!(Approvals::::get().len(), 1); // Check invariant 3 holds assert!(Approvals::::get() diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 82277e2d28f6c..8c9c6eb1d0fbb 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -52,9 +52,6 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_treasury`. 
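Editor's note, not part of the diff: the treasury test hunks above replace every deprecated `propose_spend` plus `approve_proposal` pair with a single origin-gated `spend_local` call. A minimal sketch of the new pattern follows, assuming the mock runtime from this test file (`Test`, `Treasury`, `Balances`, `ExtBuilder`, `RuntimeOrigin`, with `OnInitialize` in scope) and signer 14's 500-unit cap; the angle-bracketed generics are reconstructed here, since the flattened hunks drop them.

```rust
#[test]
fn spend_local_enacted_on_spend_period_sketch() {
    ExtBuilder::default().build().execute_with(|| {
        // Fund the treasury pot, as in `accepted_spend_proposal_enacted_on_spend_period`.
        Balances::make_free_balance_be(&Treasury::account_id(), 101);
        assert_eq!(Treasury::pot(), 100);

        // One call replaces the old propose + approve pair.
        assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3));

        // The payout is enacted at the next spend period (`SpendPeriod` = 2 in the mock).
        <Treasury as OnInitialize<u64>>::on_initialize(2);
        assert_eq!(Balances::free_balance(3), 100);
    });
}
```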
pub trait WeightInfo { fn spend_local() -> Weight; - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal(p: u32, ) -> Weight; fn remove_approval() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; fn spend() -> Weight; @@ -81,50 +78,8 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. - Weight::from_parts(25_484_000, 1489) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_325_000, 3593) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. - Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` @@ -232,50 +187,8 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Treasury::ProposalCount` (r:1 w:1) - /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:0 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - fn propose_spend() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1489` - // Minimum execution time: 24_704_000 picoseconds. 
- Weight::from_parts(25_484_000, 1489) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:1) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn reject_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `3593` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_325_000, 3593) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `Treasury::Proposals` (r:1 w:0) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 99]`. - fn approve_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `504 + p * (8 ยฑ0)` - // Estimated: `3573` - // Minimum execution time: 8_436_000 picoseconds. - Weight::from_parts(11_268_438, 3573) - // Standard Error: 1_039 - .saturating_add(Weight::from_parts(70_903, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index e4e5f1940b25b..228d96095ac92 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -15,11 +15,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -28,6 +27,5 @@ std = [ "frame-support/std", "sp-api/std", "sp-runtime/std", - "sp-std/std", ] try-runtime = ["frame-support/try-runtime", "sp-runtime/try-runtime"] diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index e44bb90dd7f84..861a85881f2dd 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -15,24 +15,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -docify = "0.2.8" -frame-benchmarking = { path = "../benchmarking", 
default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } -pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -pallet-balances = { path = "../balances" } -pallet-utility = { path = "../utility" } -pallet-proxy = { path = "../proxy" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/tx-pause/src/benchmarking.rs b/substrate/frame/tx-pause/src/benchmarking.rs index 126c0837949d5..95ae250eff7b5 100644 --- a/substrate/frame/tx-pause/src/benchmarking.rs +++ b/substrate/frame/tx-pause/src/benchmarking.rs @@ -18,6 +18,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::{Pallet as TxPause, *}; +use alloc::vec; use frame_benchmarking::v2::*; #[benchmarks] diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index 5904b5ed31628..68f7a0312554d 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -79,6 +79,9 @@ pub mod mock; mod tests; pub mod weights; +extern crate alloc; + +use alloc::vec::Vec; use frame_support::{ dispatch::GetDispatchInfo, pallet_prelude::*, @@ -87,7 +90,6 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use sp_runtime::{traits::Dispatchable, DispatchResult}; -use sp_std::prelude::*; pub use pallet::*; pub use weights::*; diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index f42d4cb58a2ac..84ce45e835280 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -36,24 +36,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 10; -} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type MaxFreezes = ConstU32<0>; } impl pallet_utility::Config for Test { diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 65b727b40b254..2b1794aa60fe7 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -16,20 +16,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } -sp-std = { path = "../../primitives/std" } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/uniques/src/benchmarking.rs b/substrate/frame/uniques/src/benchmarking.rs index 80d02f1362189..a8a83010c51ff 100644 --- a/substrate/frame/uniques/src/benchmarking.rs +++ b/substrate/frame/uniques/src/benchmarking.rs @@ -20,6 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, BenchmarkError, }; @@ -29,7 +30,6 @@ use frame_support::{ }; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::Bounded; -use sp_std::prelude::*; use crate::Pallet as Uniques; diff --git a/substrate/frame/uniques/src/impl_nonfungibles.rs b/substrate/frame/uniques/src/impl_nonfungibles.rs index 0ae055a98d8c8..8e4af723f8033 100644 --- a/substrate/frame/uniques/src/impl_nonfungibles.rs +++ b/substrate/frame/uniques/src/impl_nonfungibles.rs @@ -18,13 +18,13 @@ //! Implementations for `nonfungibles` traits. 
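Editor's note, not part of the diff: a recurring simplification in the mock runtimes touched here (treasury, tx-pause, uniques, vesting, whitelist) is deriving the balances config from a test prelude instead of spelling out every associated type. The shape, sketched for a test crate that already depends on `frame_support` (for `derive_impl`) and `pallet_balances`:

```rust
// Defaults such as Balance, ExistentialDeposit, MaxLocks, etc. come from the
// prelude; only the associated types that differ are listed explicitly.
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    type AccountStore = System;
}
```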
use super::*; +use alloc::vec::Vec; use frame_support::{ storage::KeyPrefixIterator, traits::{tokens::nonfungibles::*, Get}, BoundedSlice, }; use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::prelude::*; impl, I: 'static> Inspect<::AccountId> for Pallet { type ItemId = T::ItemId; diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs index 2291d19de2bfb..dc27c33562340 100644 --- a/substrate/frame/uniques/src/lib.rs +++ b/substrate/frame/uniques/src/lib.rs @@ -42,6 +42,9 @@ mod types; pub mod migration; pub mod weights; +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode}; use frame_support::traits::{ tokens::Locker, BalanceStatus::Reserved, Currency, EnsureOriginWithArg, ReservableCurrency, @@ -51,7 +54,6 @@ use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, ArithmeticError, RuntimeDebug, }; -use sp_std::prelude::*; pub use pallet::*; pub use types::*; diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 9fd7f87e159bb..c3b74eb8c2554 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type MaxLocks = (); - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl Config for Test { diff --git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 5dfe43c96888d..a8428c420b3e5 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -21,7 +21,6 @@ use crate::{mock::*, Event, *}; use frame_support::{assert_noop, assert_ok, traits::Currency}; use pallet_balances::Error as BalancesError; use sp_runtime::traits::Dispatchable; -use sp_std::prelude::*; fn items() -> Vec<(u64, u32, u32)> { let mut r: Vec<_> = Account::::iter().map(|x| x.0).collect(); diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 00e8be75a3de6..f08232de9cd6d 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -16,22 +16,21 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = 
true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-root-testing = { path = "../root-testing" } -pallet-collective = { path = "../collective" } -pallet-timestamp = { path = "../timestamp" } -sp-core = { path = "../../primitives/core" } +pallet-balances = { workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -48,7 +47,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/utility/src/benchmarking.rs b/substrate/frame/utility/src/benchmarking.rs index 78911fd310e85..467055ecd800d 100644 --- a/substrate/frame/utility/src/benchmarking.rs +++ b/substrate/frame/utility/src/benchmarking.rs @@ -20,6 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index 7f963e3637d6f..3ce5b4ff86496 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -56,6 +56,9 @@ mod benchmarking; mod tests; pub mod weights; +extern crate alloc; + +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, @@ -64,7 +67,6 @@ use frame_support::{ use sp_core::TypeId; use sp_io::hashing::blake2_256; use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; -use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; @@ -131,7 +133,7 @@ pub mod pallet { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + + let call_size = ((core::mem::size_of::<::RuntimeCall>() as u32 + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. @@ -146,7 +148,7 @@ pub mod pallet { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. 
assert!( - sp_std::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, + core::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 9bcbec99f3b44..0a58a92b4c9ea 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -25,14 +25,13 @@ use crate as utility; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, Pays}, - error::BadOrigin, parameter_types, storage, traits::{ConstU64, Contains}, weights::Weight, }; use pallet_collective::{EnsureProportionAtLeast, Instance1}; use sp_runtime::{ - traits::{BlakeTwo256, Dispatchable, Hash}, + traits::{BadOrigin, BlakeTwo256, Dispatchable, Hash}, BuildStorage, DispatchError, TokenError, }; @@ -151,20 +150,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_root_testing::Config for Test { diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 7372b84240364..64c26174cfdfd 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -16,21 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } +], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true } [features] default = ["std"] @@ -45,7 +44,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 311590873d95f..68214c4f47ccc 100644 --- a/substrate/frame/vesting/src/benchmarking.rs +++ b/substrate/frame/vesting/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::assert_ok; use 
frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; -use super::*; +use super::{Vesting as VestingStorage, *}; use crate::Pallet as Vesting; const SEED: u32 = 0; @@ -291,7 +291,7 @@ benchmarks! { "Vesting balance should equal sum locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); @@ -304,7 +304,7 @@ benchmarks! { ); let expected_index = (s - 2) as usize; assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule ); assert_eq!( @@ -313,7 +313,7 @@ benchmarks! { "Vesting balance should equal total locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); @@ -344,7 +344,7 @@ benchmarks! { "Vesting balance should reflect that we are half way through all schedules duration", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); @@ -359,12 +359,12 @@ benchmarks! { ); let expected_index = (s - 2) as usize; assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule, "New schedule is properly created and placed" ); assert_eq!( - Vesting::::vesting(&caller).unwrap()[expected_index], + VestingStorage::::get(&caller).unwrap()[expected_index], expected_schedule ); assert_eq!( @@ -373,7 +373,7 @@ benchmarks! { "Vesting balance should equal half total locked of all schedules", ); assert_eq!( - Vesting::::vesting(&caller).unwrap().len(), + VestingStorage::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); @@ -404,7 +404,7 @@ force_remove_vesting_schedule { }: _(RawOrigin::Root, target_lookup, schedule_index) verify { assert_eq!( - Vesting::::vesting(&target).unwrap().len(), + VestingStorage::::get(&target).unwrap().len(), schedule_index as usize, "Schedule count should reduce by 1" ); diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index 4101caded4180..bfc10efeed796 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -56,7 +56,11 @@ mod vesting_info; pub mod migrations; pub mod weights; +extern crate alloc; + +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; +use core::{fmt::Debug, marker::PhantomData}; use frame_support::{ dispatch::DispatchResult, ensure, @@ -76,7 +80,6 @@ use sp_runtime::{ }, DispatchError, RuntimeDebug, }; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub use pallet::*; pub use vesting_info::*; @@ -200,7 +203,6 @@ pub mod pallet { /// Information regarding the vesting of a given account. 
#[pallet::storage] - #[pallet::getter(fn vesting)] pub type Vesting = StorageMap< _, Blake2_128Concat, @@ -419,7 +421,7 @@ pub mod pallet { let schedule1_index = schedule1_index as usize; let schedule2_index = schedule2_index as usize; - let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(&who).ok_or(Error::::NotVesting)?; let merge_action = VestingAction::Merge { index1: schedule1_index, index2: schedule2_index }; @@ -464,6 +466,14 @@ pub mod pallet { } impl Pallet { + // Public function for accessing vesting storage + pub fn vesting( + account: T::AccountId, + ) -> Option, BlockNumberFor>, MaxVestingSchedulesGet>> + { + Vesting::::get(account) + } + // Create a new `VestingInfo`, based off of two other `VestingInfo`s. // NOTE: We assume both schedules have had funds unlocked up through the current block. fn merge_vesting_info( @@ -622,7 +632,7 @@ impl Pallet { /// Unlock any vested funds of `who`. fn do_vest(who: T::AccountId) -> DispatchResult { - let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(&who).ok_or(Error::::NotVesting)?; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), VestingAction::Passive)?; @@ -687,7 +697,7 @@ where /// Get the amount that is currently being vested and cannot be transferred out of this account. fn vesting_balance(who: &T::AccountId) -> Option> { - if let Some(v) = Self::vesting(who) { + if let Some(v) = Vesting::::get(who) { let now = T::BlockNumberProvider::current_block_number(); let total_locked_now = v.iter().fold(Zero::zero(), |total, schedule| { schedule.locked_at::(now).saturating_add(total) @@ -726,7 +736,7 @@ where return Err(Error::::InvalidScheduleParams.into()) }; - let mut schedules = Self::vesting(who).unwrap_or_default(); + let mut schedules = Vesting::::get(who).unwrap_or_default(); // NOTE: we must push the new schedule so that `exec_action` // will give the correct new locked amount. @@ -764,7 +774,7 @@ where /// Remove a vesting schedule for a given account. fn remove_vesting_schedule(who: &T::AccountId, schedule_index: u32) -> DispatchResult { - let schedules = Self::vesting(who).ok_or(Error::::NotVesting)?; + let schedules = Vesting::::get(who).ok_or(Error::::NotVesting)?; let remove_action = VestingAction::Remove { index: schedule_index as usize }; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), remove_action)?; diff --git a/substrate/frame/vesting/src/migrations.rs b/substrate/frame/vesting/src/migrations.rs index 6fe82312b637d..33fa5d0df882c 100644 --- a/substrate/frame/vesting/src/migrations.rs +++ b/substrate/frame/vesting/src/migrations.rs @@ -18,6 +18,7 @@ //! Storage migrations for the vesting pallet. use super::*; +use alloc::vec; // Migration from single schedule to multiple schedules. pub mod v1 { diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index 674a6f6e2a836..f0954a5b989c8 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -15,10 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
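Editor's note, not part of the diff: the vesting hunks above remove the `#[pallet::getter(fn vesting)]` attribute and replace it with an explicit public accessor, so internal call sites and tests now read the storage map directly (the tests use `Vesting::<Test>::get(..)`, and the benchmarking module aliases the map as `VestingStorage` to avoid clashing with `Pallet as Vesting`). The accessor is sketched below with the angle-bracketed generics reconstructed, since the flattened hunk drops them.

```rust
// Inside `impl<T: Config> Pallet<T>` of the vesting pallet:
/// Public function for accessing vesting storage, replacing the old
/// `#[pallet::getter(fn vesting)]` attribute.
pub fn vesting(
    account: T::AccountId,
) -> Option<
    BoundedVec<VestingInfo<BalanceOf<T>, BlockNumberFor<T>>, MaxVestingSchedulesGet<T>>,
> {
    Vesting::<T>::get(account)
}
```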
-use frame_support::{ - derive_impl, parameter_types, - traits::{ConstU32, WithdrawReasons}, -}; +use frame_support::{derive_impl, parameter_types, traits::WithdrawReasons}; use sp_runtime::{traits::Identity, BuildStorage}; use super::*; @@ -41,20 +38,10 @@ impl frame_system::Config for Test { type Block = Block; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type AccountStore = System; - type Balance = u64; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; - type MaxLocks = ConstU32<10>; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } parameter_types! { pub const MinVestedTransfer: u64 = 256 * 2; diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index 2e1e41fc9578f..004da0dfbfa13 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -65,9 +65,9 @@ fn check_vesting_status() { 64, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule + assert_eq!(VestingStorage::::get(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule // Account 1 has only 128 units vested from their illiquid ED * 5 units at block 1 assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); @@ -110,7 +110,7 @@ fn check_vesting_status_for_multi_schedule_account() { 10, ); // Account 2 already has a vesting schedule. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Account 2's free balance is from sched0. let free_balance = Balances::free_balance(&2); @@ -128,7 +128,7 @@ fn check_vesting_status_for_multi_schedule_account() { let free_balance = Balances::free_balance(&2); assert_eq!(free_balance, ED * (10 + 20)); // The most recently added schedule exists. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); // sched1 has free funds at block #1, but nothing else. assert_eq!(Vesting::vesting_balance(&2), Some(free_balance - sched1.per_block())); @@ -171,7 +171,7 @@ fn check_vesting_status_for_multi_schedule_account() { assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Since we have not called any extrinsics that would unlock funds the schedules // are still in storage, - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); // but once we unlock the funds, they are removed from storage. 
vest_and_assert_no_vesting::(2); }); @@ -207,7 +207,7 @@ fn vested_balance_should_transfer_with_multi_sched() { let sched0 = VestingInfo::new(5 * ED, 128, 0); assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); // Total 10*ED locked for all the schedules. - assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![sched0, sched0]); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 3840); // Account 1 has free balance @@ -245,7 +245,7 @@ fn vested_balance_should_transfer_using_vest_other_with_multi_sched() { let sched0 = VestingInfo::new(5 * ED, 128, 0); assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); // Total of 10*ED of locked for all the schedules. - assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![sched0, sched0]); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 3840); // Account 1 has free balance @@ -305,7 +305,7 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { 64, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); + assert_eq!(VestingStorage::::get(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 can still send liquid funds assert_ok!(Balances::transfer_allow_death(Some(12).into(), 3, 256 * 5)); @@ -320,7 +320,7 @@ fn vested_transfer_works() { assert_eq!(user3_free_balance, 256 * 30); assert_eq!(user4_free_balance, 256 * 40); // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. let new_vesting_schedule = VestingInfo::new( 256 * 5, @@ -329,7 +329,7 @@ fn vested_transfer_works() { ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + assert_eq!(VestingStorage::::get(&4).unwrap(), vec![new_vesting_schedule]); // Ensure the transfer happened correctly. let user3_free_balance_updated = Balances::free_balance(&3); assert_eq!(user3_free_balance_updated, 256 * 25); @@ -368,7 +368,7 @@ fn vested_transfer_correctly_fails() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Fails due to too low transfer amount. let new_vesting_schedule_too_low = @@ -450,7 +450,7 @@ fn force_vested_transfer_works() { assert_eq!(user3_free_balance, ED * 30); assert_eq!(user4_free_balance, ED * 40); // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. let new_vesting_schedule = VestingInfo::new( ED * 5, @@ -469,8 +469,8 @@ fn force_vested_transfer_works() { new_vesting_schedule )); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap()[0], new_vesting_schedule); - assert_eq!(Vesting::vesting(&4).unwrap().len(), 1); + assert_eq!(VestingStorage::::get(&4).unwrap()[0], new_vesting_schedule); + assert_eq!(VestingStorage::::get(&4).unwrap().len(), 1); // Ensure the transfer happened correctly. 
let user3_free_balance_updated = Balances::free_balance(&3); assert_eq!(user3_free_balance_updated, ED * 25); @@ -508,7 +508,7 @@ fn force_vested_transfer_correctly_fails() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_vesting_schedule]); // Too low transfer amount. let new_vesting_schedule_too_low = @@ -594,12 +594,12 @@ fn merge_schedules_that_have_not_started() { ED, // Vest over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); assert_eq!(Balances::usable_balance(&2), 0); // Add a schedule that is identical to the one that already exists. assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched0)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched0]); assert_eq!(Balances::usable_balance(&2), 0); assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); @@ -610,7 +610,7 @@ fn merge_schedules_that_have_not_started() { sched0.per_block() * 2, 10, // Starts at the block the schedules are merged/ ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched1]); assert_eq!(Balances::usable_balance(&2), 0); }); @@ -626,7 +626,7 @@ fn merge_ongoing_schedules() { ED, // Vest over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 10, @@ -634,7 +634,7 @@ fn merge_ongoing_schedules() { sched0.starting_block() + 5, // Start at block 15. ); assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); // Got to half way through the second schedule where both schedules are actively vesting. let cur_block = 20; @@ -666,7 +666,7 @@ fn merge_ongoing_schedules() { let sched2_per_block = sched2_locked / sched2_duration; let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, cur_block); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2]); // And just to double check, we assert the new merged schedule we be cleaned up as expected. System::set_block_number(30); @@ -696,7 +696,7 @@ fn merging_shifts_other_schedules_index() { ); // Account 3 starts out with no schedules, - assert_eq!(Vesting::vesting(&3), None); + assert_eq!(VestingStorage::::get(&3), None); // and some usable balance. let usable_balance = Balances::usable_balance(&3); assert_eq!(usable_balance, 30 * ED); @@ -710,7 +710,7 @@ fn merging_shifts_other_schedules_index() { assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched2)); // With no schedules vested or merged they are in the order they are created - assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&3).unwrap(), vec![sched0, sched1, sched2]); // and the usable balance has not changed. assert_eq!(usable_balance, Balances::usable_balance(&3)); @@ -731,7 +731,7 @@ fn merging_shifts_other_schedules_index() { let sched3 = VestingInfo::new(sched3_locked, sched3_per_block, sched3_start); // The not touched schedule moves left and the new merged schedule is appended. 
- assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched1, sched3]); + assert_eq!(VestingStorage::::get(&3).unwrap(), vec![sched1, sched3]); // The usable balance hasn't changed since none of the schedules have started. assert_eq!(Balances::usable_balance(&3), usable_balance); }); @@ -748,7 +748,7 @@ fn merge_ongoing_and_yet_to_be_started_schedules() { ED, // Vesting over 20 blocks 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Fast forward to half way through the life of sched1. let mut cur_block = @@ -800,7 +800,7 @@ fn merge_ongoing_and_yet_to_be_started_schedules() { let sched2_per_block = sched2_locked / sched2_duration; let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, sched2_start); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2]); }); } @@ -815,7 +815,7 @@ fn merge_finished_and_ongoing_schedules() { ED, // Vesting over 20 blocks. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 40, @@ -834,7 +834,7 @@ fn merge_finished_and_ongoing_schedules() { assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched2)); // The schedules are in expected order prior to merging. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); // Fast forward to sched0's end block. let cur_block = sched0.ending_block_as_balance::(); @@ -849,7 +849,7 @@ fn merge_finished_and_ongoing_schedules() { // sched2 is now the first, since sched0 & sched1 get filtered out while "merging". // sched1 gets treated like the new merged schedule by getting pushed onto back // of the vesting schedules vec. Note: sched0 finished at the current block. - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2, sched1]); // sched0 has finished, so its funds are fully unlocked. let sched0_unlocked_now = sched0.locked(); @@ -877,7 +877,7 @@ fn merge_finishing_schedules_does_not_create_a_new_one() { ED, // 20 block duration. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Create sched1 and transfer it to account 2. let sched1 = VestingInfo::new( @@ -886,7 +886,7 @@ fn merge_finishing_schedules_does_not_create_a_new_one() { 10, ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); let all_scheds_end = sched0 .ending_block_as_balance::() @@ -919,7 +919,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { ED, // 20 block duration. 
10, // Ends at block 30 ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); let sched1 = VestingInfo::new( ED * 30, @@ -927,7 +927,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { 35, ); assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched1)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); let sched2 = VestingInfo::new( ED * 40, @@ -936,7 +936,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { ); // Add a 3rd schedule to demonstrate how sched1 shifts. assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched2)); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1, sched2]); System::set_block_number(30); @@ -951,7 +951,7 @@ fn merge_finished_and_yet_to_be_started_schedules() { // sched0 is removed since it finished, and sched1 is removed and then pushed on the back // because it is treated as the merged schedule - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched2, sched1]); // The usable balance is updated because merging fully unlocked sched0. assert_eq!(Balances::usable_balance(&2), sched0.locked()); @@ -967,7 +967,7 @@ fn merge_schedules_throws_proper_errors() { ED, // 20 block duration. 10, ); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0]); // Account 2 only has 1 vesting schedule. assert_noop!( @@ -976,12 +976,12 @@ fn merge_schedules_throws_proper_errors() { ); // Account 4 has 0 vesting schedules. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); assert_noop!(Vesting::merge_schedules(Some(4).into(), 0, 1), Error::::NotVesting); // There are enough schedules to merge but an index is non-existent. Vesting::vested_transfer(Some(3).into(), 2, sched0).unwrap(); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched0]); assert_noop!( Vesting::merge_schedules(Some(2).into(), 0, 2), Error::::ScheduleIndexOutOfBounds @@ -1014,17 +1014,17 @@ fn generates_multiple_schedules_from_genesis_config() { .build() .execute_with(|| { let user1_sched1 = VestingInfo::new(5 * ED, 128, 0u64); - assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_sched1]); + assert_eq!(VestingStorage::::get(&1).unwrap(), vec![user1_sched1]); let user2_sched1 = VestingInfo::new(1 * ED, 12, 10u64); let user2_sched2 = VestingInfo::new(2 * ED, 25, 10u64); - assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_sched1, user2_sched2]); + assert_eq!(VestingStorage::::get(&2).unwrap(), vec![user2_sched1, user2_sched2]); let user12_sched1 = VestingInfo::new(1 * ED, 12, 10u64); let user12_sched2 = VestingInfo::new(2 * ED, 25, 10u64); let user12_sched3 = VestingInfo::new(3 * ED, 38, 10u64); assert_eq!( - Vesting::vesting(&12).unwrap(), + VestingStorage::::get(&12).unwrap(), vec![user12_sched1, user12_sched2, user12_sched3] ); }); @@ -1162,7 +1162,7 @@ fn remove_vesting_schedule() { assert_eq!(Balances::free_balance(&3), 256 * 30); assert_eq!(Balances::free_balance(&4), 256 * 40); // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Make the schedule for the new transfer. 
let new_vesting_schedule = VestingInfo::new( ED * 5, @@ -1171,7 +1171,7 @@ fn remove_vesting_schedule() { ); assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + assert_eq!(VestingStorage::::get(&4).unwrap(), vec![new_vesting_schedule]); // Account 4 has 5 * 256 locked. assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); // Verify only root can call. @@ -1183,7 +1183,7 @@ fn remove_vesting_schedule() { // Appropriate storage is cleaned up. assert!(!>::contains_key(4)); // Check the vesting balance is zero. - assert_eq!(Vesting::vesting(&4), None); + assert_eq!(VestingStorage::::get(&4), None); // Verifies that trying to remove a schedule when it doesnt exist throws error. assert_noop!( Vesting::force_remove_vesting_schedule(RawOrigin::Root.into(), 4, 0), diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 61bbb278019de..c48a15f216318 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -15,20 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -pallet-balances = { path = "../balances" } -pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io" } +pallet-balances = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,7 +43,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking", diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index 7fb5632fc0024..cbe6ee4becd0a 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -73,7 +73,7 @@ mod benchmarks { ) -> Result<(), BenchmarkError> { let origin = T::DispatchWhitelistedOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; - let remark = sp_std::vec![1u8; n as usize]; + let remark = alloc::vec![1u8; n as usize]; let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); @@ -97,7 +97,7 @@ mod benchmarks { fn 
dispatch_whitelisted_call_with_preimage(n: Linear<1, 10_000>) -> Result<(), BenchmarkError> { let origin = T::DispatchWhitelistedOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; - let remark = sp_std::vec![1u8; n as usize]; + let remark = alloc::vec![1u8; n as usize]; let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); let call_hash = T::Hashing::hash_of(&call); diff --git a/substrate/frame/whitelist/src/lib.rs b/substrate/frame/whitelist/src/lib.rs index 44551abd10715..de16c2c2da883 100644 --- a/substrate/frame/whitelist/src/lib.rs +++ b/substrate/frame/whitelist/src/lib.rs @@ -40,6 +40,9 @@ mod tests; pub mod weights; pub use weights::WeightInfo; +extern crate alloc; + +use alloc::boxed::Box; use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ dispatch::{GetDispatchInfo, PostDispatchInfo}, @@ -49,7 +52,6 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::traits::{Dispatchable, Hash}; -use sp_std::prelude::*; pub use pallet::*; diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index 6fb8711057ef0..0a97d1c2df544 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_whitelist; -use frame_support::{construct_runtime, derive_impl, traits::ConstU64}; +use frame_support::{construct_runtime, derive_impl}; use frame_system::EnsureRoot; use sp_runtime::BuildStorage; @@ -43,20 +43,9 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type Balance = u64; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; type AccountStore = System; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); } impl pallet_preimage::Config for Test { diff --git a/substrate/kitchensink_runtime.wasm b/substrate/kitchensink_runtime.wasm new file mode 100644 index 0000000000000..7ebb14371243a Binary files /dev/null and b/substrate/kitchensink_runtime.wasm differ diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f48480f398d00..d6c64a0ada28b 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -16,26 +16,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -sp-api-proc-macro = { path = "proc-macro", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-externalities = { path = "../externalities", default-features = false, optional = true } -sp-version = { path = "../version", default-features = false } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true } -sp-trie = { path = "../trie", default-features = false, optional = true } -hash-db = { version = "0.16.0", optional = true } +codec = { workspace = true } +sp-api-proc-macro = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { 
workspace = true } +sp-runtime-interface = { workspace = true } +sp-externalities = { optional = true, workspace = true } +sp-version = { workspace = true } +sp-state-machine = { optional = true, workspace = true } +sp-trie = { optional = true, workspace = true } +hash-db = { optional = true, workspace = true, default-features = true } thiserror = { optional = true, workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } -sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } +], workspace = true } +sp-metadata-ir = { optional = true, workspace = true } log = { workspace = true } +docify = { workspace = true } [dev-dependencies] -sp-test-primitives = { path = "../test-primitives" } +sp-test-primitives = { workspace = true } [features] default = ["std"] @@ -52,7 +52,6 @@ std = [ "sp-runtime-interface/std", "sp-runtime/std", "sp-state-machine/std", - "sp-std/std", "sp-test-primitives/std", "sp-trie/std", "sp-version/std", diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index b1bc547f3e4ae..7d7fc19fcf5be 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -21,14 +21,14 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } -proc-macro2 = "1.0.56" -blake2 = { version = "0.10.4", default-features = false } -proc-macro-crate = "3.0.0" -expander = "2.0.0" -Inflector = "0.11.4" +proc-macro2 = { workspace = true } +blake2 = { workspace = true } +proc-macro-crate = { workspace = true } +expander = { workspace = true } +Inflector = { workspace = true } [dev-dependencies] -assert_matches = "1.3.0" +assert_matches = { workspace = true } [features] # Required for the doc tests diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 20f989c4882e3..d254bf20601fe 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -70,6 +70,8 @@ // Make doc tests happy extern crate self as sp_api; +extern crate alloc; + /// Private exports used by the macros. /// /// This is seen as internal API and can change at any point. @@ -90,7 +92,9 @@ pub mod __private { pub use std_imports::*; pub use crate::*; + pub use alloc::vec; pub use codec::{self, Decode, DecodeLimit, Encode}; + pub use core::{mem, slice}; pub use scale_info; pub use sp_core::offchain; #[cfg(not(feature = "std"))] @@ -103,7 +107,6 @@ pub mod __private { transaction_validity::TransactionValidity, ExtrinsicInclusionMode, RuntimeString, TransactionOutcome, }; - pub use sp_std::{mem, slice, vec}; pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))] @@ -532,6 +535,7 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api(call: &C) -> ApiRef; } +#[docify::export] /// Init the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). pub fn init_runtime_logger() { #[cfg(not(feature = "disable-logging"))] @@ -832,7 +836,7 @@ decl_runtime_apis! { /// Returns the supported metadata versions. /// /// This can be used to call `metadata_at_version`. 
- fn metadata_versions() -> sp_std::vec::Vec; + fn metadata_versions() -> alloc::vec::Vec; } } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index b49f774161fd3..6c159fc63d0d3 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -15,25 +15,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = ".." } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -sp-version = { path = "../../version" } -sp-tracing = { path = "../../tracing" } -sp-runtime = { path = "../../runtime" } -sp-consensus = { path = "../../consensus/common" } -sc-block-builder = { path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "3.6.12" } -sp-state-machine = { path = "../../state-machine" } -trybuild = "1.0.88" -rustversion = "1.0.6" -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-api = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-version = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +trybuild = { workspace = true } +rustversion = { workspace = true } +scale-info = { features = ["derive"], workspace = true } [dev-dependencies] -criterion = "0.5.1" -futures = "0.3.30" +criterion = { workspace = true, default-features = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -sp-core = { path = "../../core" } -static_assertions = "1.1.0" +sp-core = { workspace = true, default-features = true } +static_assertions = { workspace = true, default-features = true } [[bench]] name = "bench" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index cbb9f2133577b..c0508d377f8b4 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -18,12 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../core", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +sp-core = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -sp-std = { path = "../std", default-features = false } -sp-io = { path = "../io", default-features = false } +sp-io = { workspace = true } [features] default = ["std"] @@ -34,7 +33,6 @@ std = [ "serde/std", "sp-core/std", "sp-io/std", - "sp-std/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/application-crypto/src/bandersnatch.rs b/substrate/primitives/application-crypto/src/bandersnatch.rs index fc7383815d702..0e21e5d3bce31 100644 --- a/substrate/primitives/application-crypto/src/bandersnatch.rs +++ b/substrate/primitives/application-crypto/src/bandersnatch.rs @@ -18,8 +18,8 @@ //! 
Bandersnatch VRF application crypto types. use crate::{KeyTypeId, RuntimePublic}; +use alloc::vec::Vec; pub use sp_core::bandersnatch::*; -use sp_std::vec::Vec; mod app { crate::app_crypto!(super, sp_core::testing::BANDERSNATCH); diff --git a/substrate/primitives/application-crypto/src/bls377.rs b/substrate/primitives/application-crypto/src/bls377.rs index 3bd01de139c94..9d346ea618343 100644 --- a/substrate/primitives/application-crypto/src/bls377.rs +++ b/substrate/primitives/application-crypto/src/bls377.rs @@ -18,8 +18,8 @@ //! BLS12-377 crypto applications. use crate::{KeyTypeId, RuntimePublic}; +use alloc::vec::Vec; pub use sp_core::bls::bls377::*; -use sp_std::vec::Vec; mod app { crate::app_crypto!(super, sp_core::testing::BLS377); diff --git a/substrate/primitives/application-crypto/src/ecdsa.rs b/substrate/primitives/application-crypto/src/ecdsa.rs index 439b51dc60450..94d5288584ccb 100644 --- a/substrate/primitives/application-crypto/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/src/ecdsa.rs @@ -19,7 +19,7 @@ use crate::{KeyTypeId, RuntimePublic}; -use sp_std::vec::Vec; +use alloc::vec::Vec; pub use sp_core::ecdsa::*; diff --git a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs index 8dee73095fb2e..443d214d67729 100644 --- a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs +++ b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs @@ -18,7 +18,7 @@ //! ECDSA and BLS12-377 paired crypto applications. use crate::{KeyTypeId, RuntimePublic}; -use sp_std::vec::Vec; +use alloc::vec::Vec; pub use sp_core::paired_crypto::ecdsa_bls377::*; diff --git a/substrate/primitives/application-crypto/src/ed25519.rs b/substrate/primitives/application-crypto/src/ed25519.rs index addefe7daf643..6769de4e47c34 100644 --- a/substrate/primitives/application-crypto/src/ed25519.rs +++ b/substrate/primitives/application-crypto/src/ed25519.rs @@ -19,7 +19,7 @@ use crate::{KeyTypeId, RuntimePublic}; -use sp_std::vec::Vec; +use alloc::vec::Vec; pub use sp_core::ed25519::*; diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 2355f1ba527d5..37949d7c41ded 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -20,6 +20,8 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use sp_core::crypto::{key_types, CryptoTypeId, DeriveJunction, KeyTypeId, Ss58Codec}; #[doc(hidden)] pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; @@ -30,15 +32,17 @@ pub use sp_core::{ RuntimeDebug, }; +#[doc(hidden)] +pub use alloc::vec::Vec; #[doc(hidden)] pub use codec; #[doc(hidden)] +pub use core::ops::Deref; +#[doc(hidden)] pub use scale_info; #[doc(hidden)] #[cfg(feature = "serde")] pub use serde; -#[doc(hidden)] -pub use sp_std::{ops::Deref, vec::Vec}; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; @@ -357,7 +361,7 @@ macro_rules! 
app_crypto_public_common { #[doc(hidden)] pub mod module_format_string_prelude { #[cfg(all(not(feature = "std"), feature = "serde"))] - pub use sp_std::alloc::{format, string::String}; + pub use alloc::{format, string::String}; #[cfg(feature = "std")] pub use std::{format, string::String}; } diff --git a/substrate/primitives/application-crypto/src/sr25519.rs b/substrate/primitives/application-crypto/src/sr25519.rs index d411cc253c0d8..ba6f0e3ae6b37 100644 --- a/substrate/primitives/application-crypto/src/sr25519.rs +++ b/substrate/primitives/application-crypto/src/sr25519.rs @@ -19,7 +19,7 @@ use crate::{KeyTypeId, RuntimePublic}; -use sp_std::vec::Vec; +use alloc::vec::Vec; pub use sp_core::sr25519::*; diff --git a/substrate/primitives/application-crypto/src/traits.rs b/substrate/primitives/application-crypto/src/traits.rs index 0b59abf272dc7..1789d9b96fd82 100644 --- a/substrate/primitives/application-crypto/src/traits.rs +++ b/substrate/primitives/application-crypto/src/traits.rs @@ -18,8 +18,9 @@ use codec::Codec; use scale_info::TypeInfo; +use alloc::vec::Vec; +use core::fmt::Debug; use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Pair, Public}; -use sp_std::{fmt::Debug, vec::Vec}; /// Application-specific cryptographic object. /// @@ -47,8 +48,8 @@ pub trait AppCrypto: 'static + Sized + CryptoType { } /// Type which implements Hash in std, not when no-std (std variant). -pub trait MaybeHash: sp_std::hash::Hash {} -impl MaybeHash for T {} +pub trait MaybeHash: core::hash::Hash {} +impl MaybeHash for T {} /// Application-specific key pair. pub trait AppPair: diff --git a/substrate/primitives/application-crypto/test/Cargo.toml b/substrate/primitives/application-crypto/test/Cargo.toml index 0057606b38e57..43f9d3852005a 100644 --- a/substrate/primitives/application-crypto/test/Cargo.toml +++ b/substrate/primitives/application-crypto/test/Cargo.toml @@ -16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../../api" } -sp-application-crypto = { path = ".." 
} -sp-core = { path = "../../core", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-keystore = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index a9f2b80156f5e..4a9d5ba234bab 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -17,23 +17,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", "max-encoded-len", -] } -integer-sqrt = "0.1.2" -num-traits = { version = "0.2.17", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +], workspace = true } +integer-sqrt = { workspace = true } +num-traits = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -static_assertions = "1.1.0" -sp-std = { path = "../std", default-features = false } -docify = "0.2.8" +static_assertions = { workspace = true, default-features = true } +docify = { workspace = true } [dev-dependencies] -criterion = "0.5.1" -primitive-types = "0.12.0" -sp-crypto-hashing = { path = "../crypto/hashing" } -rand = "0.8.5" +criterion = { workspace = true, default-features = true } +primitive-types = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,7 +42,6 @@ std = [ "scale-info/std", "serde/std", "sp-crypto-hashing/std", - "sp-std/std", ] # Serde support without relying on std features. serde = ["dep:serde", "scale-info/serde"] diff --git a/substrate/primitives/arithmetic/fuzzer/Cargo.toml b/substrate/primitives/arithmetic/fuzzer/Cargo.toml index ace30e9c90e91..c978393af34c4 100644 --- a/substrate/primitives/arithmetic/fuzzer/Cargo.toml +++ b/substrate/primitives/arithmetic/fuzzer/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -arbitrary = "1.3.2" -fraction = "0.13.1" -honggfuzz = "0.5.49" -num-bigint = "0.4.3" -sp-arithmetic = { path = ".." 
} +arbitrary = { workspace = true } +fraction = { workspace = true } +honggfuzz = { workspace = true } +num-bigint = { workspace = true } +sp-arithmetic = { workspace = true, default-features = true } [[bin]] name = "biguint" diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 72a8bb7fc47d0..26e08b8504c31 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index cc4b10851544d..ddc0c00a3be8a 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false } -sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 5e51a2d06ed7a..aedd720612c33 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -17,14 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -futures = "0.3.30" +codec = { features = ["derive"], workspace = true } +futures = { workspace = true } log = { workspace = true, default-features = true } -parking_lot = "0.12.1" -schnellru = "0.2.1" +parking_lot = { workspace = true, default-features = true } +schnellru = { workspace = true } thiserror = { workspace = true } -sp-api = { path = "../api" } -sp-consensus = { path = "../consensus/common" } -sp-database = { path = "../database" } -sp-runtime = { path = "../runtime" } -sp-state-machine = { path = "../state-machine" } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 76393420da740..a928217d58854 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -21,15 +21,15 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ 
generic::BlockId, - traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justifications, }; -use std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use std::collections::{btree_set::BTreeSet, HashMap, VecDeque}; use crate::{ error::{Error, Result}, - header_metadata::{self, HeaderMetadata}, - lowest_common_ancestor_multiblock, tree_route, TreeRoute, + header_metadata::HeaderMetadata, + tree_route, CachedHeaderMetadata, }; /// Blockchain database header backend. Does not perform any validation. @@ -128,6 +128,32 @@ where { } +struct MinimalBlockMetadata { + number: NumberFor, + hash: Block::Hash, + parent: Block::Hash, +} + +impl Clone for MinimalBlockMetadata +where + Block: BlockT, +{ + fn clone(&self) -> Self { + Self { number: self.number, hash: self.hash, parent: self.parent } + } +} + +impl Copy for MinimalBlockMetadata where Block: BlockT {} + +impl From<&CachedHeaderMetadata> for MinimalBlockMetadata +where + Block: BlockT, +{ + fn from(value: &CachedHeaderMetadata) -> Self { + Self { number: value.number, hash: value.hash, parent: value.parent } + } +} + /// Blockchain database backend. Does not perform any validation. pub trait Backend: HeaderBackend + HeaderMetadata @@ -226,88 +252,128 @@ pub trait Backend: finalized_block_hash: Block::Hash, finalized_block_number: NumberFor, ) -> std::result::Result, Error> { - let mut result = DisplacedLeavesAfterFinalization::default(); - let leaves = self.leaves()?; // If we have only one leaf there are no forks, and we can return early. if finalized_block_number == Zero::zero() || leaves.len() == 1 { - return Ok(result) + return Ok(DisplacedLeavesAfterFinalization::default()) } - let first_leaf = leaves.first().ok_or(Error::Backend( - "Unable to find any leaves. This should not happen.".to_string(), - ))?; - let leaf_block_header = self.expect_header(*first_leaf)?; - - // If the distance between the leafs and the finalized block is large, calculating - // tree routes can be very expensive. In that case, we will try to find the - // lowest common ancestor between all the leaves. The assumption here is that the forks are - // close to the tip and not long. So the LCA can be computed from the header cache. If the - // LCA is above the finalized block, we know that there are no displaced leaves by the - // finalization. - if leaf_block_header - .number() - .checked_sub(&finalized_block_number) - .unwrap_or(0u32.into()) > - header_metadata::LRU_CACHE_SIZE.into() - { - if let Some(lca) = lowest_common_ancestor_multiblock(self, leaves.clone())? { - if lca.number > finalized_block_number { - return Ok(result) - } else { - log::warn!("The distance between leafs and finalized block is large. 
Finalization can take a long time."); - } - }; - } + // Store hashes of finalized blocks for quick checking later, the last block if the + // finalized one + let mut finalized_chain = VecDeque::new(); + finalized_chain + .push_front(MinimalBlockMetadata::from(&self.header_metadata(finalized_block_hash)?)); + + // Local cache is a performance optimization in case of finalized block deep below the + // tip of the chain with a lot of leaves above finalized block + let mut local_cache = HashMap::>::new(); + + let mut result = DisplacedLeavesAfterFinalization { + displaced_leaves: Vec::with_capacity(leaves.len()), + displaced_blocks: Vec::with_capacity(leaves.len()), + }; + let mut displaced_blocks_candidates = Vec::new(); - // For each leaf determine whether it belongs to a non-canonical branch. for leaf_hash in leaves { - let leaf_block_header = self.expect_header(leaf_hash)?; - let leaf_number = *leaf_block_header.number(); + let mut current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(leaf_hash)?); + let leaf_number = current_header_metadata.number; + + // Collect all block hashes until the height of the finalized block + displaced_blocks_candidates.clear(); + while current_header_metadata.number > finalized_block_number { + displaced_blocks_candidates.push(current_header_metadata.hash); + + let parent_hash = current_header_metadata.parent; + match local_cache.get(&parent_hash) { + Some(metadata_header) => { + current_header_metadata = *metadata_header; + }, + None => { + current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?); + // Cache locally in case more branches above finalized block reference + // the same block hash + local_cache.insert(parent_hash, current_header_metadata); + }, + } + } + + // If points back to the finalized header then nothing left to do, this leaf will be + // checked again later + if current_header_metadata.hash == finalized_block_hash { + continue; + } - let leaf_tree_route = match tree_route(self, leaf_hash, finalized_block_hash) { - Ok(tree_route) => tree_route, - Err(Error::UnknownBlock(_)) => { - // Sometimes routes can't be calculated. E.g. after warp sync. + // Otherwise the whole leaf branch needs to be pruned, track it all the way to the + // point of branching from the finalized chain + result.displaced_leaves.push((leaf_number, leaf_hash)); + result.displaced_blocks.extend(displaced_blocks_candidates.drain(..)); + result.displaced_blocks.push(current_header_metadata.hash); + // Collect the rest of the displaced blocks of leaf branch + for distance_from_finalized in 1_u32.. { + // Find block at `distance_from_finalized` from finalized block + let (finalized_chain_block_number, finalized_chain_block_hash) = + match finalized_chain.iter().rev().nth(distance_from_finalized as usize) { + Some(header) => (header.number, header.hash), + None => { + let metadata = MinimalBlockMetadata::from(&self.header_metadata( + finalized_chain.front().expect("Not empty; qed").parent, + )?); + let result = (metadata.number, metadata.hash); + finalized_chain.push_front(metadata); + result + }, + }; + + if current_header_metadata.number <= finalized_chain_block_number { + // Skip more blocks until we get all blocks on finalized chain until the height + // of the parent block continue; - }, - Err(e) => Err(e)?, - }; + } - // Is it a stale fork? 
- let needs_pruning = leaf_tree_route.common_block().hash != finalized_block_hash; + let parent_hash = current_header_metadata.parent; + if finalized_chain_block_hash == parent_hash { + // Reached finalized chain, nothing left to do + break; + } - if needs_pruning { - result.displaced_leaves.insert(leaf_hash, leaf_number); - result.tree_routes.insert(leaf_hash, leaf_tree_route); + // Store displaced block and look deeper for block on finalized chain + result.displaced_blocks.push(parent_hash); + current_header_metadata = + MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?); } } - Ok(result) + // There could be duplicates shared by multiple branches, clean them up + result.displaced_blocks.sort_unstable(); + result.displaced_blocks.dedup(); + + return Ok(result); } } /// Result of [`Backend::displaced_leaves_after_finalizing`]. #[derive(Clone, Debug)] pub struct DisplacedLeavesAfterFinalization { - /// A collection of hashes and block numbers for displaced leaves. - pub displaced_leaves: BTreeMap>, + /// A list of hashes and block numbers of displaced leaves. + pub displaced_leaves: Vec<(NumberFor, Block::Hash)>, - /// A collection of tree routes from the leaves to finalized block. - pub tree_routes: BTreeMap>, + /// A list of hashes displaced blocks from all displaced leaves. + pub displaced_blocks: Vec, } impl Default for DisplacedLeavesAfterFinalization { fn default() -> Self { - Self { displaced_leaves: Default::default(), tree_routes: Default::default() } + Self { displaced_leaves: Vec::new(), displaced_blocks: Vec::new() } } } impl DisplacedLeavesAfterFinalization { /// Returns a collection of hashes for the displaced leaves. pub fn hashes(&self) -> impl Iterator + '_ { - self.displaced_leaves.keys().cloned() + self.displaced_leaves.iter().map(|(_, hash)| *hash) } } diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index c2054445b0676..30024765add3c 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -20,12 +20,16 @@ use parking_lot::RwLock; use schnellru::{ByLength, LruMap}; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; +use sp_core::U256; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, One}, + Saturating, +}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. pub(crate) const LRU_CACHE_SIZE: u32 = 5_000; -/// Get lowest common ancestor between two blocks in the tree. +/// Get the lowest common ancestor between two blocks in the tree. /// /// This implementation is efficient because our trees have very few and /// small branches, and because of our current query pattern: @@ -96,30 +100,6 @@ pub fn lowest_common_ancestor + ?Sized>( Ok(HashAndNumber { hash: header_one.hash, number: header_one.number }) } -/// Get lowest common ancestor between multiple blocks. 
-pub fn lowest_common_ancestor_multiblock + ?Sized>( - backend: &T, - hashes: Vec, -) -> Result>, T::Error> { - // Ensure the list of hashes is not empty - let mut hashes_iter = hashes.into_iter(); - - let first_hash = match hashes_iter.next() { - Some(hash) => hash, - None => return Ok(None), - }; - - // Start with the first hash as the initial LCA - let first_cached = backend.header_metadata(first_hash)?; - let mut lca = HashAndNumber { number: first_cached.number, hash: first_cached.hash }; - for hash in hashes_iter { - // Calculate the LCA of the current LCA and the next hash - lca = lowest_common_ancestor(backend, lca.hash, hash)?; - } - - Ok(Some(lca)) -} - /// Compute a tree-route between two blocks. See tree-route docs for more details. pub fn tree_route + ?Sized>( backend: &T, @@ -129,15 +109,16 @@ pub fn tree_route + ?Sized>( let mut from = backend.header_metadata(from)?; let mut to = backend.header_metadata(to)?; - let mut from_branch = Vec::new(); - let mut to_branch = Vec::new(); - + let mut to_branch = + Vec::with_capacity(Into::::into(to.number.saturating_sub(from.number)).as_usize()); while to.number > from.number { to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; } + let mut from_branch = + Vec::with_capacity(Into::::into(to.number.saturating_sub(from.number)).as_usize()); while from.number > to.number { from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; @@ -156,6 +137,7 @@ pub fn tree_route + ?Sized>( // add the pivot block. and append the reversed to-branch // (note that it's reverse order originals) let pivot = from_branch.len(); + from_branch.reserve_exact(to_branch.len() + 1); from_branch.push(HashAndNumber { number: to.number, hash: to.hash }); from_branch.extend(to_branch.into_iter().rev()); @@ -173,7 +155,7 @@ pub struct HashAndNumber { /// A tree-route from one block to another in the chain. /// -/// All blocks prior to the pivot in the deque is the reverse-order unique ancestry +/// All blocks prior to the pivot in the vector is the reverse-order unique ancestry /// of the first block, the block at the pivot index is the common ancestor, /// and all blocks after the pivot is the ancestry of the second block, in /// order. 
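Editor's note: the `displaced_leaves_after_finalizing` rewrite above drops the per-leaf tree-route computation in favour of a plain parent walk from each leaf towards the finalized chain. The snippet below is a minimal, self-contained sketch of that idea only, not the sp-blockchain code itself: it uses toy `u64` block hashes and numbers, a `HashSet` standing in for the finalized chain, and invented names (`displaced_after_finalizing`, `Meta`), and it assumes every ancestor is present in the map and that some finalized block (e.g. genesis) is always reachable.

```rust
use std::collections::{HashMap, HashSet};

/// Toy block metadata: (block number, parent hash). Hashes are plain u64s.
type Meta = (u64, u64);

/// Collect the leaves and blocks that become unreachable once `finalized_hash`
/// is finalized. Sketch of the parent-walk approach: instead of building a full
/// tree route per leaf, follow parent links until the finalized chain is hit.
fn displaced_after_finalizing(
    blocks: &HashMap<u64, Meta>,     // hash -> (number, parent hash)
    finalized_chain: &HashSet<u64>,  // hashes of all finalized blocks
    finalized_hash: u64,
    leaves: &[u64],
) -> (Vec<(u64, u64)>, Vec<u64>) {
    let mut displaced_leaves = Vec::new();
    let mut displaced_blocks = Vec::new();

    for &leaf in leaves {
        let leaf_number = blocks[&leaf].0;
        let mut current = leaf;
        let mut branch = Vec::new();

        // Walk towards genesis until we join the finalized chain.
        // Assumes all ancestors are in `blocks` and genesis is finalized.
        while !finalized_chain.contains(&current) {
            branch.push(current);
            current = blocks[&current].1;
        }

        // Joining at the finalized block itself means the leaf extends the
        // canonical chain, so nothing on this branch is displaced.
        if current == finalized_hash {
            continue;
        }

        // Otherwise the branch forked off below the finalized block and the
        // whole branch, leaf included, is displaced.
        displaced_leaves.push((leaf_number, leaf));
        displaced_blocks.extend(branch);
    }

    // Branches can share a common stem; drop duplicates, as the real code does.
    displaced_blocks.sort_unstable();
    displaced_blocks.dedup();

    (displaced_leaves, displaced_blocks)
}
```

The actual implementation in the diff adds two optimisations on top of this shape: visited parents are cached as `MinimalBlockMetadata` in a local `HashMap`, and the finalized chain is extended lazily block by block in a `VecDeque`, so deep forks below a far-behind finalized block do not repeatedly re-read header metadata from the backend.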
diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index a54499178171d..3ae45062cb681 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,15 +16,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-inherents = { path = "../../inherents", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", default-features = false } +async-trait = { optional = true, workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-timestamp = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 46c032ba61a60..884fc6c25c97d 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-inherents = { path = "../../inherents", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", optional = true, default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-consensus-slots = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-timestamp = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index a682939a02f95..49d907506049a 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -15,23 +15,23 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-crypto-hashing = { path = "../../crypto/hashing", default-features = false } -sp-io = { path = "../../io", default-features = false } -sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false } -strum = { version = "0.26.2", features = ["derive"], default-features = false } -lazy_static = { version = "1.4.0", optional = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-io = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-keystore = { workspace = true } +strum = { features = ["derive"], workspace = true } +lazy_static = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -w3f-bls = { version = "0.1.3", features = ["std"] } +array-bytes = { workspace = true, default-features = true } +w3f-bls = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 913184402aef7..7f6f733d0e39a 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -53,7 +53,7 @@ use scale_info::TypeInfo; use sp_application_crypto::{AppPublic, RuntimeAppPublic}; use sp_core::H256; use sp_runtime::{ - traits::{Hash, Keccak256, NumberFor}, + traits::{Hash, Header as HeaderT, Keccak256, NumberFor}, OpaqueValue, }; @@ -307,8 +307,10 @@ pub struct VoteMessage { pub signature: Signature, } -/// Proof of voter misbehavior on a given set id. Misbehavior/equivocation in -/// BEEFY happens when a voter votes on the same round/block for different payloads. +/// Proof showing that an authority voted twice in the same round. +/// +/// One type of misbehavior in BEEFY happens when an authority votes in the same round/block +/// for different payloads. /// Proving is achieved by collecting the signed commitments of conflicting votes. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct DoubleVotingProof { @@ -333,6 +335,27 @@ impl DoubleVotingProof { } } +/// Proof showing that an authority voted for a non-canonical chain. +/// +/// Proving is achieved by providing a proof that contains relevant info about the canonical chain +/// at `commitment.block_number`. The `commitment` can be checked against this info. +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct ForkVotingProof { + /// The equivocated vote. + pub vote: VoteMessage, + /// Proof containing info about the canonical chain at `commitment.block_number`. + pub ancestry_proof: AncestryProof, + /// The header of the block where the ancestry proof was generated + pub header: Header, +} + +/// Proof showing that an authority voted for a future block. 
+#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] +pub struct FutureBlockVotingProof { + /// The equivocated vote. + pub vote: VoteMessage, +} + /// Check a commitment signature by encoding the commitment and /// verifying the provided signature using the expected authority id. pub fn check_commitment_signature( @@ -351,7 +374,7 @@ where /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. -pub fn check_equivocation_proof( +pub fn check_double_voting_proof( report: &DoubleVotingProof::Signature>, ) -> bool where @@ -398,6 +421,25 @@ impl OnNewValidatorSet for () { fn on_new_validator_set(_: &ValidatorSet, _: &ValidatorSet) {} } +/// Hook containing helper methods for proving/checking commitment canonicity. +pub trait AncestryHelper { + /// Type containing proved info about the canonical chain at a certain height. + type Proof: Clone + Debug + Decode + Encode + PartialEq + TypeInfo; + /// The data needed for validating the proof. + type ValidationContext; + + /// Extract the validation context from the provided header. + fn extract_validation_context(header: Header) -> Option; + + /// Check if a commitment is pointing to a header on a non-canonical chain + /// against a canonicity proof generated at the same header height. + fn is_non_canonical( + commitment: &Commitment, + proof: Self::Proof, + context: Self::ValidationContext, + ) -> bool; +} + /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. At @@ -408,7 +450,7 @@ pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. - #[api_version(3)] + #[api_version(4)] pub trait BeefyApi where AuthorityId : Codec + RuntimeAppPublic, { @@ -418,15 +460,15 @@ sp_api::decl_runtime_apis! { /// Return the current active BEEFY validator set fn validator_set() -> Option>; - /// Submits an unsigned extrinsic to report an equivocation. The caller - /// must provide the equivocation proof and a key ownership proof + /// Submits an unsigned extrinsic to report a double voting equivocation. The caller + /// must provide the double voting proof and a key ownership proof /// (should be obtained using `generate_key_ownership_proof`). The /// extrinsic will be unsigned and should only be accepted for local /// authorship (not to be broadcast to the network). This method returns /// `None` when creation of the extrinsic fails, e.g. if equivocation /// reporting is disabled for the given runtime (i.e. this method is /// hardcoded to return `None`). Only useful in an offchain context. - fn submit_report_equivocation_unsigned_extrinsic( + fn submit_report_double_voting_unsigned_extrinsic( equivocation_proof: DoubleVotingProof, AuthorityId, ::Signature>, key_owner_proof: OpaqueKeyOwnershipProof, diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index 1a06e620e7ad4..d22255c384bc2 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -58,7 +58,7 @@ impl Payload { /// Returns a decoded payload value under given `id`. /// - /// In case the value is not there or it cannot be decoded does not match `None` is returned. + /// In case the value is not there, or it cannot be decoded `None` is returned. 
pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) } diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs index d7fd49214f12f..bd335ede48938 100644 --- a/substrate/primitives/consensus/beefy/src/test_utils.rs +++ b/substrate/primitives/consensus/beefy/src/test_utils.rs @@ -18,12 +18,12 @@ #[cfg(feature = "bls-experimental")] use crate::ecdsa_bls_crypto; use crate::{ - ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, Payload, - ValidatorSetId, VoteMessage, + ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, + ForkVotingProof, FutureBlockVotingProof, Payload, ValidatorSetId, VoteMessage, }; use sp_application_crypto::{AppCrypto, AppPair, RuntimeAppPublic, Wraps}; use sp_core::{ecdsa, Pair}; -use sp_runtime::traits::Hash; +use sp_runtime::traits::{BlockNumber, Hash, Header as HeaderT}; use codec::Encode; use std::{collections::HashMap, marker::PhantomData}; @@ -136,20 +136,42 @@ impl From> for ecdsa_crypto::Public { } } -/// Create a new `EquivocationProof` based on given arguments. -pub fn generate_equivocation_proof( +/// Create a new `VoteMessage` from commitment primitives and keyring +pub fn signed_vote( + block_number: Number, + payload: Payload, + validator_set_id: ValidatorSetId, + keyring: &Keyring, +) -> VoteMessage { + let commitment = Commitment { validator_set_id, block_number, payload }; + let signature = keyring.sign(&commitment.encode()); + VoteMessage { commitment, id: keyring.public(), signature } +} + +/// Create a new `DoubleVotingProof` based on given arguments. +pub fn generate_double_voting_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring), vote2: (u64, Payload, ValidatorSetId, &Keyring), ) -> DoubleVotingProof { - let signed_vote = |block_number: u64, - payload: Payload, - validator_set_id: ValidatorSetId, - keyring: &Keyring| { - let commitment = Commitment { validator_set_id, block_number, payload }; - let signature = keyring.sign(&commitment.encode()); - VoteMessage { commitment, id: keyring.public(), signature } - }; let first = signed_vote(vote1.0, vote1.1, vote1.2, vote1.3); let second = signed_vote(vote2.0, vote2.1, vote2.2, vote2.3); DoubleVotingProof { first, second } } + +/// Create a new `ForkVotingProof` based on vote & canonical header. +pub fn generate_fork_voting_proof, AncestryProof>( + vote: (u64, Payload, ValidatorSetId, &Keyring), + ancestry_proof: AncestryProof, + header: Header, +) -> ForkVotingProof { + let signed_vote = signed_vote(vote.0, vote.1, vote.2, vote.3); + ForkVotingProof { vote: signed_vote, ancestry_proof, header } +} + +/// Create a new `ForkVotingProof` based on vote & canonical header. 
+pub fn generate_future_block_voting_proof( + vote: (u64, Payload, ValidatorSetId, &Keyring), +) -> FutureBlockVotingProof { + let signed_vote = signed_vote(vote.0, vote.1, vote.2, vote.3); + FutureBlockVotingProof { vote: signed_vote } +} diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 90aeadd5055e6..a5d9a8da1a9be 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,18 +17,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.79" -futures = { version = "0.3.30", features = ["thread-pool"] } +async-trait = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -sp-core = { path = "../../core" } -sp-inherents = { path = "../../inherents" } -sp-runtime = { path = "../../runtime" } -sp-state-machine = { path = "../../state-machine" } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [dev-dependencies] -futures = "0.3.30" -sp-test-primitives = { path = "../../test-primitives" } +futures = { workspace = true } +sp-test-primitives = { workspace = true } [features] default = [] diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 9a59575a22c39..6ba5bb40595a0 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -finality-grandpa = { version = "0.16.2", default-features = false, features = ["derive-codec"] } +codec = { features = ["derive"], workspace = true } +finality-grandpa = { features = ["derive-codec"], workspace = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-keystore = { path = "../../keystore", default-features = false, optional = true } -sp-runtime = { path = "../../runtime", default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 0700e2c4f8b9f..d4563cf8e6afd 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false } -sp-core = { path = 
"../../core", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 7927557308392..12bcbc1b33928 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,14 +18,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true } -sp-api = { path = "../../api", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } -sp-consensus-slots = { path = "../slots", default-features = false } -sp-core = { path = "../../core", default-features = false, features = ["bandersnatch-experimental"] } -sp-runtime = { path = "../../runtime", default-features = false } +sp-api = { workspace = true } +sp-application-crypto = { features = ["bandersnatch-experimental"], workspace = true } +sp-consensus-slots = { workspace = true } +sp-core = { features = ["bandersnatch-experimental"], workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index dd519eab46475..9d881c3acd004 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-timestamp = { path = "../../timestamp", default-features = false } +sp-timestamp = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index f931faf8bd043..2ba4f959a5f9e 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,60 +16,60 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +scale-info = { features = ["derive"], workspace = true } log = { workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } -bounded-collections = { version = "0.2.0", default-features = false } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", 
"scale-info"] } -impl-serde = { version = "0.4.0", default-features = false, optional = true } -hash-db = { version = "0.16.0", default-features = false } -hash256-std-hasher = { version = "0.15.2", default-features = false } -bs58 = { version = "0.5.0", default-features = false, optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } -substrate-bip39 = { path = "../../utils/substrate-bip39", default-features = false } +bounded-collections = { workspace = true } +primitive-types = { features = ["codec", "scale-info"], workspace = true } +impl-serde = { optional = true, workspace = true } +hash-db = { workspace = true } +hash256-std-hasher = { workspace = true } +bs58 = { optional = true, workspace = true } +rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } +substrate-bip39 = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = ["alloc"] } -zeroize = { version = "1.4.3", default-features = false } -secrecy = { version = "0.8.0", default-features = false, features = ["alloc"] } -parking_lot = { version = "0.12.1", optional = true } -ss58-registry = { version = "1.34.0", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-debug-derive = { path = "../debug-derive", default-features = false } -sp-storage = { path = "../storage", default-features = false } -sp-externalities = { path = "../externalities", optional = true, default-features = false } -futures = { version = "0.3.30", optional = true } -dyn-clonable = { version = "0.9.0", optional = true } +zeroize = { workspace = true } +secrecy = { features = ["alloc"], workspace = true } +parking_lot = { optional = true, workspace = true, default-features = true } +ss58-registry = { workspace = true } +sp-std = { workspace = true } +sp-debug-derive = { workspace = true } +sp-storage = { workspace = true } +sp-externalities = { optional = true, workspace = true } +futures = { optional = true, workspace = true } +dyn-clonable = { optional = true, workspace = true } thiserror = { optional = true, workspace = true } -tracing = { version = "0.1.29", optional = true } -bitflags = "1.3" -paste = "1.0.7" -itertools = { version = "0.11", optional = true } +tracing = { optional = true, workspace = true, default-features = true } +bitflags = { workspace = true } +paste = { workspace = true, default-features = true } +itertools = { optional = true, workspace = true } # full crypto -array-bytes = { version = "6.2.2" } -ed25519-zebra = { version = "4.0.3", default-features = false } -blake2 = { version = "0.10.4", default-features = false, optional = true } -libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"] } -schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false } -merlin = { version = "3.0", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } +array-bytes = { workspace = true, default-features = true } +ed25519-zebra = { workspace = true } +blake2 = { optional = true, workspace = true } +libsecp256k1 = { features = ["static-context"], workspace = true } +schnorrkel = { features = ["preaudit_deprecated"], workspace = true } +merlin = { workspace = true } +sp-crypto-hashing = { 
workspace = true } +sp-runtime-interface = { workspace = true } # k256 crate, better portability, intended to be used in substrate-runtimes (no-std) -k256 = { version = "0.13.3", features = ["alloc", "ecdsa"], default-features = false } +k256 = { features = ["alloc", "ecdsa"], workspace = true } # secp256k1 crate, better performance, intended to be used on host side (std) -secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } +secp256k1 = { features = ["alloc", "recovery"], optional = true, workspace = true } # bls crypto -w3f-bls = { version = "0.1.3", default-features = false, optional = true } +w3f-bls = { optional = true, workspace = true } # bandersnatch crypto bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "e9782f9", default-features = false, features = ["substrate-curves"], optional = true } [dev-dependencies] -criterion = "0.5.1" +criterion = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -lazy_static = "1.4.0" -regex = "1.6.0" +lazy_static = { workspace = true } +regex = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index 463eaea8ea30d..46dfe8d483b74 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -11,11 +11,11 @@ workspace = true cargo-fuzz = true [dependencies] -lazy_static = "1.4.0" -libfuzzer-sys = "0.4" -regex = "1.10.2" +lazy_static = { workspace = true } +libfuzzer-sys = { workspace = true } +regex = { workspace = true } -sp-core = { path = ".." } +sp-core = { workspace = true, default-features = true } [[bin]] name = "fuzz_address_uri" diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index d44f3c0c87c40..bbe31b7553bd2 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -18,8 +18,8 @@ //! Little util for parsing an address URI. Replaces regular expressions. #[cfg(not(feature = "std"))] -use sp_std::{ - alloc::string::{String, ToString}, +use alloc::{ + string::{String, ToString}, vec::Vec, }; @@ -97,10 +97,10 @@ impl InvalidCharacterInfo { } } -impl sp_std::fmt::Display for InvalidCharacterInfo { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { +impl core::fmt::Display for InvalidCharacterInfo { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let (s, pos) = escape_string(&self.0, self.1); - write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::()) + write!(f, "{s}\n{i}^", i = core::iter::repeat(" ").take(pos).collect::()) } } diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 71ee2da538348..25bf4657030fb 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -31,7 +31,7 @@ use bandersnatch_vrfs::{CanonicalSerialize, SecretKey}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_std::{vec, vec::Vec}; +use alloc::{vec, vec::Vec}; /// Identifier used to match public keys against bandersnatch-vrf keys. 
pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index bb04babb3f180..a86f67844da2a 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -28,7 +28,7 @@ use crate::crypto::{ SignatureBytes, UncheckedFrom, }; -use sp_std::vec::Vec; +use alloc::vec::Vec; use w3f_bls::{ DoublePublicKey, DoublePublicKeyScheme, DoubleSignature, EngineBLS, Keypair, Message, diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index b13899fff5176..fd7fe77672040 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -18,8 +18,14 @@ //! Cryptographic utilities. use crate::{ed25519, sr25519}; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use alloc::{format, string::String, vec}; +use alloc::{str, vec::Vec}; use bip39::{Language, Mnemonic}; use codec::{Decode, Encode, MaxEncodedLen}; +use core::hash::Hash; +#[doc(hidden)] +pub use core::ops::Deref; #[cfg(feature = "std")] use itertools::Itertools; #[cfg(feature = "std")] @@ -27,14 +33,6 @@ use rand::{rngs::OsRng, RngCore}; use scale_info::TypeInfo; pub use secrecy::{ExposeSecret, SecretString}; use sp_runtime_interface::pass_by::PassByInner; -#[doc(hidden)] -pub use sp_std::ops::Deref; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::{ - alloc::{format, string::String}, - vec, -}; -use sp_std::{hash::Hash, str, vec::Vec}; pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58AddressFormatRegistry}; /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; @@ -245,8 +243,8 @@ pub enum PublicError { } #[cfg(feature = "std")] -impl sp_std::fmt::Debug for PublicError { - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { +impl core::fmt::Debug for PublicError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { // Just use the `Display` implementation write!(f, "{}", self) } @@ -587,8 +585,8 @@ impl std::fmt::Display for AccountId32 { } } -impl sp_std::fmt::Debug for AccountId32 { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { +impl core::fmt::Debug for AccountId32 { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { #[cfg(feature = "serde")] { let s = self.to_ss58check(); @@ -624,7 +622,7 @@ impl<'de> serde::Deserialize<'de> for AccountId32 { } #[cfg(feature = "std")] -impl sp_std::str::FromStr for AccountId32 { +impl std::str::FromStr for AccountId32 { type Err = &'static str; fn from_str(s: &str) -> Result { @@ -786,7 +784,7 @@ pub struct SecretUri { pub junctions: Vec, } -impl sp_std::str::FromStr for SecretUri { +impl alloc::str::FromStr for SecretUri { type Err = SecretStringError; fn from_str(s: &str) -> Result { @@ -925,7 +923,7 @@ pub trait Pair: CryptoType + Sized { s: &str, password_override: Option<&str>, ) -> Result<(Self, Option), SecretStringError> { - use sp_std::str::FromStr; + use alloc::str::FromStr; let SecretUri { junctions, phrase, password } = SecretUri::from_str(s)?; let password = password_override.or_else(|| password.as_ref().map(|p| p.expose_secret().as_str())); diff --git a/substrate/primitives/core/src/crypto_bytes.rs b/substrate/primitives/core/src/crypto_bytes.rs index ee5f3482f743a..e5130e6d50079 100644 --- a/substrate/primitives/core/src/crypto_bytes.rs +++ b/substrate/primitives/core/src/crypto_bytes.rs @@ -34,7 +34,7 @@ use crate::crypto::Ss58Codec; use serde::{de, 
Deserialize, Deserializer, Serialize, Serializer}; #[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; +use alloc::{format, string::String}; pub use public_bytes::*; pub use signature_bytes::*; @@ -256,7 +256,7 @@ mod public_bytes { impl Public for PublicBytes where Self: CryptoType {} - impl sp_std::fmt::Debug for PublicBytes + impl core::fmt::Debug for PublicBytes where Self: CryptoType, { @@ -267,7 +267,7 @@ mod public_bytes { } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } @@ -362,17 +362,17 @@ mod signature_bytes { } } - impl sp_std::fmt::Debug for SignatureBytes + impl core::fmt::Debug for SignatureBytes where Self: CryptoType, { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", crate::hexdisplay::HexDisplay::from(&&self.0[..])) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 9cba8cc3d352a..d11811ff2af65 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -22,6 +22,8 @@ use crate::crypto::{ SecretStringError, SignatureBytes, }; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; #[cfg(not(feature = "std"))] use k256::ecdsa::{SigningKey as SecretKey, VerifyingKey}; #[cfg(feature = "std")] @@ -29,8 +31,6 @@ use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; -#[cfg(not(feature = "std"))] -use sp_std::vec::Vec; /// An identifier used to match public keys against ecdsa keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 269b6bfcd8dce..401f9a39d5673 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -24,7 +24,7 @@ use crate::crypto::{ use ed25519_zebra::{SigningKey, VerificationKey}; -use sp_std::vec::Vec; +use alloc::vec::Vec; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); diff --git a/substrate/primitives/core/src/hexdisplay.rs b/substrate/primitives/core/src/hexdisplay.rs index 72bb24a186e54..1902b8cca958a 100644 --- a/substrate/primitives/core/src/hexdisplay.rs +++ b/substrate/primitives/core/src/hexdisplay.rs @@ -27,8 +27,8 @@ impl<'a> HexDisplay<'a> { } } -impl<'a> sp_std::fmt::Display for HexDisplay<'a> { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { +impl<'a> core::fmt::Display for HexDisplay<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { if self.0.len() < 1027 { for byte in self.0 { f.write_fmt(format_args!("{:02x}", byte))?; @@ -46,8 +46,8 @@ impl<'a> sp_std::fmt::Display for HexDisplay<'a> { } } -impl<'a> sp_std::fmt::Debug for HexDisplay<'a> { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { +impl<'a> core::fmt::Debug for HexDisplay<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { for byte in self.0 { f.write_fmt(format_args!("{:02x}", byte))?; } @@ 
-73,7 +73,7 @@ impl AsBytesRef for [u8] { } } -impl AsBytesRef for sp_std::vec::Vec { +impl AsBytesRef for alloc::vec::Vec { fn as_bytes_ref(&self) -> &[u8] { self } diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 098bd135bfebb..46503921453de 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -31,15 +31,18 @@ macro_rules! map { ); } +extern crate alloc; + +use alloc::vec::Vec; #[doc(hidden)] pub use codec::{Decode, Encode, MaxEncodedLen}; +use core::ops::Deref; use scale_info::TypeInfo; #[cfg(feature = "serde")] pub use serde; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; -use sp_std::{ops::Deref, prelude::*}; pub use sp_debug_derive::RuntimeDebug; @@ -137,7 +140,7 @@ impl codec::WrapperTypeDecode for Bytes { } #[cfg(feature = "std")] -impl sp_std::str::FromStr for Bytes { +impl alloc::str::FromStr for Bytes { type Err = bytes::FromHexError; fn from_str(s: &str) -> Result { @@ -156,7 +159,7 @@ impl OpaqueMetadata { } } -impl sp_std::ops::Deref for OpaqueMetadata { +impl Deref for OpaqueMetadata { type Target = Vec; fn deref(&self) -> &Self::Target { @@ -313,7 +316,7 @@ pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 { // Leak the output vector to avoid it being freed. // This is fine in a WASM context since the heap // will be discarded after the call. - sp_std::mem::forget(encoded); + core::mem::forget(encoded); res } @@ -430,16 +433,7 @@ pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB macro_rules! generate_feature_enabled_macro { ( $macro_name:ident, $feature_name:meta, $d:tt ) => { $crate::paste::paste!{ - /// Enable/disable the given code depending on - #[doc = concat!("`", stringify!($feature_name), "`")] - /// being enabled for the crate or not. - /// - /// # Example /// - /// ```nocompile - /// // Will add the code depending on the feature being enabled or not. - #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] - /// ``` #[cfg($feature_name)] #[macro_export] macro_rules! [<_ $macro_name>] { @@ -448,6 +442,13 @@ macro_rules! generate_feature_enabled_macro { } } + /// + #[cfg(not($feature_name))] + #[macro_export] + macro_rules! [<_ $macro_name>] { + ( $d ( $d input:tt )* ) => {}; + } + /// Enable/disable the given code depending on #[doc = concat!("`", stringify!($feature_name), "`")] /// being enabled for the crate or not. @@ -458,15 +459,8 @@ macro_rules! generate_feature_enabled_macro { /// // Will add the code depending on the feature being enabled or not. #[doc = concat!(stringify!($macro_name), "!( println!(\"Hello\") )")] /// ``` - #[cfg(not($feature_name))] - #[macro_export] - macro_rules! [<_ $macro_name>] { - ( $d ( $d input:tt )* ) => {}; - } - - // Work around for: - #[doc(hidden)] - pub use [<_ $macro_name>] as $macro_name; + // https://github.com/rust-lang/rust/pull/52234 + pub use [<_ $macro_name>] as $macro_name; } }; } diff --git a/substrate/primitives/core/src/offchain/mod.rs b/substrate/primitives/core/src/offchain/mod.rs index cef495dfaacdc..9be86e85d5878 100644 --- a/substrate/primitives/core/src/offchain/mod.rs +++ b/substrate/primitives/core/src/offchain/mod.rs @@ -18,10 +18,10 @@ //! 
Offchain workers types use crate::{OpaquePeerId, RuntimeDebug}; +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::{PassByCodec, PassByEnum, PassByInner}; -use sp_std::prelude::{Box, Vec}; pub use crate::crypto::KeyTypeId; diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 260e86b6ff9c4..57bc5b006197b 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -24,7 +24,7 @@ use crate::crypto::{ PublicBytes, SecretStringError, Signature as SignatureT, SignatureBytes, UncheckedFrom, }; -use sp_std::vec::Vec; +use alloc::vec::Vec; /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index 54b9a98db3d2c..48780f2ccff93 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -25,25 +25,25 @@ use crate::crypto::Ss58Codec; use crate::crypto::{ CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, }; +use alloc::vec::Vec; #[cfg(feature = "full_crypto")] use schnorrkel::signing_context; use schnorrkel::{ derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; -use sp_std::vec::Vec; use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, SignatureBytes}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use alloc::{format, string::String}; use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "std")] use sp_runtime_interface::pass_by::PassByInner; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; // signing context const SIGNING_CTX: &[u8] = b"substrate"; @@ -100,15 +100,15 @@ impl std::fmt::Display for Public { } } -impl sp_std::fmt::Debug for Public { +impl core::fmt::Debug for Public { #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let s = self.to_ss58check(); write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(self.inner()), &s[0..8]) } #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 142a5abf9b30d..1068787728bab 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -15,19 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-ec = { version = "0.4.2", default-features = false, optional = true } -ark-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } -ark-bls12-377 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } -ark-bls12-381-ext = { version = "0.4.1", default-features = false, optional = true } -ark-bls12-381 = { version = "0.4.0", default-features = false, features = ["curve"], optional = true } -ark-bw6-761-ext = { version = "0.4.1", default-features 
= false, optional = true } -ark-bw6-761 = { version = "0.4.0", default-features = false, optional = true } -ark-ed-on-bls12-381-bandersnatch-ext = { version = "0.4.1", default-features = false, optional = true } -ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0", default-features = false, optional = true } -ark-ed-on-bls12-377-ext = { version = "0.4.1", default-features = false, optional = true } -ark-ed-on-bls12-377 = { version = "0.4.0", default-features = false, optional = true } -ark-scale = { version = "0.0.12", default-features = false, features = ["hazmat"], optional = true } -sp-runtime-interface = { path = "../../runtime-interface", default-features = false, optional = true } +ark-ec = { optional = true, workspace = true } +ark-bls12-377-ext = { optional = true, workspace = true } +ark-bls12-377 = { features = ["curve"], optional = true, workspace = true } +ark-bls12-381-ext = { optional = true, workspace = true } +ark-bls12-381 = { features = ["curve"], optional = true, workspace = true } +ark-bw6-761-ext = { optional = true, workspace = true } +ark-bw6-761 = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch-ext = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } +ark-ed-on-bls12-377-ext = { optional = true, workspace = true } +ark-ed-on-bls12-377 = { optional = true, workspace = true } +ark-scale = { features = ["hazmat"], optional = true, workspace = true } +sp-runtime-interface = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/crypto/hashing/Cargo.toml b/substrate/primitives/crypto/hashing/Cargo.toml index 1755164888bc9..461af269bf2da 100644 --- a/substrate/primitives/crypto/hashing/Cargo.toml +++ b/substrate/primitives/crypto/hashing/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2b_simd = { version = "1.0.1", default-features = false } -byteorder = { version = "1.3.2", default-features = false } -digest = { version = "0.10.3", default-features = false } -sha2 = { version = "0.10.7", default-features = false } -sha3 = { version = "0.10.0", default-features = false } -twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } +blake2b_simd = { workspace = true } +byteorder = { workspace = true } +digest = { workspace = true } +sha2 = { workspace = true } +sha3 = { workspace = true } +twox-hash = { features = ["digest_0_10"], workspace = true } [dev-dependencies] -criterion = "0.5.1" -sp-crypto-hashing-proc-macro = { path = "proc-macro" } +criterion = { workspace = true, default-features = true } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } [[bench]] name = "bench" diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index f988042d30759..68e865c7dac58 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -21,4 +21,4 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { features = ["full", "parsing"], workspace = true } -sp-crypto-hashing = { path = "..", default-features = false } +sp-crypto-hashing = { workspace = true } diff --git a/substrate/primitives/database/Cargo.toml b/substrate/primitives/database/Cargo.toml index 081aad6075840..c0867198e8b9b 100644 --- a/substrate/primitives/database/Cargo.toml +++ 
b/substrate/primitives/database/Cargo.toml @@ -14,5 +14,5 @@ readme = "README.md" workspace = true [dependencies] -kvdb = "0.13.0" -parking_lot = "0.12.1" +kvdb = { workspace = true } +parking_lot = { workspace = true, default-features = true } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index debf964aa3dfd..4f45d6525c4a9 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -21,7 +21,7 @@ proc-macro = true [dependencies] quote = { workspace = true } syn = { workspace = true } -proc-macro2 = "1.0.56" +proc-macro2 = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 3a0d0315e9178..ca2f57c03b948 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -environmental = { version = "1.1.3", default-features = false } -sp-storage = { path = "../storage", default-features = false } +codec = { workspace = true } +environmental = { workspace = true } +sp-storage = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 4fc8a0416fbe5..d4345d17e7e5c 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { features = ["bytes"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -sp-api = { path = "../api", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } [features] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index c63aca801a0d7..1495287698d66 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -17,15 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } thiserror = { optional = true, workspace = true } -sp-runtime = { path = "../runtime", default-features = false, optional = true } +sp-runtime = { optional = true, workspace = true } [dev-dependencies] -futures = "0.3.30" +futures = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index abb16d163da06..923efc3b87646 100644 
--- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -18,31 +18,37 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } -sp-core = { path = "../core", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-keystore = { path = "../keystore", default-features = false, optional = true } -sp-std = { path = "../std", default-features = false } -libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-trie = { path = "../trie", default-features = false, optional = true } -sp-externalities = { path = "../externalities", default-features = false } -sp-tracing = { path = "../tracing", default-features = false } +bytes = { workspace = true } +codec = { features = [ + "bytes", +], workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-keystore = { optional = true, workspace = true } +libsecp256k1 = { optional = true, workspace = true, default-features = true } +sp-state-machine = { optional = true, workspace = true } +sp-runtime-interface = { workspace = true } +sp-trie = { optional = true, workspace = true } +sp-externalities = { workspace = true } +sp-tracing = { workspace = true } log = { optional = true, workspace = true, default-features = true } -secp256k1 = { version = "0.28.0", features = ["global-context", "recovery"], optional = true } -tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.32", default-features = false } +secp256k1 = { features = [ + "global-context", + "recovery", +], optional = true, workspace = true, default-features = true } +tracing = { workspace = true } +tracing-core = { workspace = true } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. -ed25519-dalek = { version = "2.1", default-features = false, optional = true } +ed25519-dalek = { optional = true, workspace = true } + +docify = { workspace = true } [target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] polkavm-derive = { workspace = true } [build-dependencies] -rustversion = "1.0.6" +rustversion = { workspace = true } [features] default = ["std"] @@ -60,7 +66,6 @@ std = [ "sp-keystore/std", "sp-runtime-interface/std", "sp-state-machine/std", - "sp-std/std", "sp-tracing/std", "sp-trie/std", "tracing-core/std", diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 8ef1f41ce0197..b39a06ed2115a 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -77,7 +77,9 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(enable_alloc_error_handler, feature(alloc_error_handler))] -use sp_std::vec::Vec; +extern crate alloc; + +use alloc::vec::Vec; #[cfg(feature = "std")] use tracing; @@ -1771,7 +1773,7 @@ pub fn unreachable() -> ! { #[panic_handler] #[no_mangle] pub fn panic(info: &core::panic::PanicInfo) -> ! 
{ - let message = sp_std::alloc::format!("{}", info); + let message = alloc::format!("{}", info); #[cfg(feature = "improved_panic_error_reporting")] { panic_handler::abort_on_panic(&message); @@ -1805,6 +1807,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities Result { impl TryFrom for #ident { type Error = (); - fn try_from(inner: u8) -> #crate_::sp_std::result::Result { + fn try_from(inner: u8) -> core::result::Result { match inner { #( #try_from_variants, )* _ => Err(()), diff --git a/substrate/primitives/runtime-interface/src/impls.rs b/substrate/primitives/runtime-interface/src/impls.rs index 3530b62662a53..daf5725e7f511 100644 --- a/substrate/primitives/runtime-interface/src/impls.rs +++ b/substrate/primitives/runtime-interface/src/impls.rs @@ -35,10 +35,12 @@ use sp_wasm_interface::{FunctionContext, Result}; use codec::{Decode, Encode}; -use sp_std::{any::TypeId, mem, vec::Vec}; +use core::{any::TypeId, mem}; + +use alloc::vec::Vec; #[cfg(feature = "std")] -use sp_std::borrow::Cow; +use alloc::borrow::Cow; // Make sure that our assumptions for storing a pointer + its size in `u64` is valid. #[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] @@ -337,7 +339,7 @@ impl IntoPreallocatedFFIValue for [u8; N] { } } -impl PassBy for sp_std::result::Result { +impl PassBy for core::result::Result { type PassBy = Codec; } diff --git a/substrate/primitives/runtime-interface/src/lib.rs b/substrate/primitives/runtime-interface/src/lib.rs index f6ef27789b36f..d6dcb69958af4 100644 --- a/substrate/primitives/runtime-interface/src/lib.rs +++ b/substrate/primitives/runtime-interface/src/lib.rs @@ -111,6 +111,8 @@ extern crate self as sp_runtime_interface; +extern crate alloc; + #[doc(hidden)] #[cfg(feature = "std")] pub use sp_wasm_interface; diff --git a/substrate/primitives/runtime-interface/src/pass_by.rs b/substrate/primitives/runtime-interface/src/pass_by.rs index 103e9c1622054..dce0b8e4bddb7 100644 --- a/substrate/primitives/runtime-interface/src/pass_by.rs +++ b/substrate/primitives/runtime-interface/src/pass_by.rs @@ -33,10 +33,10 @@ use crate::wasm::*; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Pointer, Result}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; #[cfg(not(feature = "std"))] -use sp_std::vec::Vec; +use alloc::vec::Vec; /// Derive macro for implementing [`PassBy`] with the [`Codec`] strategy. 
/// diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index f663c6d47263b..ba09c700e320a 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -16,12 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../core", default-features = false } -sp-io = { path = "../../io", default-features = false } -sp-runtime-interface = { path = "..", default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime-interface = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml index ecb3c7f8732dd..77e77c707d9e6 100644 --- a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml @@ -16,14 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bytes = { version = "1.1.0", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-io = { path = "../../io", default-features = false } -sp-runtime-interface = { path = "..", default-features = false } -sp-std = { path = "../../std", default-features = false } +bytes = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime-interface = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -32,6 +31,5 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime-interface/std", - "sp-std/std", "substrate-wasm-builder", ] diff --git a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs index 2b3fc728f6ff4..545f1ff4a115e 100644 --- a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs @@ -19,11 +19,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use sp_runtime_interface::runtime_interface; #[cfg(not(feature = "std"))] -use sp_std::{mem, prelude::*}; +use core::mem; +use alloc::{vec, vec::Vec}; use sp_core::{sr25519::Public, wasm_export_functions}; // Include the WASM binary @@ -229,7 +232,7 @@ wasm_export_functions! { fn test_invalid_utf8_data_should_return_an_error() { let data = vec![0, 159, 146, 150]; // I'm an evil hacker, trying to hack! 
- let data_str = unsafe { sp_std::str::from_utf8_unchecked(&data) }; + let data_str = unsafe { alloc::str::from_utf8_unchecked(&data) }; test_api::invalid_utf8_data(data_str); } diff --git a/substrate/primitives/runtime-interface/test/Cargo.toml b/substrate/primitives/runtime-interface/test/Cargo.toml index 55d70960989e8..469a63f2473c4 100644 --- a/substrate/primitives/runtime-interface/test/Cargo.toml +++ b/substrate/primitives/runtime-interface/test/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tracing = "0.1.29" -tracing-core = "0.1.32" -sc-executor = { path = "../../../client/executor" } -sc-executor-common = { path = "../../../client/executor/common" } -sp-io = { path = "../../io" } -sp-runtime = { path = "../../runtime" } -sp-runtime-interface = { path = ".." } -sp-runtime-interface-test-wasm = { path = "../test-wasm" } -sp-runtime-interface-test-wasm-deprecated = { path = "../test-wasm-deprecated" } -sp-state-machine = { path = "../../state-machine" } +tracing = { workspace = true, default-features = true } +tracing-core = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-runtime-interface-test-wasm = { workspace = true } +sp-runtime-interface-test-wasm-deprecated = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 4d298b7ce5e3d..83b9422ca1d88 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,34 +17,35 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } -either = { version = "1.5", default-features = false } -hash256-std-hasher = { version = "0.15.2", default-features = false } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive", "max-encoded-len"], workspace = true } +either = { workspace = true } +hash256-std-hasher = { workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } -num-traits = { version = "0.2.17", default-features = false } -paste = "1.0" -rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +num-traits = { workspace = true } +paste = { workspace = true, default-features = true } +rand = { optional = true, workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-arithmetic = { path = "../arithmetic", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-io = { path = "../io", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-weights = { path = "../weights", default-features = false } -docify = "0.2.8" +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { 
workspace = true } +sp-weights = { workspace = true } +docify = { workspace = true } +tracing = { workspace = true, features = ["log"], default-features = false } simple-mermaid = { version = "0.1.1", optional = true } [dev-dependencies] -rand = "0.8.5" +rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -zstd = { version = "0.12.4", default-features = false } -sp-api = { path = "../api" } -sp-state-machine = { path = "../state-machine" } -sp-tracing = { path = "../tracing" } -substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +zstd = { workspace = true } +sp-api = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [features] runtime-benchmarks = [] @@ -69,6 +70,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", + "tracing/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/generic/block.rs b/substrate/primitives/runtime/src/generic/block.rs index 05146e880cb16..8ed79c7c8dcf7 100644 --- a/substrate/primitives/runtime/src/generic/block.rs +++ b/substrate/primitives/runtime/src/generic/block.rs @@ -31,8 +31,8 @@ use crate::{ }, Justifications, }; +use alloc::vec::Vec; use sp_core::RuntimeDebug; -use sp_std::prelude::*; /// Something to identify a block. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] diff --git a/substrate/primitives/runtime/src/generic/digest.rs b/substrate/primitives/runtime/src/generic/digest.rs index d7db0f91a4821..c639576a28670 100644 --- a/substrate/primitives/runtime/src/generic/digest.rs +++ b/substrate/primitives/runtime/src/generic/digest.rs @@ -17,12 +17,11 @@ //! Generic implementation of a digest. +#[cfg(all(not(feature = "std"), feature = "serde"))] +use alloc::format; +use alloc::vec::Vec; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::format; - -use sp_std::prelude::*; use crate::{ codec::{Decode, Encode, Error, Input}, diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index df1f5645f0482..499b7c5f5836d 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -26,12 +26,13 @@ use crate::{ transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, }; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use alloc::format; +use alloc::{vec, vec::Vec}; use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; +use core::fmt; use scale_info::{build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter}; use sp_io::hashing::blake2_256; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::format; -use sp_std::{fmt, prelude::*}; /// Current version of the [`UncheckedExtrinsic`] encoded format. /// @@ -316,7 +317,7 @@ where Extra: SignedExtension, { fn encode(&self) -> Vec { - let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + let mut tmp = Vec::with_capacity(core::mem::size_of::()); // 1 byte version id. 
match self.signature.as_ref() { @@ -437,7 +438,7 @@ mod tests { type AdditionalSigned = (); type Pre = (); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 046909b9a38d7..d313d23395a0f 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -45,6 +45,11 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +#[doc(hidden)] +extern crate alloc; + +#[doc(hidden)] +pub use alloc::vec::Vec; #[doc(hidden)] pub use codec; #[doc(hidden)] @@ -73,12 +78,12 @@ use sp_core::{ hash::{H256, H512}, sr25519, }; -use sp_std::prelude::*; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use alloc::format; +use alloc::vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::format; pub mod curve; pub mod generic; @@ -191,7 +196,7 @@ impl Justifications { impl IntoIterator for Justifications { type Item = Justification; - type IntoIter = sp_std::vec::IntoIter; + type IntoIter = alloc::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() @@ -508,11 +513,11 @@ impl From for DispatchOutcome { /// This is the legacy return type of `Dispatchable`. It is still exposed for compatibility reasons. /// The new return type is `DispatchResultWithInfo`. FRAME runtimes should use /// `frame_support::dispatch::DispatchResult`. -pub type DispatchResult = sp_std::result::Result<(), DispatchError>; +pub type DispatchResult = core::result::Result<(), DispatchError>; /// Return type of a `Dispatchable` which contains the `DispatchResult` and additional information /// about the `Dispatchable` that is only known post dispatch. -pub type DispatchResultWithInfo = sp_std::result::Result>; +pub type DispatchResultWithInfo = core::result::Result>; /// Reason why a pallet call failed. #[derive(Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] @@ -911,14 +916,14 @@ impl OpaqueExtrinsic { } } -impl sp_std::fmt::Debug for OpaqueExtrinsic { +impl core::fmt::Debug for OpaqueExtrinsic { #[cfg(feature = "std")] - fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { write!(fmt, "{}", sp_core::hexdisplay::HexDisplay::from(&self.0)) } #[cfg(not(feature = "std"))] - fn fmt(&self, _fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn fmt(&self, _fmt: &mut core::fmt::Formatter) -> core::fmt::Result { Ok(()) } } diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs index c435606312e43..4382405a8ebc6 100644 --- a/substrate/primitives/runtime/src/multiaddress.rs +++ b/substrate/primitives/runtime/src/multiaddress.rs @@ -17,8 +17,8 @@ //! MultiAddress type is a wrapper for multiple downstream account formats. +use alloc::vec::Vec; use codec::{Decode, Encode}; -use sp_std::vec::Vec; /// A multi-format address wrapper for on-chain accounts. 
#[derive(Encode, Decode, PartialEq, Eq, Clone, crate::RuntimeDebug, scale_info::TypeInfo)] diff --git a/substrate/primitives/runtime/src/offchain/http.rs b/substrate/primitives/runtime/src/offchain/http.rs index bacc0073825bb..7989916454025 100644 --- a/substrate/primitives/runtime/src/offchain/http.rs +++ b/substrate/primitives/runtime/src/offchain/http.rs @@ -48,15 +48,13 @@ //! assert_eq!(body.error(), &None); //! ``` +use alloc::{str, vec, vec::Vec}; use sp_core::{ offchain::{ HttpError, HttpRequestId as RequestId, HttpRequestStatus as RequestStatus, Timestamp, }, RuntimeDebug, }; -#[cfg(not(feature = "std"))] -use sp_std::prelude::vec; -use sp_std::{prelude::Vec, str}; /// Request method (HTTP verb) #[derive(Clone, PartialEq, Eq, RuntimeDebug)] diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs index b7374b8b6f6c8..79984b1356725 100644 --- a/substrate/primitives/runtime/src/runtime_logger.rs +++ b/substrate/primitives/runtime/src/runtime_logger.rs @@ -53,7 +53,7 @@ impl log::Log for RuntimeLogger { } fn log(&self, record: &log::Record) { - use sp_std::fmt::Write; + use core::fmt::Write; let mut w = sp_std::Writer::default(); let _ = ::core::write!(&mut w, "{}", record.args()); @@ -66,16 +66,15 @@ impl log::Log for RuntimeLogger { #[cfg(test)] mod tests { use sp_api::ProvideRuntimeApi; - use std::{env, str::FromStr}; + use std::env; use substrate_test_runtime_client::{ runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; #[test] - fn ensure_runtime_logger_respects_host_max_log_level() { + fn ensure_runtime_logger_works() { if env::var("RUN_TEST").is_ok() { sp_tracing::try_init_simple(); - log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap()); let client = TestClientBuilder::new().build(); let runtime_api = client.runtime_api(); @@ -83,17 +82,19 @@ mod tests { .do_trace_log(client.chain_info().genesis_hash) .expect("Logging should not fail"); } else { - for (level, should_print) in &[("trace", true), ("info", false)] { + for (level, should_print) in &[("test=trace", true), ("info", false)] { let executable = std::env::current_exe().unwrap(); let output = std::process::Command::new(executable) .env("RUN_TEST", "1") .env("RUST_LOG", level) - .args(&["--nocapture", "ensure_runtime_logger_respects_host_max_log_level"]) + .args(&["--nocapture", "ensure_runtime_logger_works"]) .output() .unwrap(); let output = String::from_utf8(output.stderr).unwrap(); assert!(output.contains("Hey I'm runtime") == *should_print); + assert!(output.contains("THIS IS TRACING") == *should_print); + assert!(output.contains("Hey, I'm tracing") == *should_print); } } } diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs index 607ae59db632f..71aacf07a762e 100644 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ b/substrate/primitives/runtime/src/runtime_string.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_core::RuntimeDebug; -use sp_std::vec::Vec; /// A string that wraps a `&'static str` in the runtime and `String`/`Vec` on decode. #[derive(Eq, RuntimeDebug, Clone)] @@ -50,7 +50,7 @@ macro_rules! 
format_runtime_string { } #[cfg(not(feature = "std"))] { - sp_runtime::RuntimeString::Owned(sp_std::alloc::format!($($args)*).as_bytes().to_vec()) + sp_runtime::RuntimeString::Owned(alloc::format!($($args)*).as_bytes().to_vec()) } }}; } diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index d023aa045dbe0..25ef15eaf56ef 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -26,7 +26,10 @@ use crate::{ }, DispatchResult, }; +use alloc::vec::Vec; use codec::{Codec, Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +#[doc(hidden)] +pub use core::{fmt::Debug, marker::PhantomData}; use impl_trait_for_tuples::impl_for_tuples; #[cfg(feature = "serde")] use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -44,9 +47,6 @@ pub use sp_core::{ parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, }; -#[doc(hidden)] -pub use sp_std::marker::PhantomData; -use sp_std::{self, fmt::Debug, prelude::*}; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] @@ -322,7 +322,7 @@ impl TryMorph for Identity { } /// Implementation of `Morph` which converts between types using `Into`. -pub struct MorphInto(sp_std::marker::PhantomData); +pub struct MorphInto(core::marker::PhantomData); impl> Morph for MorphInto { type Outcome = T; fn morph(a: A) -> T { @@ -331,7 +331,7 @@ impl> Morph for MorphInto { } /// Implementation of `TryMorph` which attempts to convert between types using `TryInto`. -pub struct TryMorphInto(sp_std::marker::PhantomData); +pub struct TryMorphInto(core::marker::PhantomData); impl> TryMorph for TryMorphInto { type Outcome = T; fn try_morph(a: A) -> Result { @@ -692,7 +692,7 @@ impl MaybeEquivalence for Tuple { /// Adapter which turns a [Get] implementation into a [Convert] implementation which always returns /// in the same value no matter the input. -pub struct ConvertToValue(sp_std::marker::PhantomData); +pub struct ConvertToValue(core::marker::PhantomData); impl> Convert for ConvertToValue { fn convert(_: X) -> Y { T::get() @@ -934,17 +934,17 @@ impl Clear for T { pub trait SimpleBitOps: Sized + Clear - + sp_std::ops::BitOr - + sp_std::ops::BitXor - + sp_std::ops::BitAnd + + core::ops::BitOr + + core::ops::BitXor + + core::ops::BitAnd { } impl< T: Sized + Clear - + sp_std::ops::BitOr - + sp_std::ops::BitXor - + sp_std::ops::BitAnd, + + core::ops::BitOr + + core::ops::BitXor + + core::ops::BitAnd, > SimpleBitOps for T { } @@ -988,7 +988,7 @@ pub trait HashOutput: + MaybeDisplay + MaybeFromStr + Debug - + sp_std::hash::Hash + + core::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy @@ -1008,7 +1008,7 @@ impl HashOutput for T where + MaybeDisplay + MaybeFromStr + Debug - + sp_std::hash::Hash + + core::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy @@ -1131,7 +1131,7 @@ sp_core::impl_maybe_marker!( trait MaybeFromStr: FromStr; /// A type that implements Hash when in std environment. 
- trait MaybeHash: sp_std::hash::Hash; + trait MaybeHash: core::hash::Hash; ); sp_core::impl_maybe_marker_std_or_serde!( @@ -1158,7 +1158,7 @@ pub trait BlockNumber: + MaybeSerializeDeserialize + MaybeFromStr + Debug - + sp_std::hash::Hash + + core::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned @@ -1176,7 +1176,7 @@ impl< + MaybeSerializeDeserialize + MaybeFromStr + Debug - + sp_std::hash::Hash + + core::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned @@ -1599,7 +1599,7 @@ pub trait SignedExtension: /// This method provides a default implementation that returns a vec containing a single /// [`SignedExtensionMetadata`]. fn metadata() -> Vec { - sp_std::vec![SignedExtensionMetadata { + alloc::vec![SignedExtensionMetadata { identifier: Self::IDENTIFIER, ty: scale_info::meta_type::(), additional_signed: scale_info::meta_type::() @@ -1702,7 +1702,7 @@ impl SignedExtension for () { type Call = (); type Pre = (); const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { Ok(()) } fn pre_dispatch( @@ -2035,7 +2035,7 @@ macro_rules! impl_opaque_keys_inner { /// The generated key pairs are stored in the keystore. /// /// Returns the concatenated SCALE encoded public keys. - pub fn generate(seed: Option<$crate::sp_std::vec::Vec>) -> $crate::sp_std::vec::Vec { + pub fn generate(seed: Option<$crate::Vec>) -> $crate::Vec { let keys = Self{ $( $field: < @@ -2051,7 +2051,7 @@ macro_rules! impl_opaque_keys_inner { /// Converts `Self` into a `Vec` of `(raw public key, KeyTypeId)`. pub fn into_raw_public_keys( self, - ) -> $crate::sp_std::vec::Vec<($crate::sp_std::vec::Vec, $crate::KeyTypeId)> { + ) -> $crate::Vec<($crate::Vec, $crate::KeyTypeId)> { let mut keys = Vec::new(); $( keys.push(( @@ -2073,7 +2073,7 @@ macro_rules! impl_opaque_keys_inner { /// Returns `None` when the decoding failed, otherwise `Some(_)`. pub fn decode_into_raw_public_keys( encoded: &[u8], - ) -> Option<$crate::sp_std::vec::Vec<($crate::sp_std::vec::Vec, $crate::KeyTypeId)>> { + ) -> Option<$crate::Vec<($crate::Vec, $crate::KeyTypeId)>> { ::decode(&mut &encoded[..]) .ok() .map(|s| s.into_raw_public_keys()) diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs index 836948493823c..ffff94e17461e 100644 --- a/substrate/primitives/runtime/src/transaction_validity.rs +++ b/substrate/primitives/runtime/src/transaction_validity.rs @@ -21,8 +21,8 @@ use crate::{ codec::{Decode, Encode}, RuntimeDebug, }; +use alloc::{vec, vec::Vec}; use scale_info::TypeInfo; -use sp_std::prelude::*; /// Priority for a transaction. Additive. Higher is better. 
pub type TransactionPriority = u64; diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 9355ab4201071..b6b3a91a820a9 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", optional = true, default-features = false } -sp-staking = { path = "../staking", default-features = false } -sp-keystore = { path = "../keystore", optional = true, default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { optional = true, workspace = true } +sp-staking = { workspace = true } +sp-keystore = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 6e3ce4bca106f..9c92cba66c679 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } -sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index f6402eccf0df0..aa3ce2ad2b8b0 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -17,28 +17,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -hash-db = { version = "0.16.0", default-features = false } +codec = { workspace = true } +hash-db = { workspace = true } log = { workspace = true } -parking_lot = { version = "0.12.1", optional = true } -rand = { version = "0.8.5", optional = true } -smallvec = "1.11.0" +parking_lot = { optional = true, workspace = true, default-features = true } +rand = { optional = true, workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } thiserror = { optional = true, workspace = true } -tracing = { version = "0.1.29", optional = true } -sp-core = { path = "../core", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } -sp-panic-handler = { path = "../panic-handler", optional = true } -sp-trie = { path = "../trie", default-features = false } -trie-db = { version = "0.29.0", default-features = 
false } -arbitrary = { version = "1", features = ["derive"], optional = true } +tracing = { optional = true, workspace = true, default-features = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +sp-panic-handler = { optional = true, workspace = true, default-features = true } +sp-trie = { workspace = true } +trie-db = { workspace = true } +arbitrary = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -pretty_assertions = "1.2.1" -rand = "0.8.5" -sp-runtime = { path = "../runtime" } -assert_matches = "1.5" -arbitrary = { version = "1", features = ["derive"] } +array-bytes = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } +rand = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +assert_matches = { workspace = true } +arbitrary = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index bb893b25dc443..c6f8491367c30 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -16,25 +16,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-api = { path = "../api", default-features = false } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-runtime-interface = { path = "../runtime-interface", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-runtime-interface = { workspace = true } +sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } # ECIES dependencies -ed25519-dalek = { version = "2.1", optional = true } -x25519-dalek = { version = "2.0", optional = true, features = ["static_secrets"] } -curve25519-dalek = { version = "4.1.1", optional = true } -aes-gcm = { version = "0.10", optional = true } -hkdf = { version = "0.12.0", optional = true } -sha2 = { version = "0.10.7", optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +ed25519-dalek = { optional = true, workspace = true, default-features = true } +x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } +curve25519-dalek = { optional = true, workspace = true } +aes-gcm = { optional = true, workspace = true } +hkdf = { optional = true, workspace = true } +sha2 = { optional = true, workspace = true, default-features = true } +rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index 
c3318943d0d48..3184ec010930a 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-serde = { version = "0.4.0", optional = true, default-features = false } -ref-cast = "1.0.0" +codec = { features = ["derive"], workspace = true } +impl-serde = { optional = true, workspace = true } +ref-cast = { workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-debug-derive = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index 197994f574719..3b9afae4ca078 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -293,7 +293,7 @@ impl ChildInfo { } } - /// Return a the full location in the direct parent of + /// Return the full location in the direct parent of /// this trie. pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { @@ -302,7 +302,7 @@ impl ChildInfo { } } - /// Returns a the full location in the direct parent of + /// Returns the full location in the direct parent of /// this trie. pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index b7be614860910..e223e8937653c 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true } -sp-application-crypto = { path = "../application-crypto", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index c1bf9b3255eab..7dccf741cd068 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,11 +16,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } thiserror = { optional = true, workspace = true } -sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/tracing/Cargo.toml 
b/substrate/primitives/tracing/Cargo.toml index 8adec1670dc2d..c434016604ac9 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -21,11 +21,11 @@ features = ["with-tracing"] targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = [ +codec = { features = [ "derive", -] } -tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.32", default-features = false } +], workspace = true } +tracing = { workspace = true } +tracing-core = { workspace = true } tracing-subscriber = { workspace = true, optional = true, features = [ "env-filter", "tracing-log", diff --git a/substrate/primitives/transaction-pool/Cargo.toml b/substrate/primitives/transaction-pool/Cargo.toml index a7deda64efce4..964fb18b533ee 100644 --- a/substrate/primitives/transaction-pool/Cargo.toml +++ b/substrate/primitives/transaction-pool/Cargo.toml @@ -17,8 +17,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } +sp-api = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 1e874c3595acd..f12b9ef118cba 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,13 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true, default-features = false } -sp-inherents = { path = "../inherents", default-features = false } -sp-runtime = { path = "../runtime", default-features = false } -sp-trie = { path = "../trie", optional = true, default-features = false } +async-trait = { optional = true, workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { optional = true, workspace = true } +sp-inherents = { workspace = true } +sp-runtime = { workspace = true } +sp-trie = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 45459c180d40d..1fe29f72014aa 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -21,29 +21,29 @@ name = "bench" harness = false [dependencies] -ahash = { version = "0.8.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -hash-db = { version = "0.16.0", default-features = false } -lazy_static = { version = "1.4.0", optional = true } -memory-db = { version = "0.32.0", default-features = false } -nohash-hasher = { version = "0.2.0", optional = true } -parking_lot = { version = "0.12.1", optional = true } -rand = { version = "0.8", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +ahash = { optional = true, workspace = true } +codec = { workspace = true } +hash-db = { 
workspace = true } +lazy_static = { optional = true, workspace = true } +memory-db = { workspace = true } +nohash-hasher = { optional = true, workspace = true } +parking_lot = { optional = true, workspace = true, default-features = true } +rand = { optional = true, workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } thiserror = { optional = true, workspace = true } -tracing = { version = "0.1.29", optional = true } -trie-db = { version = "0.29.0", default-features = false } -trie-root = { version = "0.18.0", default-features = false } -sp-core = { path = "../core", default-features = false } -sp-externalities = { path = "../externalities", default-features = false } -schnellru = { version = "0.2.1", optional = true } +tracing = { optional = true, workspace = true, default-features = true } +trie-db = { workspace = true } +trie-root = { workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } +schnellru = { optional = true, workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -criterion = "0.5.1" -trie-bench = "0.39.0" -trie-standardmap = "0.16.0" -sp-runtime = { path = "../runtime" } +array-bytes = { workspace = true, default-features = true } +criterion = { workspace = true, default-features = true } +trie-bench = { workspace = true } +trie-standardmap = { workspace = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/src/accessed_nodes_tracker.rs b/substrate/primitives/trie/src/accessed_nodes_tracker.rs new file mode 100644 index 0000000000000..378e3c2812c06 --- /dev/null +++ b/substrate/primitives/trie/src/accessed_nodes_tracker.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helpers for checking for duplicate nodes. + +use alloc::collections::BTreeSet; +use core::hash::Hash; +use scale_info::TypeInfo; +use sp_core::{Decode, Encode}; +use trie_db::{RecordedForKey, TrieAccess, TrieRecorder}; + +/// Error associated with the `AccessedNodesTracker` module. +#[derive(Encode, Decode, Clone, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// The proof contains unused nodes. + UnusedNodes, +} + +/// Helper struct used to ensure that a storage proof doesn't contain duplicate or unused nodes. +/// +/// The struct needs to be used as a `TrieRecorder` and `ensure_no_unused_nodes()` has to be called +/// to actually perform the check. +pub struct AccessedNodesTracker<H: Ord> { + proof_nodes_count: usize, + recorder: BTreeSet<H>, +} + +impl<H: Ord> AccessedNodesTracker<H> { + /// Create a new instance of `RedundantNodesChecker`, starting from a `RawStorageProof`. + pub fn new(proof_nodes_count: usize) -> Self { + Self { proof_nodes_count, recorder: BTreeSet::new() } + } + + /// Ensure that all the nodes in the proof have been accessed.
+ pub fn ensure_no_unused_nodes(self) -> Result<(), Error> { + if self.proof_nodes_count != self.recorder.len() { + return Err(Error::UnusedNodes) + } + + Ok(()) + } +} + +impl<H: Hash + Ord> TrieRecorder<H> for AccessedNodesTracker<H> { + fn record(&mut self, access: TrieAccess<H>) { + match access { + TrieAccess::NodeOwned { hash, .. } | + TrieAccess::EncodedNode { hash, .. } | + TrieAccess::Value { hash, .. } => { + self.recorder.insert(hash); + }, + _ => {}, + } + } + + fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> RecordedForKey { + RecordedForKey::None + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::{tests::create_storage_proof, StorageProof}; + use hash_db::Hasher; + use trie_db::{Trie, TrieDBBuilder}; + + type Hash = <sp_core::Blake2Hasher as Hasher>::Out; + type Layout = crate::LayoutV1<sp_core::Blake2Hasher>; + + const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64])]; + + #[test] + fn proof_with_unused_nodes_is_rejected() { + let (raw_proof, root) = create_storage_proof::<Layout>(TEST_DATA); + let proof = StorageProof::new(raw_proof.clone()); + let proof_nodes_count = proof.len(); + + let mut accessed_nodes_tracker = AccessedNodesTracker::<Hash>::new(proof_nodes_count); + { + let db = proof.clone().into_memory_db(); + let trie = TrieDBBuilder::<Layout>::new(&db, &root) + .with_recorder(&mut accessed_nodes_tracker) + .build(); + + trie.get(b"key1").unwrap().unwrap(); + trie.get(b"key2").unwrap().unwrap(); + trie.get(b"key3").unwrap().unwrap(); + } + assert_eq!(accessed_nodes_tracker.ensure_no_unused_nodes(), Ok(())); + + let mut accessed_nodes_tracker = AccessedNodesTracker::<Hash>::new(proof_nodes_count); + { + let db = proof.into_memory_db(); + let trie = TrieDBBuilder::<Layout>::new(&db, &root) + .with_recorder(&mut accessed_nodes_tracker) + .build(); + + trie.get(b"key1").unwrap().unwrap(); + trie.get(b"key2").unwrap().unwrap(); + } + assert_eq!(accessed_nodes_tracker.ensure_no_unused_nodes(), Err(Error::UnusedNodes)); + } +} diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 54f202eda0c9a..ef6b6a5743c2b 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -21,6 +21,7 @@ extern crate alloc; +pub mod accessed_nodes_tracker; #[cfg(feature = "std")] pub mod cache; mod error; @@ -28,6 +29,7 @@ mod node_codec; mod node_header; #[cfg(feature = "std")] pub mod recorder; +pub mod recorder_ext; mod storage_proof; mod trie_codec; mod trie_stream; @@ -46,7 +48,7 @@ use hash_db::{Hasher, Prefix}; pub use memory_db::{prefixed_key, HashKey, KeyFunction, PrefixedKey}; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::{CompactProof, StorageProof}; +pub use storage_proof::{CompactProof, StorageProof, StorageProofError}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; @@ -64,6 +66,9 @@ pub use trie_db::{proof::VerifyError, MerkleValue}; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; +/// Raw storage proof type (just raw trie nodes). +pub type RawStorageProof = Vec<Vec<u8>>; + /// substrate trie layout pub struct LayoutV0<H>(PhantomData<H>); @@ -195,11 +200,11 @@ pub type MemoryDB<H> = memory_db::MemoryDB<H, memory_db::HashKey<H>, trie_db::DB /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type GenericMemoryDB<H, KF> = memory_db::MemoryDB<H, KF, trie_db::DBValue>; -/// Persistent trie database read-access interface for the a given hasher.
+/// Persistent trie database read-access interface for a given hasher. pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>; /// Builder for creating a [`TrieDB`]. pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>; -/// Persistent trie database write-access interface for the a given hasher. +/// Persistent trie database write-access interface for a given hasher. pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Builder for creating a [`TrieDBMut`]. pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>; @@ -212,17 +217,17 @@ pub type TrieHash<L> = <<L as TrieLayout>::Hash as Hasher>::Out; pub mod trie_types { use super::*; - /// Persistent trie database read-access interface for the a given hasher. + /// Persistent trie database read-access interface for a given hasher. /// /// Read only V1 and V0 are compatible, thus we always use V1. pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1<H>>; /// Builder for creating a [`TrieDB`]. pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1<H>>; - /// Persistent trie database write-access interface for the a given hasher. + /// Persistent trie database write-access interface for a given hasher. pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0<H>>; /// Builder for creating a [`TrieDBMutV0`]. pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0<H>>; - /// Persistent trie database write-access interface for the a given hasher. + /// Persistent trie database write-access interface for a given hasher. pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1<H>>; /// Builder for creating a [`TrieDBMutV1`]. pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1<H>>; @@ -616,6 +621,50 @@ mod tests { type MemoryDBMeta<H> = memory_db::MemoryDB<H, memory_db::HashKey<H>, trie_db::DBValue>; + pub fn create_trie<L: TrieConfiguration>( + data: &[(&[u8], &[u8])], + ) -> (MemoryDB<L::Hash>, trie_db::TrieHash<L>) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = trie_db::TrieDBMutBuilder::<L>::new(&mut db, &mut root).build(); + for (k, v) in data { + trie.insert(k, v).expect("Inserts data"); + } + } + + let mut recorder = Recorder::<L>::new(); + { + let trie = trie_db::TrieDBBuilder::<L>::new(&mut db, &mut root) + .with_recorder(&mut recorder) + .build(); + for (k, _v) in data { + trie.get(k).unwrap(); + } + } + + (db, root) + } + + pub fn create_storage_proof<L: TrieConfiguration>( + data: &[(&[u8], &[u8])], + ) -> (RawStorageProof, trie_db::TrieHash<L>) { + let (db, root) = create_trie::<L>(data); + + let mut recorder = Recorder::<L>::new(); + { + let trie = trie_db::TrieDBBuilder::<L>::new(&db, &root) + .with_recorder(&mut recorder) + .build(); + for (k, _v) in data { + trie.get(k).unwrap(); + } + } + + (recorder.drain().into_iter().map(|record| record.data).collect(), root) + } + fn hashed_null_node<T: TrieConfiguration>() -> TrieHash<T> { <T::Codec as NodeCodecT>::hashed_null_node() } diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 22a22b33b3709..2886577eddc60 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -145,7 +145,7 @@ impl<H: Hasher> Recorder<H> { /// Convert the recording to a [`StorageProof`]. /// - /// In contrast to [`Self::drain_storage_proof`] this doesn't consumes and doesn't clears the + /// In contrast to [`Self::drain_storage_proof`] this doesn't consume and doesn't clear the /// recordings. /// /// Returns the [`StorageProof`].
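For orientation, the following is a minimal usage sketch, not part of the diff above, of the recorder documented in this hunk: accesses are recorded through `as_trie_recorder` while reading the trie, and the recordings can then be turned into a `StorageProof` either non-destructively (`to_storage_proof`) or by draining (`drain_storage_proof`). The `Blake2Hasher`/`LayoutV1` layout and the `prove_read` helper name are assumptions made for the example.

    use sp_core::{Blake2Hasher, H256};
    use sp_trie::{recorder::Recorder, LayoutV1, MemoryDB, StorageProof, TrieDBBuilder};
    use trie_db::Trie;

    type Layout = LayoutV1<Blake2Hasher>;

    // Record the nodes visited while reading `key`, then build a proof from the recordings.
    fn prove_read(db: &MemoryDB<Blake2Hasher>, root: &H256, key: &[u8]) -> StorageProof {
        let recorder = Recorder::<Blake2Hasher>::default();
        {
            let mut trie_recorder = recorder.as_trie_recorder(*root);
            let trie = TrieDBBuilder::<Layout>::new(db, root)
                .with_recorder(&mut trie_recorder)
                .build();
            let _ = trie.get(key);
        }
        // `to_storage_proof` keeps the recordings; `drain_storage_proof` would consume them.
        recorder.to_storage_proof()
    }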
@@ -429,7 +429,8 @@ impl<'a, H: Hasher> trie_db::TrieRecorder<H::Out> for TrieRecorder<'a, H> { #[cfg(test)] mod tests { use super::*; - use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; + use crate::tests::create_trie; + use trie_db::{Trie, TrieDBBuilder, TrieRecorder}; type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>; type Layout = crate::LayoutV1<sp_core::Blake2Hasher>; @@ -438,23 +439,9 @@ mod tests { const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key4", &[4; 64])]; - fn create_trie() -> (MemoryDB, TrieHash<Layout>) { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build(); - for (k, v) in TEST_DATA { - trie.insert(k, v).expect("Inserts data"); - } - } - - (db, root) - } - #[test] fn recorder_works() { - let (db, root) = create_trie(); + let (db, root) = create_trie::<Layout>(TEST_DATA); let recorder = Recorder::default(); @@ -498,7 +485,7 @@ mod tests { #[test] fn recorder_transactions_rollback_work() { - let (db, root) = create_trie(); + let (db, root) = create_trie::<Layout>(TEST_DATA); let recorder = Recorder::default(); let mut stats = vec![RecorderStats::default()]; @@ -547,7 +534,7 @@ mod tests { #[test] fn recorder_transactions_commit_work() { - let (db, root) = create_trie(); + let (db, root) = create_trie::<Layout>(TEST_DATA); let recorder = Recorder::default(); @@ -586,7 +573,7 @@ mod tests { #[test] fn recorder_transactions_commit_and_rollback_work() { - let (db, root) = create_trie(); + let (db, root) = create_trie::<Layout>(TEST_DATA); let recorder = Recorder::default(); @@ -645,7 +632,7 @@ mod tests { #[test] fn recorder_transaction_accessed_keys_works() { let key = TEST_DATA[0].0; - let (db, root) = create_trie(); + let (db, root) = create_trie::<Layout>(TEST_DATA); let recorder = Recorder::default(); diff --git a/substrate/primitives/trie/src/recorder_ext.rs b/substrate/primitives/trie/src/recorder_ext.rs new file mode 100644 index 0000000000000..866d5b72c5d64 --- /dev/null +++ b/substrate/primitives/trie/src/recorder_ext.rs @@ -0,0 +1,47 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Extension for the default recorder. + +use crate::RawStorageProof; +use alloc::{collections::BTreeSet, vec::Vec}; +use trie_db::{Recorder, TrieLayout}; + +/// Convenience extension for the `Recorder` struct. +/// +/// Used to deduplicate some logic. +pub trait RecorderExt +where + Self: Sized, +{ + /// Convert the recorder into a `BTreeSet`. + fn into_set(self) -> BTreeSet<Vec<u8>>; + + /// Convert the recorder into a `RawStorageProof`, avoiding duplicate nodes.
+ fn into_raw_storage_proof(self) -> RawStorageProof { + // The recorder may record the same trie node multiple times, + // and we don't want duplicate nodes in our proofs + // => let's deduplicate it by collecting to a BTreeSet first + self.into_set().into_iter().collect() + } +} + +impl<L: TrieLayout> RecorderExt for Recorder<L> { + fn into_set(mut self) -> BTreeSet<Vec<u8>> { + self.drain().into_iter().map(|record| record.data).collect::<BTreeSet<_>>() + } +}
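In the same spirit, a short illustrative sketch of how the `RecorderExt` extension added above can turn a plain `trie_db::Recorder` into a deduplicated `RawStorageProof`; the layout choice and the `prove_raw` helper name are assumptions.

    use sp_core::{Blake2Hasher, H256};
    use sp_trie::{recorder_ext::RecorderExt, LayoutV1, MemoryDB, RawStorageProof, TrieDBBuilder};
    use trie_db::{Recorder, Trie};

    type Layout = LayoutV1<Blake2Hasher>;

    // Read `key` while recording the visited nodes, then convert the recordings into a
    // `RawStorageProof`; `into_raw_storage_proof` deduplicates nodes via a `BTreeSet`.
    fn prove_raw(db: &MemoryDB<Blake2Hasher>, root: &H256, key: &[u8]) -> RawStorageProof {
        let mut recorder = Recorder::<Layout>::new();
        {
            let trie = TrieDBBuilder::<Layout>::new(db, root)
                .with_recorder(&mut recorder)
                .build();
            let _ = trie.get(key);
        }
        recorder.into_raw_storage_proof()
    }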
diff --git a/substrate/primitives/trie/src/storage_proof.rs b/substrate/primitives/trie/src/storage_proof.rs index e46c49be19cb8..a9f6298742f64 100644 --- a/substrate/primitives/trie/src/storage_proof.rs +++ b/substrate/primitives/trie/src/storage_proof.rs @@ -25,6 +25,13 @@ use scale_info::TypeInfo; // with `LayoutV0`. use crate::LayoutV1 as Layout; +/// Error associated with the `storage_proof` module. +#[derive(Encode, Decode, Clone, Eq, PartialEq, Debug, TypeInfo)] +pub enum StorageProofError { + /// The proof contains duplicate nodes. + DuplicateNodes, +} + /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that /// does not already have access to the key-value pairs. @@ -43,6 +50,22 @@ impl StorageProof { StorageProof { trie_nodes: BTreeSet::from_iter(trie_nodes) } } + /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. + /// + /// Returns an error if the provided subset of encoded trie nodes contains duplicates. + pub fn new_with_duplicate_nodes_check( + trie_nodes: impl IntoIterator<Item = Vec<u8>>, + ) -> Result<Self, StorageProofError> { + let mut trie_nodes_set = BTreeSet::new(); + for node in trie_nodes { + if !trie_nodes_set.insert(node) { + return Err(StorageProofError::DuplicateNodes); + } + } + + Ok(StorageProof { trie_nodes: trie_nodes_set }) + } + /// Returns a new empty proof. /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of @@ -56,6 +79,11 @@ impl StorageProof { self.trie_nodes.is_empty() } + /// Returns the number of nodes in the proof. + pub fn len(&self) -> usize { + self.trie_nodes.len() + } + /// Convert into an iterator over encoded trie nodes in lexicographical order constructed /// from the proof.
pub fn into_iter_nodes(self) -> impl Sized + DoubleEndedIterator<Item = Vec<u8>> { @@ -198,3 +226,23 @@ impl CompactProof { Ok((db, root)) } } + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::{tests::create_storage_proof, StorageProof}; + + type Layout = crate::LayoutV1<sp_core::Blake2Hasher>; + + const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", &[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key11", &[4; 64])]; + + #[test] + fn proof_with_duplicate_nodes_is_rejected() { + let (raw_proof, _root) = create_storage_proof::<Layout>(TEST_DATA); + assert!(matches!( + StorageProof::new_with_duplicate_nodes_check(raw_proof), + Err(StorageProofError::DuplicateNodes) + )); + } +}
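Tying the new pieces together, a hedged sketch of how a verifier might combine `StorageProof::new_with_duplicate_nodes_check` with `AccessedNodesTracker` to reject proofs containing either duplicate or unused nodes. The flow mirrors the tests above; the layout, error strings and `check_proof` helper name are assumptions.

    use sp_core::{Blake2Hasher, H256};
    use sp_trie::{
        accessed_nodes_tracker::AccessedNodesTracker, LayoutV1, RawStorageProof, StorageProof,
        TrieDBBuilder,
    };
    use trie_db::Trie;

    type Layout = LayoutV1<Blake2Hasher>;

    // Reject duplicate nodes up front, then read the expected keys and make sure every node
    // in the proof was actually accessed.
    fn check_proof(
        raw_proof: RawStorageProof,
        root: H256,
        keys: &[&[u8]],
    ) -> Result<(), &'static str> {
        let proof = StorageProof::new_with_duplicate_nodes_check(raw_proof)
            .map_err(|_| "duplicate nodes in proof")?;
        let mut tracker = AccessedNodesTracker::<H256>::new(proof.len());

        let db = proof.into_memory_db();
        {
            let trie = TrieDBBuilder::<Layout>::new(&db, &root)
                .with_recorder(&mut tracker)
                .build();
            for key in keys {
                let _value = trie.get(key).map_err(|_| "invalid proof")?;
            }
        }

        tracker.ensure_no_unused_nodes().map_err(|_| "unused nodes in proof")
    }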
diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index f8ef8f66c5355..65a7c63f432dd 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-serde = { version = "0.4.0", default-features = false, optional = true } -parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +codec = { features = ["derive"], workspace = true } +impl-serde = { optional = true, workspace = true } +parity-wasm = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } thiserror = { optional = true, workspace = true } -sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } -sp-runtime = { path = "../runtime", default-features = false } -sp-std = { path = "../std", default-features = false } -sp-version-proc-macro = { path = "proc-macro", default-features = false } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-version-proc-macro = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 3abd5c0910694..1feea15b9fcdb 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } -proc-macro2 = "1.0.56" +codec = { features = ["derive"], workspace = true, default-features = true } +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } [dev-dependencies] -sp-version = { path = ".."
} +sp-version = { workspace = true, default-features = true } diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index 789c507742f77..55dea364eef4d 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -33,6 +33,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] @@ -40,6 +42,8 @@ use std::collections::HashSet; #[cfg(feature = "std")] use std::fmt; +#[doc(hidden)] +pub use alloc::borrow::Cow; use codec::{Decode, Encode, Input}; use scale_info::TypeInfo; use sp_runtime::RuntimeString; @@ -139,13 +143,13 @@ pub use sp_version_proc_macro::runtime_version; pub type ApiId = [u8; 8]; /// A vector of pairs of `ApiId` and a `u32` for version. -pub type ApisVec = sp_std::borrow::Cow<'static, [(ApiId, u32)]>; +pub type ApisVec = alloc::borrow::Cow<'static, [(ApiId, u32)]>; /// Create a vector of Api declarations. #[macro_export] macro_rules! create_apis_vec { ( $y:expr ) => { - $crate::sp_std::borrow::Cow::Borrowed(&$y) + $crate::Cow::Borrowed(&$y) }; } @@ -409,9 +413,9 @@ impl GetNativeVersion for std::sync::Arc { #[cfg(feature = "serde")] mod apis_serialize { use super::*; + use alloc::vec::Vec; use impl_serde::serialize as bytes; use serde::{de, ser::SerializeTuple, Serializer}; - use sp_std::vec::Vec; #[derive(Serialize)] struct ApiId<'a>(#[serde(serialize_with = "serialize_bytesref")] &'a super::ApiId, &'a u32); @@ -446,7 +450,7 @@ mod apis_serialize { impl<'de> de::Visitor<'de> for Visitor { type Value = ApisVec; - fn expecting(&self, formatter: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { formatter.write_str("a sequence of api id and version tuples") } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index a0c8342d2d3c5..f7a1d25d16bf3 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2.2" +codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { optional = true, workspace = true, default-features = true } -wasmtime = { version = "8.0.1", default-features = false, optional = true } -anyhow = { version = "1.0.81", optional = true } +wasmtime = { optional = true, workspace = true } +anyhow = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index d2d72a7cb019f..c9f4f39d041c2 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -16,14 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +bounded-collections = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, 
features = ["alloc", "derive"], workspace = true } -smallvec = "1.11.0" -sp-arithmetic = { path = "../arithmetic", default-features = false } -sp-debug-derive = { path = "../debug-derive", default-features = false } -schemars = { version = "0.8.3", default-features = false, optional = true } +smallvec = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true } +sp-debug-derive = { workspace = true } +schemars = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 8e3e6138b9a8d..5ec665de05ea9 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -14,11 +14,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -flate2 = "1.0" -fs_extra = "1.3" -glob = "0.3" -tar = "0.4" -tempfile = "3" -toml_edit = "0.19" -itertools = "0.11" +clap = { features = ["derive"], workspace = true } +flate2 = { workspace = true } +fs_extra = { workspace = true } +glob = { workspace = true } +tar = { workspace = true } +tempfile = { workspace = true } +toml_edit = { workspace = true } +itertools = { workspace = true } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 56b1c038199a8..71aac02ba9b64 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -tokio = { version = "1.22.0", features = ["macros", "time"] } +futures = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } [dev-dependencies] -trybuild = { version = "1.0.88", features = ["diff"] } -sc-service = { path = "../client/service" } +trybuild = { features = ["diff"], workspace = true } +sc-service = { workspace = true, default-features = true } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 87c595c66f348..52642b9f62bb9 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-rpc-client = { path = "../../utils/frame/rpc/client" } -sp-rpc = { path = "../../primitives/rpc" } -assert_cmd = "2.0.10" -nix = { version = "0.28.0", features = ["signal"] } -regex = "1.7.3" -tokio = { version = "1.22.0", features = ["full"] } -node-primitives = { path = "../../bin/node/primitives" } -node-cli = { package = "staging-node-cli", path = "../../bin/node/cli" } -sc-cli = { path = "../../client/cli" } -sc-service = { path = "../../client/service" } -futures = "0.3.28" +substrate-rpc-client = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +assert_cmd = { workspace = true } +nix = { features = ["signal"], workspace = true } +regex = { workspace = true } +tokio = { features = ["full"], workspace = true, default-features = true } +node-primitives = { workspace = true, default-features = true } +node-cli = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +futures = { workspace = true } [features] try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/client/Cargo.toml 
b/substrate/test-utils/client/Cargo.toml index 5871f1bf5b4d0..ecf04dac1a673 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" +array-bytes = { workspace = true, default-features = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-client-api = { path = "../../client/api" } -sc-client-db = { path = "../../client/db", default-features = false, features = [ +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { features = [ "test-helpers", -] } -sc-consensus = { path = "../../client/consensus/common" } -sc-executor = { path = "../../client/executor" } -sc-offchain = { path = "../../client/offchain" } -sc-service = { path = "../../client/service", default-features = false, features = [ +], workspace = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-service = { features = [ "test-helpers", -] } -sp-blockchain = { path = "../../primitives/blockchain" } -sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core" } -sp-keyring = { path = "../../primitives/keyring" } -sp-keystore = { path = "../../primitives/keystore" } -sp-runtime = { path = "../../primitives/runtime" } -sp-state-machine = { path = "../../primitives/state-machine" } -tokio = { version = "1.22.0", features = ["sync"] } +], workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +tokio = { features = ["sync"], workspace = true, default-features = true } diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 73581a4f0efa3..9dc4739eb7954 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -153,7 +153,7 @@ where Self: BlockImport, RA: Send, B: Send + Sync, - E: Send, + E: Send + Sync, { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 8733ff9fcebb3..5c8f49a6db859 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -16,59 +16,59 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, 
features = ["serde"] } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } -sp-block-builder = { path = "../../primitives/block-builder", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } -scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-keyring = { path = "../../primitives/keyring", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false } -frame-support = { path = "../../frame/support", default-features = false } -sp-version = { path = "../../primitives/version", default-features = false } -sp-session = { path = "../../primitives/session", default-features = false } -sp-api = { path = "../../primitives/api", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -pallet-babe = { path = "../../frame/babe", default-features = false } -pallet-balances = { path = "../../frame/balances", default-features = false } -frame-executive = { path = "../../frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../frame/metadata-hash-extension", default-features = false } -frame-system = { path = "../../frame/system", default-features = false } -frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } -pallet-timestamp = { path = "../../frame/timestamp", default-features = false } -sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-trie = { path = "../../primitives/trie", default-features = false } -sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false } -trie-db = { version = "0.29.0", default-features = false } -sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false } -sp-externalities = { path = "../../primitives/externalities", default-features = false } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-consensus-aura = { features = ["serde"], workspace = true } +sp-consensus-babe = { features = ["serde"], workspace = true } +sp-genesis-builder = { workspace = true } +sp-block-builder = { workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-inherents = { workspace = true } +sp-keyring = { workspace = true } +sp-offchain = { workspace = true } +sp-core = { features = ["serde"], workspace = true } +sp-crypto-hashing = { workspace = true } +sp-io = { workspace = true } +frame-support = { workspace = true } +sp-version = { workspace = true } +sp-session = { workspace = true } +sp-api = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-system 
= { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-timestamp = { workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-trie = { workspace = true } +sp-transaction-pool = { workspace = true } +trie-db = { workspace = true } +sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sp-state-machine = { workspace = true } +sp-externalities = { workspace = true } # 3rd party -array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } serde_json = { workspace = true, features = ["alloc"] } log = { workspace = true } -hex-literal = { version = "0.4.1" } +tracing = { workspace = true, default-features = false } [dev-dependencies] -futures = "0.3.30" -sc-block-builder = { path = "../../client/block-builder" } -sc-chain-spec = { path = "../../client/chain-spec" } -sc-executor = { path = "../../client/executor" } -sc-executor-common = { path = "../../client/executor/common" } -sp-consensus = { path = "../../primitives/consensus/common" } -substrate-test-runtime-client = { path = "client" } -sp-tracing = { path = "../../primitives/tracing" } +futures = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +sp-tracing = { workspace = true, default-features = true } serde = { features = ["alloc", "derive"], workspace = true } serde_json = { features = ["alloc"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true, features = ["metadata-hash"] } +substrate-wasm-builder = { optional = true, features = ["metadata-hash"], workspace = true, default-features = true } [features] default = ["std"] @@ -112,6 +112,7 @@ std = [ "sp-trie/std", "sp-version/std", "substrate-wasm-builder", + "tracing/std", "trie-db/std", ] diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index 5ca24fea33eda..3c628d1e764fd 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -15,14 +15,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -sc-block-builder = { path = "../../../client/block-builder" } -sc-client-api = { path = "../../../client/api" } -sc-consensus = { path = "../../../client/consensus/common" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -substrate-test-client = { path = "../../client" } -substrate-test-runtime = { path = ".." 
} +futures = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 0aab6d3f01ca0..d1a3eaa2daa92 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -27,7 +27,6 @@ pub mod extrinsic; pub mod genesismap; pub mod substrate_test_pallet; -use alloc::boxed::Box; #[cfg(not(feature = "std"))] use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; @@ -596,7 +595,11 @@ impl_runtime_apis! { } fn do_trace_log() { - log::trace!("Hey I'm runtime"); + log::trace!(target: "test", "Hey I'm runtime"); + + let data = "THIS IS TRACING"; + + tracing::trace!(target: "test", %data, "Hey, I'm tracing"); } fn verify_ed25519(sig: ed25519::Signature, public: ed25519::Public, message: Vec) -> bool { @@ -874,7 +877,7 @@ pub mod storage_key_generator { sp_crypto_hashing::twox_64(x).iter().chain(x.iter()).cloned().collect() } - /// Generate the hashed storage keys from the raw literals. These keys are expected to be be in + /// Generate the hashed storage keys from the raw literals. These keys are expected to be in /// storage with given substrate-test runtime. pub fn generate_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec { let mut literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index"]; diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 360e2b7b810d1..073997da025d2 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -15,12 +15,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -futures = "0.3.30" -parking_lot = "0.12.1" +codec = { workspace = true, default-features = true } +futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } thiserror = { workspace = true } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-runtime = { path = "../../../primitives/runtime" } -substrate-test-runtime-client = { path = "../client" } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index fd35e6b1e1a25..c0b65d731f7a6 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -12,15 +12,15 @@ homepage = "https://substrate.io" workspace = true [dependencies] 
-array-bytes = { version = "6.2.2", optional = true } +array-bytes = { optional = true, workspace = true, default-features = true } log = { optional = true, workspace = true } -hash-db = { version = "0.16.0", default-features = false } +hash-db = { workspace = true } [dev-dependencies] -array-bytes = "6.2.2" -env_logger = "0.11" -sp-core = { path = "../../primitives/core" } -sp-runtime = { path = "../../primitives/runtime" } +array-bytes = { workspace = true, default-features = true } +env_logger = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] debug = ["array-bytes", "log"] diff --git a/substrate/utils/fork-tree/Cargo.toml b/substrate/utils/fork-tree/Cargo.toml index 275f44623bd14..2bb799c603c84 100644 --- a/substrate/utils/fork-tree/Cargo.toml +++ b/substrate/utils/fork-tree/Cargo.toml @@ -17,4 +17,4 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +codec = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 7cfacdc2e5ede..bd1a22affd03b 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -16,49 +16,49 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -array-bytes = "6.2.2" -chrono = "0.4" -clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } -comfy-table = { version = "7.1.0", default-features = false } -handlebars = "5.1.0" -Inflector = "0.11.4" -itertools = "0.11" -lazy_static = "1.4.0" -linked-hash-map = "0.5.4" +array-bytes = { workspace = true, default-features = true } +chrono = { workspace = true } +clap = { features = ["derive"], workspace = true } +codec = { workspace = true, default-features = true } +comfy-table = { workspace = true } +handlebars = { workspace = true } +Inflector = { workspace = true } +itertools = { workspace = true } +lazy_static = { workspace = true } +linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } -rand = { version = "0.8.5", features = ["small_rng"] } -rand_pcg = "0.3.1" +rand = { features = ["small_rng"], workspace = true, default-features = true } +rand_pcg = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -thousands = "0.2.0" -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -sc-block-builder = { path = "../../../client/block-builder" } -sc-chain-spec = { path = "../../../client/chain-spec", default-features = false } -sc-cli = { path = "../../../client/cli", default-features = false } -sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", default-features = false } -sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", default-features = false } -sc-sysinfo = { path = "../../../client/sysinfo" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core" } -sp-database = { path = 
"../../../primitives/database" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-genesis-builder = { path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-storage = { path = "../../../primitives/storage" } -sp-trie = { path = "../../../primitives/trie" } -sp-io = { path = "../../../primitives/io" } -sp-wasm-interface = { path = "../../../primitives/wasm-interface" } -gethostname = "0.2.3" +thousands = { workspace = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true } +sc-cli = { workspace = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } +sc-executor = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-sysinfo = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-database = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } +gethostname = { workspace = true } [features] default = ["rocksdb"] diff --git a/substrate/utils/frame/generate-bags/Cargo.toml b/substrate/utils/frame/generate-bags/Cargo.toml index 2688254bd5ea3..934028c9608dc 100644 --- a/substrate/utils/frame/generate-bags/Cargo.toml +++ b/substrate/utils/frame/generate-bags/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # FRAME -frame-support = { path = "../../../frame/support" } -frame-election-provider-support = { path = "../../../frame/election-provider-support" } -frame-system = { path = "../../../frame/system" } -pallet-staking = { path = "../../../frame/staking" } -sp-staking = { path = "../../../primitives/staking" } +frame-support = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } # third party -chrono = { version = "0.4.31" } -num-format = "0.4.3" +chrono = { workspace = true } +num-format = { workspace = true } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index 37d96d7e12b96..6dcbca4b97e24 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ 
b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -13,8 +13,8 @@ publish = false workspace = true [dependencies] -kitchensink-runtime = { path = "../../../../bin/node/runtime" } -generate-bags = { path = ".." } +kitchensink-runtime = { workspace = true } +generate-bags = { workspace = true, default-features = true } # third-party -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } diff --git a/substrate/utils/frame/omni-bencher/Cargo.toml b/substrate/utils/frame/omni-bencher/Cargo.toml index 0c2d1a1b32b1f..f8f44cb4b4387 100644 --- a/substrate/utils/frame/omni-bencher/Cargo.toml +++ b/substrate/utils/frame/omni-bencher/Cargo.toml @@ -11,11 +11,11 @@ license.workspace = true workspace = true [dependencies] -clap = { version = "4.5.2", features = ["derive"] } -cumulus-primitives-proof-size-hostfunction = { path = "../../../../cumulus/primitives/proof-size-hostfunction" } -frame-benchmarking-cli = { path = "../benchmarking-cli", default-features = false } -sc-cli = { path = "../../../client/cli" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-statement-store = { path = "../../../primitives/statement-store" } -env_logger = "0.11.2" +clap = { features = ["derive"], workspace = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true } +sc-cli = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +env_logger = { workspace = true } log = { workspace = true } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 2911d5eef6590..cc075da68c276 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -15,24 +15,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["http-client"] } -codec = { package = "parity-scale-codec", version = "3.6.12" } +jsonrpsee = { features = ["http-client"], workspace = true } +codec = { workspace = true, default-features = true } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } -sp-core = { path = "../../../primitives/core" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } -substrate-rpc-client = { path = "../rpc/client" } -futures = "0.3.30" -indicatif = "0.17.7" -spinners = "4.1.0" -tokio-retry = "0.3.0" +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +futures = { workspace = true } +indicatif = { workspace = true } +spinners = { workspace = true } +tokio-retry = { workspace = true } [dev-dependencies] -sp-tracing = { path = 
"../../../primitives/tracing" } +sp-tracing = { workspace = true, default-features = true } [features] remote-test = [] diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 44e5f467d895e..40864085349b0 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -22,10 +22,7 @@ use codec::{Compact, Decode, Encode}; use indicatif::{ProgressBar, ProgressStyle}; -use jsonrpsee::{ - core::params::ArrayParams, - http_client::{HttpClient, HttpClientBuilder}, -}; +use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient}; use log::*; use serde::de::DeserializeOwned; use sp_core::{ @@ -190,7 +187,7 @@ impl Transport { } else { uri.clone() }; - let http_client = HttpClientBuilder::default() + let http_client = HttpClient::builder() .max_request_size(u32::MAX) .max_response_size(u32::MAX) .request_timeout(std::time::Duration::from_secs(60 * 5)) diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index 501bb95b25794..1300fae9fff29 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["ws-client"] } -sc-rpc-api = { path = "../../../../client/rpc-api" } -async-trait = "0.1.79" +jsonrpsee = { features = ["ws-client"], workspace = true } +sc-rpc-api = { workspace = true, default-features = true } +async-trait = { workspace = true } serde = { workspace = true, default-features = true } -sp-runtime = { path = "../../../../primitives/runtime" } +sp-runtime = { workspace = true, default-features = true } log = { workspace = true, default-features = true } [dev-dependencies] -tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread", "sync"] } -sp-core = { path = "../../../../primitives/core" } +tokio = { features = ["macros", "rt-multi-thread", "sync"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index ee3bf5eb68d71..c8ff281301e36 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -16,20 +16,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } +codec = { workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } -sp-core = { path = "../../../../primitives/core" } -sp-state-machine = { path = "../../../../primitives/state-machine" } -sp-trie = { path = "../../../../primitives/trie" } -trie-db = "0.29.0" +sp-core = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +trie-db = { workspace = true, default-features = true } -jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } # Substrate Dependencies -sc-client-api = { path = "../../../../client/api" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sp-runtime = { path = 
"../../../../primitives/runtime" } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index bf566f909ecb7..e20bae730c7ee 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12" } -jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } +codec = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types"], workspace = true } serde = { workspace = true, default-features = true } -frame-support = { path = "../../../../frame/support" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sp-storage = { path = "../../../../primitives/storage" } +frame-support = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } [dev-dependencies] -scale-info = "2.11.1" -jsonrpsee = { version = "0.22", features = ["jsonrpsee-types", "ws-client"] } -tokio = "1.37" -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } -frame-system = { path = "../../../../frame/system" } +scale-info = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types", "ws-client"], workspace = true } +tokio = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 75d24e8e210fc..4e866113fd2ee 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,27 +16,27 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.30" -codec = { package = "parity-scale-codec", version = "3.6.12" } -docify = "0.2.0" -jsonrpsee = { version = "0.22.5", features = [ +futures = { workspace = true } +codec = { workspace = true, default-features = true } +docify = { workspace = true } +jsonrpsee = { features = [ "client-core", "macros", "server-core", -] } +], workspace = true } log = { workspace = true, default-features = true } -frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } -sc-rpc-api = { path = "../../../../client/rpc-api" } -sc-transaction-pool-api = { path = "../../../../client/transaction-pool/api" } -sp-api = { path = "../../../../primitives/api" } -sp-block-builder = { path = "../../../../primitives/block-builder" } -sp-blockchain = { path = "../../../../primitives/blockchain" } -sp-core = { path = "../../../../primitives/core" } -sp-runtime = { path = "../../../../primitives/runtime" } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, 
default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-transaction-pool = { path = "../../../../client/transaction-pool" } -tokio = "1.37" -assert_matches = "1.3.0" -sp-tracing = { path = "../../../../primitives/tracing" } -substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } +sc-transaction-pool = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +assert_matches = { workspace = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index 36527ac6183bb..9f5516cd8d741 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -16,12 +16,14 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hyper = { version = "0.14.16", default-features = false, features = ["http1", "server", "tcp"] } +http-body-util = { workspace = true } +hyper = { features = ["http1", "server"], workspace = true } +hyper-util = { features = ["server-auto", "tokio"], workspace = true } log = { workspace = true, default-features = true } -prometheus = { version = "0.13.0", default-features = false } +prometheus = { workspace = true } thiserror = { workspace = true } -tokio = { version = "1.22.0", features = ["parking_lot"] } +tokio = { features = ["parking_lot"], workspace = true, default-features = true } [dev-dependencies] -hyper = { version = "0.14.16", features = ["client"] } -tokio = { version = "1.22.0", features = ["rt-multi-thread"] } +hyper-util = { features = ["client-legacy", "tokio"], workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index ed1f9137aec4e..7a8c655906052 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ b/substrate/utils/prometheus/src/lib.rs @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use hyper::{ - http::StatusCode, - server::Server, - service::{make_service_fn, service_fn}, - Body, Request, Response, -}; +mod sourced; + +use hyper::{http::StatusCode, Request, Response}; +use prometheus::{core::Collector, Encoder, TextEncoder}; +use std::net::SocketAddr; + pub use prometheus::{ self, core::{ @@ -30,13 +30,10 @@ pub use prometheus::{ exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, Registry, }; -use prometheus::{core::Collector, Encoder, TextEncoder}; -use std::net::SocketAddr; - -mod sourced; - pub use sourced::{MetricSource, SourcedCounter, SourcedGauge, SourcedMetric}; +type Body = http_body_util::Full; + pub fn register( metric: T, registry: &Registry, @@ -63,7 +60,10 @@ pub enum Error { PortInUse(SocketAddr), } -async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { +async fn request_metrics( + req: Request, + registry: Registry, +) -> Result, Error> { if req.uri().path() == "/metrics" { let metric_families = registry.gather(); let mut buffer = vec![]; @@ -98,46 +98,49 @@ async fn init_prometheus_with_listener( listener: tokio::net::TcpListener, registry: Registry, ) -> Result<(), Error> { - let listener = hyper::server::conn::AddrIncoming::from_listener(listener)?; - log::info!("ใ€ฝ๏ธ Prometheus exporter started at {}", listener.local_addr()); - - let service = make_service_fn(move |_| { - let registry = registry.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - request_metrics(req, registry.clone()) - })) - } - }); + log::info!(target: "prometheus", "ใ€ฝ๏ธ Prometheus exporter started at {}", listener.local_addr()?); - let (signal, on_exit) = tokio::sync::oneshot::channel::<()>(); - let server = Server::builder(listener).serve(service).with_graceful_shutdown(async { - let _ = on_exit.await; - }); + let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); - let result = server.await.map_err(Into::into); + loop { + let io = match listener.accept().await { + Ok((sock, _)) => hyper_util::rt::TokioIo::new(sock), + Err(e) => { + log::debug!(target: "prometheus", "Error accepting connection: {:?}", e); + continue; + }, + }; - // Gracefully shutdown server, otherwise the server does not stop if it has open connections - let _ = signal.send(()); + let registry = registry.clone(); - result + let conn = server + .serve_connection_with_upgrades( + io, + hyper::service::service_fn(move |req| request_metrics(req, registry.clone())), + ) + .into_owned(); + + tokio::spawn(async move { + if let Err(err) = conn.await { + log::debug!(target: "prometheus", "connection error: {:?}", err); + } + }); + } } #[cfg(test)] mod tests { use super::*; - use hyper::{Client, Uri}; - - #[test] - fn prometheus_works() { - const METRIC_NAME: &str = "test_test_metric_name_test_test"; + use http_body_util::BodyExt; + use hyper::Uri; + use hyper_util::{client::legacy::Client, rt::TokioExecutor}; - let runtime = tokio::runtime::Runtime::new().expect("Creates the runtime"); + const METRIC_NAME: &str = "test_test_metric_name_test_test"; - let listener = runtime - .block_on(tokio::net::TcpListener::bind("127.0.0.1:0")) - .expect("Creates listener"); + #[tokio::test] + async fn prometheus_works() { + let listener = + tokio::net::TcpListener::bind("127.0.0.1:0").await.expect("Creates listener"); let local_addr = listener.local_addr().expect("Returns the local addr"); @@ -148,20 +151,20 @@ mod tests { ) .expect("Registers the test metric"); - 
runtime.spawn(init_prometheus_with_listener(listener, registry)); + tokio::spawn(init_prometheus_with_listener(listener, registry)); - runtime.block_on(async { - let client = Client::new(); + let client = Client::builder(TokioExecutor::new()).build_http::(); - let res = client - .get(Uri::try_from(&format!("http://{}/metrics", local_addr)).expect("Parses URI")) - .await - .expect("Requests metrics"); + let res = client + .get(Uri::try_from(&format!("http://{}/metrics", local_addr)).expect("Parses URI")) + .await + .expect("Requests metrics"); - let buf = hyper::body::to_bytes(res).await.expect("Converts body to bytes"); + assert!(res.status().is_success()); - let body = String::from_utf8(buf.to_vec()).expect("Converts body to String"); - assert!(body.contains(&format!("{} 0", METRIC_NAME))); - }); + let buf = res.into_body().collect().await.expect("Failed to read HTTP body").to_bytes(); + let body = String::from_utf8(buf.to_vec()).expect("Converts body to String"); + + assert!(body.contains(&format!("{} 0", METRIC_NAME))); } } diff --git a/substrate/utils/substrate-bip39/Cargo.toml b/substrate/utils/substrate-bip39/Cargo.toml index a46f81ee24d96..e5270ea62f4c8 100644 --- a/substrate/utils/substrate-bip39/Cargo.toml +++ b/substrate/utils/substrate-bip39/Cargo.toml @@ -9,15 +9,15 @@ edition.workspace = true repository.workspace = true [dependencies] -hmac = "0.12.1" -pbkdf2 = { version = "0.12.2", default-features = false } -schnorrkel = { version = "0.11.4", default-features = false } -sha2 = { version = "0.10.7", default-features = false } -zeroize = { version = "1.4.3", default-features = false } +hmac = { workspace = true } +pbkdf2 = { workspace = true } +schnorrkel = { workspace = true } +sha2 = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -bip39 = "2.0.0" -rustc-hex = "2.1.0" +bip39 = { workspace = true } +rustc-hex = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 090955494f0a7..f084400c12e8d 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -15,29 +15,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -build-helper = "0.1.1" -cargo_metadata = "0.15.4" -console = "0.15.8" -strum = { version = "0.26.2", features = ["derive"] } -tempfile = "3.1.0" -toml = "0.8.8" -walkdir = "2.4.0" -sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } -filetime = "0.2.16" -wasm-opt = "0.116" -parity-wasm = "0.45" +build-helper = { workspace = true } +cargo_metadata = { workspace = true } +console = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } +tempfile = { workspace = true } +toml = { workspace = true } +walkdir = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +filetime = { workspace = true } +wasm-opt = { workspace = true } +parity-wasm = { workspace = true } polkavm-linker = { workspace = true } # Dependencies required for the `metadata-hash` feature. 
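The optional dependencies listed right below this comment only back the `metadata-hash` feature of `substrate-wasm-builder`; they are not enabled by default. As a rough, non-authoritative sketch of how a runtime crate could opt in, a `build.rs` along these lines is plausible, assuming the `enable_metadata_hash(token_symbol, decimals)` and `init_with_defaults`/`build_using_defaults` helpers on `WasmBuilder` (these names are an assumption, not part of this diff):

```rust
// Hypothetical build.rs sketch, not taken from this diff.
// With the `metadata-hash` feature on, the wasm build also computes and
// embeds the metadata hash; otherwise it falls back to a plain build.
#[cfg(all(feature = "std", feature = "metadata-hash"))]
fn main() {
	substrate_wasm_builder::WasmBuilder::init_with_defaults()
		// Token symbol and decimals used when hashing the metadata (assumed API).
		.enable_metadata_hash("UNIT", 12)
		.build()
}

#[cfg(all(feature = "std", not(feature = "metadata-hash")))]
fn main() {
	substrate_wasm_builder::WasmBuilder::build_using_defaults()
}

// The runtime crate itself is no_std; nothing to do for the wasm target.
#[cfg(not(feature = "std"))]
fn main() {}
```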
-merkleized-metadata = { version = "0.1.0", optional = true } -sc-executor = { path = "../../client/executor", optional = true } -sp-core = { path = "../../primitives/core", optional = true } -sp-io = { path = "../../primitives/io", optional = true } -sp-version = { path = "../../primitives/version", optional = true } -frame-metadata = { version = "16.0.0", features = ["current"], optional = true } -codec = { package = "parity-scale-codec", version = "3.1.5", optional = true } -array-bytes = { version = "6.1", optional = true } -sp-tracing = { path = "../../primitives/tracing", optional = true } +merkleized-metadata = { optional = true, workspace = true } +sc-executor = { optional = true, workspace = true, default-features = true } +sp-core = { optional = true, workspace = true, default-features = true } +sp-io = { optional = true, workspace = true, default-features = true } +sp-version = { optional = true, workspace = true, default-features = true } +frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true } +codec = { optional = true, workspace = true, default-features = true } +array-bytes = { optional = true, workspace = true, default-features = true } +sp-tracing = { optional = true, workspace = true, default-features = true } [features] # Enable support for generating the metadata hash. diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml index ca00cb8428452..ba96e139bcf19 100644 --- a/templates/minimal/Cargo.toml +++ b/templates/minimal/Cargo.toml @@ -10,13 +10,13 @@ edition.workspace = true publish = false [dependencies] -minimal-template-node = { path = "./node" } -minimal-template-runtime = { path = "./runtime" } -pallet-minimal-template = { path = "./pallets/template" } -polkadot-sdk-docs = { path = "../../docs/sdk" } +minimal-template-node = { workspace = true } +minimal-template-runtime = { workspace = true } +pallet-minimal-template = { workspace = true, default-features = true } +polkadot-sdk-docs = { workspace = true } -frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame" } +frame = { workspace = true, default-features = true } # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.7" +docify = { workspace = true } diff --git a/templates/minimal/README.md b/templates/minimal/README.md index f00bfd4d48772..b556a45360890 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -14,7 +14,7 @@ * ๐Ÿค This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node. -* ๐Ÿ”ง Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +* ๐Ÿ”ง Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). * ๐Ÿ‘ค The template has no consensus configured - it is best for experimenting with a single node network. @@ -42,7 +42,7 @@ packages required to compile this template - please take note of the Rust compil ๐Ÿ”จ Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package minimal-template-node --release ``` ๐Ÿณ Alternatively, build the docker image: @@ -65,8 +65,8 @@ docker run --rm polkadot-sdk-minimal-template --dev Development chains: * ๐Ÿงน Do not persist the state. 
-* 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. -* 🧑‍⚖️ Development accounts are used as `sudo` accounts. +* 💰 Are pre-configured with a genesis state that includes several pre-funded development accounts. +* 🧑‍⚖️ One development account (`ALICE`) is used as the `sudo` account. ### Connect with the Polkadot-JS Apps Front-End diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index a10364a2854a9..70b24c19f8e7c 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -14,47 +14,47 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = "0.2.0" -clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.30", features = ["thread-pool"] } -futures-timer = "3.0.1" -jsonrpsee = { version = "0.22", features = ["server"] } +docify = { workspace = true } +clap = { features = ["derive"], workspace = true } +futures = { features = ["thread-pool"], workspace = true } +futures-timer = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } serde_json = { workspace = true, default-features = true } -sc-cli = { path = "../../../substrate/client/cli" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-manual-seal = { path = "../../../substrate/client/consensus/manual-seal" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-client-api = { path = "../../../substrate/client/api" } - -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } - -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } +sc-cli = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } + +sp-timestamp = { workspace = true, default-features = true } +sp-keyring
= { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } + +substrate-frame-rpc-system = { workspace = true, default-features = true } # Once the native runtime is gone, there should be little to no dependency on FRAME here, and # certainly no dependency on the runtime. -frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = [ +frame = { features = [ "experimental", "runtime", -] } -runtime = { package = "minimal-template-runtime", path = "../runtime" } +], workspace = true, default-features = true } +minimal-template-runtime = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs index 7a3475bb16733..5b53b0f80ac00 100644 --- a/templates/minimal/node/src/chain_spec.rs +++ b/templates/minimal/node/src/chain_spec.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; +use minimal_template_runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; use sc_service::{ChainType, Properties}; use serde_json::{json, Value}; use sp_keyring::AccountKeyring; @@ -43,7 +43,7 @@ pub fn development_config() -> Result { /// Configure initial storage state for FRAME pallets. fn testnet_genesis() -> Value { use frame::traits::Get; - use runtime::interface::{Balance, MinimumBalance}; + use minimal_template_runtime::interface::{Balance, MinimumBalance}; let endowment = >::get().max(1) * 1000; let balances = AccountKeyring::iter() .map(|a| (a.to_account_id(), endowment)) diff --git a/templates/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs index 432add922a7b5..c17f9bc55927b 100644 --- a/templates/minimal/node/src/command.rs +++ b/templates/minimal/node/src/command.rs @@ -114,7 +114,9 @@ pub fn run() -> sc_cli::Result<()> { }, Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(&config)) + runner.sync_run(|config| { + cmd.run::(&config) + }) }, None => { let runner = cli.create_runner(&cli.run)?; diff --git a/templates/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs index 4b283bb2a66f4..451e7b21dd0c1 100644 --- a/templates/minimal/node/src/rpc.rs +++ b/templates/minimal/node/src/rpc.rs @@ -23,7 +23,7 @@ #![warn(missing_docs)] use jsonrpsee::RpcModule; -use runtime::interface::{AccountId, Nonce, OpaqueBlock}; +use minimal_template_runtime::interface::{AccountId, Nonce, OpaqueBlock}; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use std::sync::Arc; diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 5a92627621bfc..71b1ef65b6cad 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use futures::FutureExt; -use runtime::{self, interface::OpaqueBlock as Block, RuntimeApi}; +use minimal_template_runtime::{interface::OpaqueBlock as Block, RuntimeApi}; use sc_client_api::backend::Backend; use sc_executor::WasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; @@ -34,8 +34,10 @@ type HostFunctions = #[cfg(not(feature = "runtime-benchmarks"))] type HostFunctions = sp_io::SubstrateHostFunctions; +#[docify::export] pub(crate) type FullClient = sc_service::TFullClient>; + type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index f0abe3c6942de..9d231fe7d7d45 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -13,16 +13,16 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", features = [ +codec = { features = [ "derive", -], default-features = false } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } -frame = { package = "polkadot-sdk-frame", path = "../../../../substrate/frame", default-features = false, features = [ +], workspace = true } +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } [features] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs index 713f014bbe61f..92b90ad4412b0 100644 --- a/templates/minimal/pallets/template/src/lib.rs +++ b/templates/minimal/pallets/template/src/lib.rs @@ -1,4 +1,7 @@ //! A shell pallet built with [`frame`]. +//! +//! To get started with this pallet, try implementing the guide in +//! #![cfg_attr(not(feature = "std"), no_std)] diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 42ea49ff40462..5d3cf8492e522 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -10,31 +10,31 @@ edition.workspace = true publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } -scale-info = { version = "2.6.0", default-features = false } +codec = { workspace = true } +scale-info = { workspace = true } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", default-features = false, features = [ +frame = { features = [ "experimental", "runtime", -] } +], workspace = true } # pallets that we want to use -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } +sp-genesis-builder = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } # local pallet templates -pallet-minimal-template = { path = "../pallets/template", default-features = false } +pallet-minimal-template = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index d2debbf5689fd..08ad537ecdd14 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -23,6 +23,9 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +extern crate alloc; + +use alloc::{vec, vec::Vec}; use frame::{ deps::frame_support::{ genesis_builder_helper::{build_state, get_preset}, @@ -99,27 +102,27 @@ mod runtime { /// Mandatory system pallet that should always be included in a FRAME runtime. #[runtime::pallet_index(0)] - pub type System = frame_system; + pub type System = frame_system::Pallet; /// Provides a way for consensus systems to set and check the onchain time. #[runtime::pallet_index(1)] - pub type Timestamp = pallet_timestamp; + pub type Timestamp = pallet_timestamp::Pallet; /// Provides the ability to keep track of balances. #[runtime::pallet_index(2)] - pub type Balances = pallet_balances; + pub type Balances = pallet_balances::Pallet; /// Provides a way to execute privileged functions. #[runtime::pallet_index(3)] - pub type Sudo = pallet_sudo; + pub type Sudo = pallet_sudo::Pallet; /// Provides the ability to charge for extrinsic execution. #[runtime::pallet_index(4)] - pub type TransactionPayment = pallet_transaction_payment; + pub type TransactionPayment = pallet_transaction_payment::Pallet; /// A minimal pallet template. #[runtime::pallet_index(5)] - pub type Template = pallet_minimal_template; + pub type Template = pallet_minimal_template::Pallet; } parameter_types! 
{ diff --git a/templates/parachain/README.md b/templates/parachain/README.md index a6ac91799b777..802d8586b39e0 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -16,7 +16,7 @@ * โ˜๏ธ It is based on the [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework. -* ๐Ÿ”ง Its runtime is configured of a single custom pallet as a starting point, and a handful of ready-made pallets +* ๐Ÿ”ง Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). * ๐Ÿ‘‰ Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains) @@ -44,7 +44,7 @@ packages required to compile this template - please take note of the Rust compil ๐Ÿ”จ Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package parachain-template-node --release ``` ๐Ÿณ Alternatively, build the docker image: @@ -70,7 +70,7 @@ and `zombienet` - into `PATH` like so: export PATH="./target/release/:$PATH" ``` -This way, we can conveniently use them un the following steps. +This way, we can conveniently use them in the following steps. ๐Ÿ‘ฅ The following command starts a local development chain, with a single relay chain node and a single parachain collator: diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 1737c6a9df75e..7cf1f1fddc7b3 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -14,69 +14,69 @@ build = "build.rs" # name = "parachain-template-node" [dependencies] -clap = { version = "4.5.3", features = ["derive"] } +clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.6.12" } +codec = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.28" +jsonrpsee = { features = ["server"], workspace = true } +futures = { workspace = true } serde_json = { workspace = true, default-features = true } -docify = "0.2.8" +docify = { workspace = true } # Local -parachain-template-runtime = { path = "../runtime" } +parachain-template-runtime = { workspace = true } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } -pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } -sc-chain-spec = { path = "../../../substrate/client/chain-spec" } -sc-cli = { path = "../../../substrate/client/cli" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-network-sync = { path = "../../../substrate/client/network/sync" } -sc-rpc = { path = "../../../substrate/client/rpc" } -sc-service = { path = "../../../substrate/client/service" } -sc-sysinfo = { path = "../../../substrate/client/sysinfo" } -sc-telemetry = { path = 
"../../../substrate/client/telemetry" } -sc-tracing = { path = "../../../substrate/client/tracing" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } -sp-blockchain = { path = "../../../substrate/primitives/blockchain" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sp-core = { path = "../../../substrate/primitives/core" } -sp-keystore = { path = "../../../substrate/primitives/keystore" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } +frame-benchmarking = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } # Polkadot -polkadot-cli = { path = "../../../polkadot/cli", features = ["rococo-native"] } -polkadot-primitives = { path = "../../../polkadot/primitives" } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +polkadot-cli = { features = ["rococo-native"], workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +xcm = { workspace = true } # Cumulus -cumulus-client-cli = { path = "../../../cumulus/client/cli" } -cumulus-client-collator = { path = 
"../../../cumulus/client/collator" } -cumulus-client-consensus-aura = { path = "../../../cumulus/client/consensus/aura" } -cumulus-client-consensus-common = { path = "../../../cumulus/client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../../cumulus/client/consensus/proposer" } -cumulus-client-service = { path = "../../../cumulus/client/service" } -cumulus-primitives-core = { path = "../../../cumulus/primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../../cumulus/primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../../../cumulus/client/relay-chain-interface" } -color-print = "0.3.4" +cumulus-client-cli = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } +cumulus-client-consensus-common = { workspace = true, default-features = true } +cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-service = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } +color-print = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/parachain/node/README.md b/templates/parachain/node/README.md index 350272c7b6efe..ad309d4015aab 100644 --- a/templates/parachain/node/README.md +++ b/templates/parachain/node/README.md @@ -7,7 +7,7 @@ โš™๏ธ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain. -๐Ÿ‘‰ Learn more about the architecture, and a difference between a node and a runtime +๐Ÿ‘‰ Learn more about the architecture, and the difference between a node and a runtime [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html). 
๐Ÿ‘‡ Here are the most important files in this node template: diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 587dd19faf3e8..3e7d4de105535 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -12,6 +12,7 @@ use parachain_template_runtime::{ // Cumulus Imports use cumulus_client_collator::service::CollatorService; +#[docify::export(lookahead_collator)] use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; @@ -20,6 +21,7 @@ use cumulus_client_service::{ BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, StartRelayChainTasksParams, }; +#[docify::export(cumulus_primitives)] use cumulus_primitives_core::{ relay_chain::{CollatorPair, ValidationCode}, ParaId, @@ -33,7 +35,6 @@ use sc_client_api::Backend; use sc_consensus::ImportQueue; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; -use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -170,7 +171,6 @@ fn start_consensus( task_manager: &TaskManager, relay_chain_interface: Arc, transaction_pool: Arc>, - sync_oracle: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -204,7 +204,6 @@ fn start_consensus( code_hash_provider: move |block_hash| { client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, - sync_oracle, keystore, collator_key, para_id, @@ -215,11 +214,9 @@ fn start_consensus( authoring_duration: Duration::from_millis(2000), reinitialize: false, }; - - let fut = - aura::run::( - params, - ); + let fut = aura::run::( + params, + ); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -396,7 +393,6 @@ pub async fn start_parachain_node( &task_manager, relay_chain_interface, transaction_pool, - sync_service, params.keystore_container.keystore(), relay_chain_slot_duration, para_id, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 6c549c2c4a9b6..dde863101372e 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -13,22 +13,24 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # frame deps -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +# primitive deps +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { path = 
"../../../../substrate/primitives/core" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -40,12 +42,12 @@ runtime-benchmarks = [ ] std = [ "codec/std", + "scale-info/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", + "sp-runtime/std", ] try-runtime = [ diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index d1a9554aed6dc..5acad6e60decc 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -1,34 +1,33 @@ //! Benchmarking setup for pallet-template #![cfg(feature = "runtime-benchmarks")] -use super::*; -#[allow(unused)] -use crate::Pallet as Template; +use super::*; use frame_benchmarking::v2::*; -use frame_system::RawOrigin; #[benchmarks] mod benchmarks { use super::*; + #[cfg(test)] + use crate::pallet::Pallet as Template; + use frame_system::RawOrigin; #[benchmark] fn do_something() { - let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] - do_something(RawOrigin::Signed(caller), value); + do_something(RawOrigin::Signed(caller), 100); - assert_eq!(Something::::get(), Some(value)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(100u32.into())); } #[benchmark] fn cause_error() { - Something::::put(100u32); + Something::::put(CompositeStruct { block_number: 100u32.into() }); let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] cause_error(RawOrigin::Signed(caller)); - assert_eq!(Something::::get(), Some(101u32)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(101u32.into())); } impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/templates/parachain/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs index 11587d1df426f..6bfb98972aedf 100644 --- a/templates/parachain/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -1,8 +1,52 @@ +//! # Template Pallet +//! +//! A pallet with minimal functionality to help developers understand the essential components of +//! writing a FRAME pallet. It is typically used in beginner tutorials or in Polkadot SDK template +//! as a starting point for creating a new pallet and **not meant to be used in production**. +//! +//! ## Overview +//! +//! This template pallet contains basic examples of: +//! - declaring a storage item that stores a single block-number +//! - declaring and using events +//! - declaring and using errors +//! - a dispatchable function that allows a user to set a new value to storage and emits an event +//! upon success +//! - another dispatchable function that causes a custom error to be thrown +//! +//! Each pallet section is annotated with an attribute using the `#[pallet::...]` procedural macro. +//! This macro generates the necessary code for a pallet to be aggregated into a FRAME runtime. +//! +//! To get started with pallet development, consider using this tutorial: +//! +//! +//! +//! And reading the main documentation of the `frame` crate: +//! +//! +//! +//! And looking at the frame [`kitchen-sink`](https://paritytech.github.io/polkadot-sdk/master/pallet_example_kitchensink/index.html) +//! 
pallet, a showcase of all pallet macros. +//! +//! ### Pallet Sections +//! +//! The pallet sections in this template are: +//! +//! - A **configuration trait** that defines the types and parameters which the pallet depends on +//! (denoted by the `#[pallet::config]` attribute). See: [`Config`]. +//! - A **means to store pallet-specific data** (denoted by the `#[pallet::storage]` attribute). +//! See: [`storage_types`]. +//! - A **declaration of the events** this pallet emits (denoted by the `#[pallet::event]` +//! attribute). See: [`Event`]. +//! - A **declaration of the errors** that this pallet can throw (denoted by the `#[pallet::error]` +//! attribute). See: [`Error`]. +//! - A **set of dispatchable functions** that define the pallet's functionality (denoted by the +//! `#[pallet::call]` attribute). See: [`dispatchables`]. +//! +//! Run `cargo doc --package pallet-template --open` to view this pallet's documentation. + #![cfg_attr(not(feature = "std"), no_std)] -/// Edit this file to define custom logic or remove it if it is not needed. -/// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// pub use pallet::*; #[cfg(test)] @@ -16,16 +60,25 @@ pub mod weights; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +// +// +// +// To see a full list of `pallet` macros and their use cases, see: +// +// #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*}; + use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*, DefaultNoBound}; use frame_system::pallet_prelude::*; + use sp_runtime::traits::{CheckedAdd, One}; /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. type WeightInfo: crate::weights::WeightInfo; } @@ -33,24 +86,34 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); - // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + /// A struct to store a single block-number. Has all the right derives to store it in storage. + /// + #[derive( + Encode, Decode, MaxEncodedLen, TypeInfo, CloneNoBound, PartialEqNoBound, DefaultNoBound, + )] + #[scale_info(skip_type_params(T))] + pub struct CompositeStruct { + /// A block number. + pub(crate) block_number: BlockNumberFor, + } + + /// The pallet's storage items. + /// + /// #[pallet::storage] - // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items - pub type Something = StorageValue<_, u32>; + pub type Something = StorageValue<_, CompositeStruct>; - // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/v3/runtime/events-and-errors + /// Pallets use events to inform users when important changes are made. + /// #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Event documentation should end with an array that provides descriptive names for event - /// parameters. [something, who] - SomethingStored(u32, T::AccountId), + /// We usually use passive tense for events. + SomethingStored { block_number: BlockNumberFor, who: T::AccountId }, } - // Errors inform users that something went wrong. 
+ /// Errors inform users that something went wrong. + /// #[pallet::error] pub enum Error { /// Error names should be descriptive. @@ -62,27 +125,33 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet {} - // Dispatchable functions allows users to interact with the pallet and invoke state changes. - // These functions materialize as "extrinsics", which are often compared to transactions. - // Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// Dispatchable functions allows users to interact with the pallet and invoke state changes. + /// These functions materialize as "extrinsics", which are often compared to transactions. + /// Dispatchable functions must be annotated with a weight and must return a DispatchResult. + /// #[pallet::call] impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. #[pallet::call_index(0)] #[pallet::weight(Weight::from_parts(10_000, 0) + T::DbWeight::get().writes(1))] - pub fn do_something(origin: OriginFor, something: u32) -> DispatchResultWithPostInfo { + pub fn do_something(origin: OriginFor, bn: u32) -> DispatchResultWithPostInfo { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. - // https://docs.substrate.io/v3/runtime/origins + // let who = ensure_signed(origin)?; + // Convert the u32 into a block number. This is possible because the set of trait bounds + // defined in [`frame_system::Config::BlockNumber`]. + let block_number: BlockNumberFor = bn.into(); + // Update storage. - >::put(something); + >::put(CompositeStruct { block_number }); // Emit an event. - Self::deposit_event(Event::SomethingStored(something, who)); - // Return a successful DispatchResultWithPostInfo + Self::deposit_event(Event::SomethingStored { block_number, who }); + + // Return a successful [`DispatchResultWithPostInfo`] or [`DispatchResult`]. Ok(().into()) } @@ -96,11 +165,19 @@ pub mod pallet { match >::get() { // Return an error if the value has not been set. None => Err(Error::::NoneValue)?, - Some(old) => { + Some(mut old) => { // Increment the value read from storage; will error in the event of overflow. - let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; + old.block_number = old + .block_number + .checked_add(&One::one()) + // ^^ equivalent is to: + // .checked_add(&1u32.into()) + // both of which build a `One` instance for the type `BlockNumber`. + .ok_or(Error::::StorageOverflow)?; // Update the value in storage with the incremented result. - >::put(new); + >::put(old); + // Explore how you can rewrite this using + // [`frame_support::storage::StorageValue::mutate`]. Ok(().into()) }, } diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index ebb0598df97bf..46e3117596f59 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -1,25 +1,36 @@ -use frame_support::{derive_impl, parameter_types}; -use frame_system as system; -use sp_runtime::BuildStorage; - -type Block = frame_system::mocking::MockBlock; +use frame_support::{derive_impl, weights::constants::RocksDbWeight}; +use frame_system::{mocking::MockBlock, GenesisConfig}; +use sp_runtime::{traits::ConstU64, BuildStorage}; // Configure a mock runtime to test the pallet. 
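Before moving on to the mock runtime below: the comment added to `cause_error` a few lines up suggests rewriting the read-increment-write with `frame_support::storage::StorageValue::mutate`. A rough sketch of that variant, assuming it replaces the body of the same dispatchable inside this template's `#[pallet::call]` impl (with `CheckedAdd` and `One` already imported as in the diff), might look like:

```rust
// Sketch only, not part of the diff: the same increment expressed with
// `try_mutate`, so the read-modify-write happens through one storage accessor
// and nothing is written when the closure returns an error.
#[pallet::call_index(1)]
#[pallet::weight(Weight::from_parts(10_000, 0) + T::DbWeight::get().reads_writes(1, 1))]
pub fn cause_error(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
	let _who = ensure_signed(origin)?;

	Something::<T>::try_mutate(|maybe_stored| -> DispatchResultWithPostInfo {
		// Return an error if the value has never been set.
		let stored = maybe_stored.as_mut().ok_or(Error::<T>::NoneValue)?;
		// Increment, erroring on overflow instead of wrapping.
		stored.block_number = stored
			.block_number
			.checked_add(&One::one())
			.ok_or(Error::<T>::StorageOverflow)?;
		Ok(().into())
	})
}
```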
-frame_support::construct_runtime!( - pub enum Test - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - TemplateModule: crate::{Pallet, Call, Storage, Event}, - } -); +#[frame_support::runtime] +mod test_runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Test; -parameter_types! { - pub const SS58Prefix: u8 = 42; + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type TemplateModule = crate; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl system::Config for Test { - type Block = Block; +impl frame_system::Config for Test { + type Nonce = u64; + type Block = MockBlock; + type BlockHashCount = ConstU64<250>; + type DbWeight = RocksDbWeight; } impl crate::Config for Test { @@ -29,5 +40,5 @@ impl crate::Config for Test { // Build genesis storage according to the mock runtime. pub fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::::default().build_storage().unwrap().into() + GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/templates/parachain/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs index 9ad3076be2cc9..a4a41af63c2e9 100644 --- a/templates/parachain/pallets/template/src/tests.rs +++ b/templates/parachain/pallets/template/src/tests.rs @@ -7,7 +7,7 @@ fn it_works_for_default_value() { // Dispatch a signed extrinsic. assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. - assert_eq!(Something::::get(), Some(42)); + assert_eq!(Something::::get().map(|v| v.block_number), Some(42)); }); } diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs index 7c42936e09f29..5bfe28e8b71e3 100644 --- a/templates/parachain/pallets/template/src/weights.rs +++ b/templates/parachain/pallets/template/src/weights.rs @@ -4,7 +4,7 @@ //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! HOSTNAME: `_`, CPU: `` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 059c793679694..939fa245d2a0c 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -13,80 +13,79 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } -docify = "0.2.8" +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } +docify = { workspace = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -hex-literal = { version = "0.4.1", optional = true } +], workspace = true } +hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { version = "2.11.1", default-features = false, features = [ +scale-info = { features = [ "derive", -] } -smallvec = "1.11.0" -docify = "0.2.8" +], workspace = true } +smallvec = { workspace = true, default-features = true } +docify = { workspace = true } # Local -pallet-parachain-template = { path = "../pallets/template", default-features = false } +pallet-parachain-template = { workspace = true } # Substrate / FRAME -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } -frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } # FRAME Pallets -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api 
= { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # Substrate Primitives -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false } -cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } -cumulus-primitives-aura = { path = 
"../../../cumulus/primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../../cumulus/pallets/collator-selection", default-features = false } -parachains-common = { path = "../../../cumulus/parachains/common", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../cumulus/parachains/pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachains-common = { workspace = true } +parachain-info = { workspace = true } [features] default = ["std"] @@ -136,7 +135,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index 107956ded4104..f5d5d3e63027b 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -24,6 +24,7 @@ // For more information, please refer to // External crates imports +use alloc::vec::Vec; use frame_support::{ genesis_builder_helper::{build_state, get_preset}, weights::Weight, @@ -37,7 +38,6 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; -use sp_std::prelude::Vec; use sp_version::RuntimeVersion; // Local module imports @@ -47,10 +47,26 @@ use super::{ SLOT_DURATION, VERSION, }; +// we move some impls outside so we can easily use them with `docify`. +impl Runtime { + #[docify::export] + fn impl_slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + } + + #[docify::export] + fn impl_can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } +} + impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + Runtime::impl_slot_duration() } fn authorities() -> Vec { @@ -63,7 +79,7 @@ impl_runtime_apis! { included_hash: ::Hash, slot: cumulus_primitives_aura::Slot, ) -> bool { - ConsensusHook::can_build_upon(included_hash, slot) + Runtime::impl_can_build_upon(included_hash, slot) } } @@ -90,7 +106,7 @@ impl_runtime_apis! { Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } @@ -248,7 +264,7 @@ impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + fn setup_set_code_requirements(code: &Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); Ok(()) } diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 63e6a67a90638..204e74a11e6a1 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -32,7 +32,9 @@ use frame_support::{ derive_impl, dispatch::DispatchClass, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin, VariantCountOf, + }, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -154,8 +156,8 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; } parameter_types! { @@ -263,6 +265,7 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } +#[docify::export(aura_config)] impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 987b88af8444d..f53871bb18502 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -7,9 +7,13 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod apis; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarks; mod configs; mod weights; +extern crate alloc; +use alloc::vec::Vec; use smallvec::smallvec; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, @@ -17,17 +21,13 @@ use sp_runtime::{ MultiSignature, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::{ - construct_runtime, - weights::{ - constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, +use frame_support::weights::{ + constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, + WeightToFeePolynomial, }; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; @@ -169,17 +169,21 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -/// This determines the average expected block time that we are targeting. -/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. -/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked -/// up by `pallet_aura` to implement `fn slot_duration()`. -/// -/// Change this to adjust the block time. -pub const MILLISECS_PER_BLOCK: u64 = 6000; - -// NOTE: Currently it is not possible to change the slot duration after the chain has started. -// Attempting to do so will brick block production. -pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +#[docify::export] +mod block_times { + /// This determines the average expected block time that we are targeting. Blocks will be + /// produced at a minimum duration defined by `SLOT_DURATION`. 
`SLOT_DURATION` is picked up by + /// `pallet_timestamp` which is in turn picked up by `pallet_aura` to implement `fn + /// slot_duration()`. + /// + /// Change this to adjust the block time. + pub const MILLISECS_PER_BLOCK: u64 = 6000; + + // NOTE: Currently it is not possible to change the slot duration after the chain has started. + // Attempting to do so will brick block production. + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +} +pub use block_times::*; // Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); @@ -202,21 +206,27 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5); /// `Operational` extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +#[docify::export(max_block_weight)] /// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included -/// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; -/// How many parachain blocks are processed by the relay chain per parent. Limits the -/// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; -/// Relay chain slot duration, in milliseconds. -const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +#[docify::export] +mod async_backing_params { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub(crate) const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub(crate) const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub(crate) const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} +pub(crate) use async_backing_params::*; +#[docify::export] /// Aura consensus hook type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, @@ -232,43 +242,70 @@ pub fn native_version() -> NativeVersion { } // Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime { - // System support stuff. - System: frame_system = 0, - ParachainSystem: cumulus_pallet_parachain_system = 1, - Timestamp: pallet_timestamp = 2, - ParachainInfo: parachain_info = 3, - - // Monetary stuff. - Balances: pallet_balances = 10, - TransactionPayment: pallet_transaction_payment = 11, - - // Governance - Sudo: pallet_sudo = 15, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship = 20, - CollatorSelection: pallet_collator_selection = 21, - Session: pallet_session = 22, - Aura: pallet_aura = 23, - AuraExt: cumulus_pallet_aura_ext = 24, - - // XCM helpers. 
- XcmpQueue: cumulus_pallet_xcmp_queue = 30, - PolkadotXcm: pallet_xcm = 31, - CumulusXcm: cumulus_pallet_xcm = 32, - MessageQueue: pallet_message_queue = 33, - - // Template - TemplatePallet: pallet_parachain_template = 50, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type ParachainSystem = cumulus_pallet_parachain_system; + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type ParachainInfo = parachain_info; + + // Monetary stuff. + #[runtime::pallet_index(10)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(11)] + pub type TransactionPayment = pallet_transaction_payment; + + // Governance + #[runtime::pallet_index(15)] + pub type Sudo = pallet_sudo; + + // Collator support. The order of these 4 are important and shall not change. + #[runtime::pallet_index(20)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(21)] + pub type CollatorSelection = pallet_collator_selection; + #[runtime::pallet_index(22)] + pub type Session = pallet_session; + #[runtime::pallet_index(23)] + pub type Aura = pallet_aura; + #[runtime::pallet_index(24)] + pub type AuraExt = cumulus_pallet_aura_ext; + + // XCM helpers. + #[runtime::pallet_index(30)] + pub type XcmpQueue = cumulus_pallet_xcmp_queue; + #[runtime::pallet_index(31)] + pub type PolkadotXcm = pallet_xcm; + #[runtime::pallet_index(32)] + pub type CumulusXcm = cumulus_pallet_xcm; + #[runtime::pallet_index(33)] + pub type MessageQueue = pallet_message_queue; + + // Template + #[runtime::pallet_index(50)] + pub type TemplatePallet = pallet_parachain_template; +} +#[docify::export(register_validate_block)] cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarks; diff --git a/templates/solochain/README.md b/templates/solochain/README.md index 2e3b1146a8fde..c5dc5db7f3b51 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -28,7 +28,7 @@ installation](#alternatives-installations) options. 
Use the following command to build the node without launching it: ```sh -cargo build --release +cargo build --package solochain-template-node --release ``` ### Embedded Docs @@ -37,7 +37,7 @@ After you build the project, you can use the following command to explore its parameters and subcommands: ```sh -./target/release/node-template -h +./target/release/solochain-template-node -h ``` You can generate and view the [Rust @@ -54,19 +54,19 @@ The following command starts a single-node development chain that doesn't persist state: ```sh -./target/release/node-template --dev +./target/release/solochain-template-node --dev ``` To purge the development chain's state, run the following command: ```sh -./target/release/node-template purge-chain --dev +./target/release/solochain-template-node purge-chain --dev ``` To start the development chain with detailed logging, run the following command: ```sh -RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev +RUST_BACKTRACE=1 ./target/release/solochain-template-node -ldebug --dev ``` Development chains: @@ -75,7 +75,7 @@ Development chains: - Use the **Alice** and **Bob** accounts as default validator authorities. - Use the **Alice** account as the default `sudo` account. - Are preconfigured with a genesis state (`/node/src/chain_spec.rs`) that - includes several prefunded development accounts. + includes several pre-funded development accounts. To persist chain state between runs, specify a base path by running a command @@ -86,7 +86,7 @@ similar to the following: $ mkdir my-chain-state // Use of that folder to store the chain state -$ ./target/release/node-template --dev --base-path ./my-chain-state/ +$ ./target/release/solochain-template-node --dev --base-path ./my-chain-state/ // Check the folder structure created inside the base path after running the chain $ ls ./my-chain-state @@ -142,7 +142,7 @@ following: file that defines a Substrate chain's initial (genesis) state. Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. Take note of the - `development_config` and `testnet_genesis` functions,. These functions are + `development_config` and `testnet_genesis` functions. These functions are used to define the genesis state for the local development chain configuration. 
These functions identify some [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 4e8b81840900d..068284c6c3ea7 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -15,54 +15,54 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.30", features = ["thread-pool"] } +clap = { features = ["derive"], workspace = true } +futures = { features = ["thread-pool"], workspace = true } serde_json = { workspace = true, default-features = true } -jsonrpsee = { version = "0.22", features = ["server"] } +jsonrpsee = { features = ["server"], workspace = true } # substrate client -sc-cli = { path = "../../../substrate/client/cli" } -sp-core = { path = "../../../substrate/primitives/core" } -sc-executor = { path = "../../../substrate/client/executor" } -sc-network = { path = "../../../substrate/client/network" } -sc-service = { path = "../../../substrate/client/service" } -sc-telemetry = { path = "../../../substrate/client/telemetry" } -sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } -sc-offchain = { path = "../../../substrate/client/offchain" } -sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -sc-consensus = { path = "../../../substrate/client/consensus/common" } -sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } -sc-client-api = { path = "../../../substrate/client/api" } -sc-rpc-api = { path = "../../../substrate/client/rpc-api" } -sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } +sc-cli = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } # substrate primitives -sp-runtime = { path = "../../../substrate/primitives/runtime" } -sp-io = { path = "../../../substrate/primitives/io" } -sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -sp-inherents = { path = "../../../substrate/primitives/inherents" } -sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-api = { path = "../../../substrate/primitives/api" } -sp-blockchain = { path = 
"../../../substrate/primitives/blockchain" } -sp-block-builder = { path = "../../../substrate/primitives/block-builder" } +sp-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } # frame and pallets -frame-system = { path = "../../../substrate/frame/system" } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } -substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } +frame-system = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } # These dependencies are used for runtime benchmarking -frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } +frame-benchmarking-cli = { workspace = true, default-features = true } # Local Dependencies -solochain-template-runtime = { path = "../runtime" } +solochain-template-runtime = { workspace = true } [build-dependencies] -substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } +substrate-build-script-utils = { workspace = true, default-features = true } [features] default = [] diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 5b8349b5d678c..e658a30d36848 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -13,22 +13,22 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", -] } +], workspace = true } # frame deps -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../substrate/frame/support", default-features = false } -frame-system = { path = "../../../../substrate/frame/system", default-features = false } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } [dev-dependencies] -sp-core = { path = "../../../../substrate/primitives/core" } -sp-io = { path = "../../../../substrate/primitives/io" } -sp-runtime = { path = "../../../../substrate/primitives/runtime" } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 0af3899a66699..f06c80b9a0321 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -13,67 
+13,66 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ +codec = { features = [ "derive", -] } -scale-info = { version = "2.11.1", default-features = false, features = [ +], workspace = true } +scale-info = { features = [ "derive", "serde", -] } +], workspace = true } # frame -frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental"] } -frame-system = { path = "../../../substrate/frame/system", default-features = false } -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } -frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +frame-executive = { workspace = true } # frame pallets -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } -pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-aura = { workspace = true } +pallet-balances = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } # primitives -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false, features = [ +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { features = [ "serde", -] } -sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = [ +], workspace = true } +sp-consensus-grandpa = { features = [ "serde", -] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [ +], workspace = true } +sp-core = { features = [ "serde", -] } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [ +], workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { features = [ "serde", -] } -sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } -sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ +], workspace = 
true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { features = [ "serde", -] } -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } +], workspace = true } +sp-genesis-builder = { workspace = true } # RPC related -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +frame-system-rpc-runtime-api = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # Used for runtime benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-benchmarking = { optional = true, workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } # The pallet in this template. -pallet-template = { path = "../pallets/template", default-features = false } +pallet-template = { workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [features] default = ["std"] @@ -109,7 +108,6 @@ std = [ "sp-offchain/std", "sp-runtime/std", "sp-session/std", - "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index 93a56fb0ad78f..9de95ac956951 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -3,6 +3,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +extern crate alloc; +use alloc::{vec, vec::Vec}; use pallet_grandpa::AuthorityId as GrandpaId; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -13,12 +15,10 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; -use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use frame_support::genesis_builder_helper::{build_state, get_preset}; pub use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -33,6 +33,10 @@ pub use frame_support::{ }, StorageValue, }; +use frame_support::{ + genesis_builder_helper::{build_state, get_preset}, + traits::VariantCountOf, +}; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; @@ -218,10 +222,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ConstU128; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeHoldReason; } parameter_types! { @@ -367,7 +371,7 @@ impl_runtime_apis! 
{ Runtime::metadata_at_version(version) } - fn metadata_versions() -> sp_std::vec::Vec { + fn metadata_versions() -> Vec { Runtime::metadata_versions() } } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index d790b4f5949ca..94ba09421d409 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -68,6 +68,7 @@ std = [ "pallet-asset-conversion?/std", "pallet-asset-rate?/std", "pallet-asset-tx-payment?/std", + "pallet-assets-freezer?/std", "pallet-assets?/std", "pallet-atomic-swap?/std", "pallet-aura?/std", @@ -239,7 +240,7 @@ std = [ "substrate-bip39?/std", "testnet-parachains-constants?/std", "westend-runtime-constants?/std", - "xcm-fee-payment-runtime-api?/std", + "xcm-runtime-apis?/std", ] runtime-benchmarks = [ "assets-common?/runtime-benchmarks", @@ -263,6 +264,7 @@ runtime-benchmarks = [ "pallet-asset-conversion?/runtime-benchmarks", "pallet-asset-rate?/runtime-benchmarks", "pallet-asset-tx-payment?/runtime-benchmarks", + "pallet-assets-freezer?/runtime-benchmarks", "pallet-assets?/runtime-benchmarks", "pallet-babe?/runtime-benchmarks", "pallet-bags-list?/runtime-benchmarks", @@ -363,7 +365,7 @@ runtime-benchmarks = [ "staging-node-inspect?/runtime-benchmarks", "staging-xcm-builder?/runtime-benchmarks", "staging-xcm-executor?/runtime-benchmarks", - "xcm-fee-payment-runtime-api?/runtime-benchmarks", + "xcm-runtime-apis?/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext?/try-runtime", @@ -385,6 +387,7 @@ try-runtime = [ "pallet-asset-conversion?/try-runtime", "pallet-asset-rate?/try-runtime", "pallet-asset-tx-payment?/try-runtime", + "pallet-assets-freezer?/try-runtime", "pallet-assets?/try-runtime", "pallet-atomic-swap?/try-runtime", "pallet-aura?/try-runtime", @@ -536,7 +539,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", 
"pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", 
"sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-fee-payment-runtime-api", "xcm-procedural"] +runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", 
"pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-procedural", "xcm-runtime-apis"] node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", 
"polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", @@ -854,6 +857,11 @@ path = "../substrate/frame/assets" default-features = false optional = true +[dependencies.pallet-assets-freezer] +path = "../substrate/frame/assets-freezer" +default-features = false +optional = true + [dependencies.pallet-atomic-swap] path = "../substrate/frame/atomic-swap" default-features = false @@ -1724,13 +1732,13 @@ path = "../polkadot/runtime/westend/constants" default-features = false optional = true -[dependencies.xcm-fee-payment-runtime-api] -path = "../polkadot/xcm/xcm-fee-payment-runtime-api" +[dependencies.xcm-procedural] +path = "../polkadot/xcm/procedural" default-features = false optional = true -[dependencies.xcm-procedural] -path = "../polkadot/xcm/procedural" +[dependencies.xcm-runtime-apis] +path = 
"../polkadot/xcm/xcm-runtime-apis" default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 78b34ba179b7d..58a5691961d9b 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -360,6 +360,10 @@ pub use pallet_asset_tx_payment; #[cfg(feature = "pallet-assets")] pub use pallet_assets; +/// Provides freezing features to `pallet-assets`. +#[cfg(feature = "pallet-assets-freezer")] +pub use pallet_assets_freezer; + /// FRAME atomic swap pallet. #[cfg(feature = "pallet-atomic-swap")] pub use pallet_atomic_swap; @@ -1556,14 +1560,14 @@ pub use westend_runtime_constants; #[cfg(feature = "xcm-emulator")] pub use xcm_emulator; -/// XCM fee payment runtime API. -#[cfg(feature = "xcm-fee-payment-runtime-api")] -pub use xcm_fee_payment_runtime_api; - /// Procedural macros for XCM. #[cfg(feature = "xcm-procedural")] pub use xcm_procedural; +/// XCM runtime APIs. +#[cfg(feature = "xcm-runtime-apis")] +pub use xcm_runtime_apis; + /// Test kit to simulate cross-chain message passing and XCM execution. #[cfg(feature = "xcm-simulator")] pub use xcm_simulator;