diff --git a/.config/taplo.toml b/.config/taplo.toml index 4b8afc74a52ea..7191d0019274d 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -6,6 +6,7 @@ exclude = [ "cumulus/zombienet/**", "polkadot/node/malus/integrationtests/**", "polkadot/zombienet_tests/**", + "substrate/client/transaction-pool/tests/zombienet/**", "substrate/zombienet/**", "target/**", ] diff --git a/.github/workflows/cmd-run.yml b/.github/workflows/cmd-run.yml new file mode 100644 index 0000000000000..ea5c0bf6d1d2b --- /dev/null +++ b/.github/workflows/cmd-run.yml @@ -0,0 +1,396 @@ +name: Command - Run + +on: + workflow_dispatch: + inputs: + cmd: + description: "Command to run" + required: true + pr_num: + description: "PR number" + required: true + pr_branch: + description: "PR branch" + required: true + runner: + description: "Runner to use" + required: true + image: + description: "Image to use" + required: true + is_org_member: + description: "Is the user an org member" + required: true + repo: + description: "Repository to use" + required: true + comment_id: + description: "Comment ID" + required: true + is_quiet: + description: "Quiet mode" + required: false + default: "false" + +permissions: # allow the action to comment on the PR + contents: read + issues: write + pull-requests: write + actions: read + +jobs: + before-cmd: + runs-on: ubuntu-latest + env: + JOB_NAME: "cmd" + CMD: ${{ github.event.inputs.cmd }} + PR_BRANCH: ${{ github.event.inputs.pr_branch }} + PR_NUM: ${{ github.event.inputs.pr_num }} + outputs: + job_url: ${{ steps.build-link.outputs.job_url }} + run_url: ${{ steps.build-link.outputs.run_url }} + steps: + - name: Build workflow link + if: ${{ github.event.inputs.is_quiet == 'false' }} + id: build-link + run: | + # Get exactly the CMD job link, filtering out the other jobs + jobLink=$(curl -s \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs | jq '.jobs[] | select(.name | contains("${{ env.JOB_NAME }}")) | .html_url') + + runLink=$(curl -s \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }} | jq '.html_url') + + echo "job_url=${jobLink}" + echo "run_url=${runLink}" + echo "job_url=$jobLink" >> $GITHUB_OUTPUT + echo "run_url=$runLink" >> $GITHUB_OUTPUT + + - name: Comment PR (Start) + # No need to comment on prdoc start or if --quiet + if: ${{ github.event.inputs.is_quiet == 'false' && !startsWith(github.event.inputs.cmd, 'prdoc') && !startsWith(github.event.inputs.cmd, 'fmt')}} + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + let job_url = ${{ steps.build-link.outputs.job_url }} + let cmd = process.env.CMD; + github.rest.issues.createComment({ + issue_number: ${{ env.PR_NUM }}, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Command "${cmd}" has started 🚀 [See logs here](${job_url})` + }) + + - name: Debug info + env: + CMD: ${{ github.event.inputs.cmd }} + PR_BRANCH: ${{ github.event.inputs.pr_branch }} + PR_NUM: ${{ github.event.inputs.pr_num }} + RUNNER: ${{ github.event.inputs.runner }} + IMAGE: ${{ github.event.inputs.image }} + IS_ORG_MEMBER: ${{ github.event.inputs.is_org_member }} + REPO: ${{ github.event.inputs.repo }} + COMMENT_ID: ${{ github.event.inputs.comment_id }} + IS_QUIET: ${{ github.event.inputs.is_quiet }} 
+ run: | + echo "Running command: $CMD" + echo "PR number: $PR_NUM" + echo "PR branch: $PR_BRANCH" + echo "Runner: $RUNNER" + echo "Image: $IMAGE" + echo "Is org member: $IS_ORG_MEMBER" + echo "Repository: $REPO" + echo "Comment ID: $COMMENT_ID" + echo "Is quiet: $IS_QUIET" + + cmd: + needs: [before-cmd] + env: + CMD: ${{ github.event.inputs.cmd }} + PR_BRANCH: ${{ github.event.inputs.pr_branch }} + PR_NUM: ${{ github.event.inputs.pr_num }} + REPO: ${{ github.event.inputs.repo }} + runs-on: ${{ github.event.inputs.runner }} + container: + image: ${{ github.event.inputs.image }} + timeout-minutes: 1440 # 24 hours per runtime + # lowerdown permissions to separate permissions context for executable parts by contributors + permissions: + contents: read + pull-requests: none + actions: none + issues: none + outputs: + cmd_output: ${{ steps.cmd.outputs.cmd_output }} + subweight: ${{ steps.subweight.outputs.result }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + repository: ${{ env.REPO }} + ref: ${{ env.PR_BRANCH }} + + # In order to run prdoc without specifying the PR number, we need to add the PR number as an argument automatically + - name: Prepare PR Number argument + id: pr-arg + run: | + CMD="${CMD}" + if echo "$CMD" | grep -q "prdoc" && ! echo "$CMD" | grep -qE "\-\-pr[[:space:]=][0-9]+"; then + echo "arg=--pr ${PR_NUM}" >> $GITHUB_OUTPUT + else + echo "arg=" >> $GITHUB_OUTPUT + fi + + - name: Run cmd + id: cmd + env: + PR_ARG: ${{ steps.pr-arg.outputs.arg }} + IS_ORG_MEMBER: ${{ github.event.inputs.is_org_member }} + RUNNER: ${{ github.event.inputs.runner }} + IMAGE: ${{ github.event.inputs.image }} + run: | + echo "Running command: '${CMD} ${PR_ARG}' on '${RUNNER}' runner, container: '${IMAGE}'" + echo "RUST_NIGHTLY_VERSION: ${RUST_NIGHTLY_VERSION}" + echo "IS_ORG_MEMBER: ${IS_ORG_MEMBER}" + + git config --global --add safe.directory $GITHUB_WORKSPACE + git config user.name "cmd[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + + # if the user is not an org member, we need to use the bot's path from master to avoid unwanted modifications + if [ "${IS_ORG_MEMBER}" = "true" ]; then + # safe to run commands from current branch + BOT_PATH=.github + else + # going to run commands from master + TMP_DIR=/tmp/polkadot-sdk + git clone --depth 1 --branch master https://github.com/paritytech/polkadot-sdk $TMP_DIR + BOT_PATH=$TMP_DIR/.github + fi + + # install deps and run a command from master + python3 -m pip install -r $BOT_PATH/scripts/generate-prdoc.requirements.txt + python3 $BOT_PATH/scripts/cmd/cmd.py $CMD $PR_ARG + git status + git diff + + if [ -f /tmp/cmd/command_output.log ]; then + CMD_OUTPUT=$(cat /tmp/cmd/command_output.log) + # export to summary to display in the PR + echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY + # should be multiline, otherwise it captures the first line only + echo 'cmd_output<> $GITHUB_OUTPUT + echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT + echo 'EOF' >> $GITHUB_OUTPUT + fi + + git add -A + git diff HEAD > /tmp/cmd/command_diff.patch -U0 + git commit -m "tmp cmd: $CMD" || true + # without push, as we're saving the diff to an artifact and subweight will compare the local branch with the remote branch + + - name: Upload command output + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: command-output + path: /tmp/cmd/command_output.log + + - name: Upload command diff + uses: actions/upload-artifact@v4 + with: + name: command-diff + path: /tmp/cmd/command_diff.patch + + - name: Install subweight 
for bench + if: startsWith(github.event.inputs.cmd, 'bench') + run: cargo install subweight + + # TODO: fix for forks, refs/remotes/origin/master should be replaced with master branch from paritytech/polkadot-sdk + - name: Run Subweight for bench + id: subweight + if: startsWith(github.event.inputs.cmd, 'bench') + shell: bash + run: | + git fetch + git remote -v + echo $(git log -n 2 --oneline) + + result=$(subweight compare commits \ + --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \ + --method asymptotic \ + --format markdown \ + --no-color \ + --change added changed \ + --ignore-errors \ + refs/remotes/origin/master $PR_BRANCH) + + # Save the multiline result to the output + { + echo "result<> $GITHUB_OUTPUT + + after-cmd: + needs: [cmd, before-cmd] + env: + CMD: ${{ github.event.inputs.cmd }} + PR_BRANCH: ${{ github.event.inputs.pr_branch }} + PR_NUM: ${{ github.event.inputs.pr_num }} + REPO: ${{ github.event.inputs.repo }} + runs-on: ubuntu-latest + steps: + # needs to be able to trigger CI, as default token does not retrigger + - uses: actions/create-github-app-token@v1 + id: generate_token + with: + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} + + - name: Checkout + uses: actions/checkout@v4 + with: + token: ${{ steps.generate_token.outputs.token }} + repository: ${{ env.REPO }} + ref: ${{ env.PR_BRANCH }} + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + name: command-diff + path: command-diff + + - name: Apply & Commit changes + run: | + ls -lsa . + + git config --global --add safe.directory $GITHUB_WORKSPACE + git config user.name "cmd[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --global pull.rebase false + + echo "Applying $file" + git apply "command-diff/command_diff.patch" --unidiff-zero --allow-empty + + rm -rf command-diff + + git status + + if [ -n "$(git status --porcelain)" ]; then + + git remote -v + + push_changes() { + git push origin "HEAD:$PR_BRANCH" + } + + git add . + git restore --staged Cargo.lock # ignore changes in Cargo.lock + git commit -m "Update from ${{ github.actor }} running command '$CMD'" || true + + # Attempt to push changes + if ! push_changes; then + echo "Push failed, trying to rebase..." + git pull --rebase origin $PR_BRANCH + # After successful rebase, try pushing again + push_changes + fi + else + echo "Nothing to commit"; + fi + + - name: Comment PR (End) + # No need to comment on prdoc success or --quiet + #TODO: return "&& !contains(github.event.comment.body, '--quiet')" + if: ${{ github.event.inputs.is_quiet == 'false' && needs.cmd.result == 'success' && !startsWith(github.event.inputs.cmd, 'prdoc') && !startsWith(github.event.inputs.cmd, 'fmt') }} + uses: actions/github-script@v7 + env: + SUBWEIGHT: "${{ needs.cmd.outputs.subweight }}" + CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}" + PR_NUM: ${{ github.event.inputs.pr_num }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + let runUrl = ${{ needs.before-cmd.outputs.run_url }}; + let subweight = process.env.SUBWEIGHT || ''; + let cmdOutput = process.env.CMD_OUTPUT || ''; + let cmd = process.env.CMD; + console.log(cmdOutput); + + let subweightCollapsed = subweight.trim() !== '' + ? `
<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>
` + : ''; + + let cmdOutputCollapsed = cmdOutput.trim() !== '' + ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + : ''; + + github.rest.issues.createComment({ + issue_number: ${{ env.PR_NUM }}, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Command "${cmd}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}` + }) + + finish: + needs: [before-cmd, cmd, after-cmd] + if: ${{ always() }} + runs-on: ubuntu-latest + env: + CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}" + CMD: ${{ github.event.inputs.cmd }} + PR_NUM: ${{ github.event.inputs.pr_num }} + COMMENT_ID: ${{ github.event.inputs.comment_id }} + steps: + - name: Comment PR (Failure) + if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' || needs.before-cmd.result == 'failure' }} + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + let jobUrl = ${{ needs.before-cmd.outputs.job_url }}; + let cmdOutput = process.env.CMD_OUTPUT; + let cmd = process.env.CMD; + let cmdOutputCollapsed = ''; + if (cmdOutput && cmdOutput.trim() !== '') { + cmdOutputCollapsed = `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + } + + github.rest.issues.createComment({ + issue_number: ${{ env.PR_NUM }}, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Command "${cmd}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}` + }) + + - name: Add 😕 reaction on failure + if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' || needs.before-cmd.result == 'failure' }} + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.rest.reactions.createForIssueComment({ + comment_id: ${{ env.COMMENT_ID }}, + owner: context.repo.owner, + repo: context.repo.repo, + content: 'confused' + }) + + - name: Add 👍 reaction on success + if: ${{ needs.cmd.result == 'success' && needs.after-cmd.result == 'success' && needs.before-cmd.result == 'success' }} + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.rest.reactions.createForIssueComment({ + comment_id: ${{ env.COMMENT_ID }}, + owner: context.repo.owner, + repo: context.repo.repo, + content: '+1' + }) diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 44a9a9f061193..14e98bbae1f87 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -4,7 +4,7 @@ on: issue_comment: # listen for comments on issues types: [created] -permissions: # allow the action to comment on the PR +permissions: # allow the action to comment in PR contents: read issues: write pull-requests: write @@ -55,7 +55,6 @@ jobs: return 'false'; - acknowledge: if: ${{ startsWith(github.event.comment.body, '/cmd') }} runs-on: ubuntu-latest @@ -124,7 +123,7 @@ jobs: with: text: ${{ github.event.comment.body }} regex: "^(\\/cmd )([-\\/\\s\\w.=:]+)$" # see explanation in docs/contributor/commands-readme.md#examples - + # Get PR branch name, because the issue_comment event does not contain the PR branch name - name: Check if the issue is a PR id: check-pr @@ -229,7 +228,7 @@ jobs: needs: [clean, get-pr-info] if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') }} runs-on: ubuntu-latest - env: + env: CMD: ${{ needs.get-pr-info.outputs.CMD }} outputs: IMAGE: ${{ steps.set-image.outputs.IMAGE }} @@ -262,326 +261,41 @@ jobs: echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}" echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}" - before-cmd: - needs: [set-image, get-pr-info] + run-cmd-workflow: + needs: [set-image, get-pr-info, is-org-member] runs-on: ubuntu-latest - env: - JOB_NAME: "cmd" - CMD: ${{ needs.get-pr-info.outputs.CMD }} - PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }} - outputs: - job_url: ${{ steps.build-link.outputs.job_url }} - run_url: ${{ steps.build-link.outputs.run_url }} - steps: - - name: Build workflow link - if: ${{ !contains(github.event.comment.body, '--quiet') }} - id: build-link - run: | - # Get exactly the CMD job link, filtering out the other jobs - jobLink=$(curl -s \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs | jq '.jobs[] | select(.name | contains("${{ env.JOB_NAME }}")) | .html_url') - - runLink=$(curl -s \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }} | jq '.html_url') - - echo "job_url=${jobLink}" - echo "run_url=${runLink}" - echo "job_url=$jobLink" >> 
$GITHUB_OUTPUT - echo "run_url=$runLink" >> $GITHUB_OUTPUT - - - name: Comment PR (Start) - # No need to comment on prdoc start or if --quiet - if: ${{ !contains(github.event.comment.body, '--quiet') && !startsWith(needs.get-pr-info.outputs.CMD, 'prdoc') && !startsWith(needs.get-pr-info.outputs.CMD, 'fmt')}} - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - let job_url = ${{ steps.build-link.outputs.job_url }} - let cmd = process.env.CMD; - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: `Command "${cmd}" has started 🚀 [See logs here](${job_url})` - }) - - cmd: - needs: [before-cmd, set-image, get-pr-info, is-org-member] - env: - CMD: ${{ needs.get-pr-info.outputs.CMD }} - PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }} - runs-on: ${{ needs.set-image.outputs.RUNNER }} - container: - image: ${{ needs.set-image.outputs.IMAGE }} - timeout-minutes: 1440 # 24 hours per runtime - # lowerdown permissions to separate permissions context for executable parts by contributors - permissions: + # don't run on help and clean commands + if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') && !contains(github.event.comment.body, '--clean') }} + permissions: # run workflow contents: read - pull-requests: none - actions: none - issues: none - outputs: - cmd_output: ${{ steps.cmd.outputs.cmd_output }} - subweight: ${{ steps.subweight.outputs.result }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - repository: ${{ needs.get-pr-info.outputs.repo }} - ref: ${{ needs.get-pr-info.outputs.pr-branch }} - - # In order to run prdoc without specifying the PR number, we need to add the PR number as an argument automatically - - name: Prepare PR Number argument - id: pr-arg - run: | - CMD="${{ needs.get-pr-info.outputs.CMD }}" - if echo "$CMD" | grep -q "prdoc" && ! 
echo "$CMD" | grep -qE "\-\-pr[[:space:]=][0-9]+"; then - echo "arg=--pr ${{ github.event.issue.number }}" >> $GITHUB_OUTPUT - else - echo "arg=" >> $GITHUB_OUTPUT - fi - - - name: Run cmd - id: cmd - env: - PR_ARG: ${{ steps.pr-arg.outputs.arg }} - IS_ORG_MEMBER: ${{ needs.is-org-member.outputs.member }} - run: | - echo "Running command: '$CMD $PR_ARG' on '${{ needs.set-image.outputs.RUNNER }}' runner, container: '${{ needs.set-image.outputs.IMAGE }}'" - echo "RUST_NIGHTLY_VERSION: $RUST_NIGHTLY_VERSION" - echo "IS_ORG_MEMBER: $IS_ORG_MEMBER" - - git config --global --add safe.directory $GITHUB_WORKSPACE - git config user.name "cmd[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - - - # if the user is not an org member, we need to use the bot's path from master to avoid unwanted modifications - if [ "$IS_ORG_MEMBER" = "true" ]; then - # safe to run commands from current branch - BOT_PATH=.github - else - # going to run commands from master - TMP_DIR=/tmp/polkadot-sdk - git clone --depth 1 --branch master https://github.com/paritytech/polkadot-sdk $TMP_DIR - BOT_PATH=$TMP_DIR/.github - fi - - # install deps and run a command from master - python3 -m pip install -r $BOT_PATH/scripts/generate-prdoc.requirements.txt - python3 $BOT_PATH/scripts/cmd/cmd.py $CMD $PR_ARG - git status - git diff - - if [ -f /tmp/cmd/command_output.log ]; then - CMD_OUTPUT=$(cat /tmp/cmd/command_output.log) - # export to summary to display in the PR - echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY - # should be multiline, otherwise it captures the first line only - echo 'cmd_output<> $GITHUB_OUTPUT - echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT - echo 'EOF' >> $GITHUB_OUTPUT - fi - - git add -A - git diff HEAD > /tmp/cmd/command_diff.patch -U0 - git commit -m "tmp cmd: $CMD" || true - # without push, as we're saving the diff to an artifact and subweight will compare the local branch with the remote branch - - - name: Upload command output - if: ${{ always() }} - uses: actions/upload-artifact@v4 - with: - name: command-output - path: /tmp/cmd/command_output.log - - - name: Upload command diff - uses: actions/upload-artifact@v4 - with: - name: command-diff - path: /tmp/cmd/command_diff.patch - - - name: Install subweight for bench - if: startsWith(needs.get-pr-info.outputs.CMD, 'bench') - run: cargo install subweight - - - name: Run Subweight for bench - id: subweight - if: startsWith(needs.get-pr-info.outputs.CMD, 'bench') - shell: bash - run: | - git fetch - git remote -v - echo $(git log -n 2 --oneline) - - result=$(subweight compare commits \ - --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \ - --method asymptotic \ - --format markdown \ - --no-color \ - --change added changed \ - --ignore-errors \ - refs/remotes/origin/master $PR_BRANCH) - - # Save the multiline result to the output - { - echo "result<> $GITHUB_OUTPUT - - after-cmd: - needs: [cmd, get-pr-info, before-cmd] + issues: write + pull-requests: write + actions: write env: CMD: ${{ needs.get-pr-info.outputs.CMD }} PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }} - runs-on: ubuntu-latest + RUNNER: ${{ needs.set-image.outputs.RUNNER }} + IMAGE: ${{ needs.set-image.outputs.IMAGE }} + REPO: ${{ needs.get-pr-info.outputs.repo }} + IS_ORG_MEMBER: ${{ needs.is-org-member.outputs.member }} + COMMENT_ID: ${{ github.event.comment.id }} + PR_NUMBER: ${{ github.event.issue.number }} steps: - # needs to be able to trigger CI, as default token does not retrigger - - uses: actions/create-github-app-token@v1 - id: generate_token - 
with: - app-id: ${{ secrets.CMD_BOT_APP_ID }} - private-key: ${{ secrets.CMD_BOT_APP_KEY }} - - name: Checkout uses: actions/checkout@v4 - with: - token: ${{ steps.generate_token.outputs.token }} - repository: ${{ needs.get-pr-info.outputs.repo }} - ref: ${{ needs.get-pr-info.outputs.pr-branch }} - - - name: Download all artifacts - uses: actions/download-artifact@v4 - with: - name: command-diff - path: command-diff - - name: Apply & Commit changes - run: | - ls -lsa . - - git config --global --add safe.directory $GITHUB_WORKSPACE - git config user.name "cmd[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - git config --global pull.rebase false - - echo "Applying $file" - git apply "command-diff/command_diff.patch" --unidiff-zero --allow-empty - - rm -rf command-diff - - git status - - if [ -n "$(git status --porcelain)" ]; then - - git remote -v - - push_changes() { - git push origin "HEAD:$PR_BRANCH" - } - - git add . - git restore --staged Cargo.lock # ignore changes in Cargo.lock - git commit -m "Update from ${{ github.actor }} running command '$CMD'" || true - - # Attempt to push changes - if ! push_changes; then - echo "Push failed, trying to rebase..." - git pull --rebase origin $PR_BRANCH - # After successful rebase, try pushing again - push_changes - fi - else - echo "Nothing to commit"; - fi - - - name: Comment PR (End) - # No need to comment on prdoc success or --quiet - if: ${{ needs.cmd.result == 'success' && !contains(github.event.comment.body, '--quiet') && !startsWith(needs.get-pr-info.outputs.CMD, 'prdoc') && !startsWith(needs.get-pr-info.outputs.CMD, 'fmt') }} - uses: actions/github-script@v7 + - name: Start cmd with gh cli env: - SUBWEIGHT: "${{ needs.cmd.outputs.subweight }}" - CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}" - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - let runUrl = ${{ needs.before-cmd.outputs.run_url }} - let subweight = process.env.SUBWEIGHT || ''; - let cmdOutput = process.env.CMD_OUTPUT || ''; - let cmd = process.env.CMD; - console.log(cmdOutput); - - let subweightCollapsed = subweight.trim() !== '' - ? `
<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>
` - : ''; - - let cmdOutputCollapsed = cmdOutput.trim() !== '' - ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` - : ''; - - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: `Command "${cmd}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}` - }) - - finish: - needs: [get-pr-info, before-cmd, after-cmd, cmd] - if: ${{ always() }} - runs-on: ubuntu-latest - env: - CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}" - CMD: ${{ needs.get-pr-info.outputs.CMD }} - steps: - - name: Comment PR (Failure) - if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' }} - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - let jobUrl = ${{ needs.before-cmd.outputs.job_url }} - let cmdOutput = process.env.CMD_OUTPUT; - let cmd = process.env.CMD; - let cmdOutputCollapsed = ''; - if (cmdOutput && cmdOutput.trim() !== '') { - cmdOutputCollapsed = `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` - } - - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: `Command "${cmd}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}` - }) - - - name: Add 😕 reaction on failure - if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' }} - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - github.rest.reactions.createForIssueComment({ - comment_id: ${{ github.event.comment.id }}, - owner: context.repo.owner, - repo: context.repo.repo, - content: 'confused' - }) - - - name: Add 👍 reaction on success - if: ${{ needs.cmd.result == 'success' && needs.after-cmd.result == 'success' }} - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - github.rest.reactions.createForIssueComment({ - comment_id: ${{ github.event.comment.id }}, - owner: context.repo.owner, - repo: context.repo.repo, - content: '+1' - }) + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh workflow run cmd-run.yml \ + --ref cmd-bot \ + -f cmd="${CMD}" \ + -f repo="${REPO}" \ + -f pr_branch="${PR_BRANCH}" \ + -f pr_num="${PR_NUMBER}" \ + -f runner="${RUNNER}" \ + -f is_org_member="${IS_ORG_MEMBER}" \ + -f comment_id="${COMMENT_ID}" \ + -f image="${IMAGE}" \ + -f is_quiet="${{ contains(github.event.comment.body, '--quiet') }}" diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index d7b01000855ad..67de5418434f9 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -31,10 +31,10 @@ jobs: - name: Generate token id: generate_token - uses: tibdex/github-app-token@v2.1.0 + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.CMD_BOT_APP_ID }} - private_key: ${{ secrets.CMD_BOT_APP_KEY }} + app_id: ${{ secrets.RELEASE_BACKPORT_AUTOMATION_APP_ID }} + private_key: ${{ secrets.RELEASE_BACKPORT_AUTOMATION_APP_PRIVATE_KEY }} - name: Create backport pull requests uses: korthout/backport-action@v3 diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index f61028cae91d3..9602213460317 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -176,9 +176,11 @@ jobs: env: GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} run: | + VERSIONED_ASSET="${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm" + mv "${{ env.ASSET }}" "$VERSIONED_ASSET" + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ - --repo paritytech/polkadot-sdk \ - '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' + --repo paritytech/polkadot-sdk "$VERSIONED_ASSET" publish-release-artifacts: needs: [ validate-inputs, publish-release-draft ] diff --git a/Cargo.lock b/Cargo.lock index 688ba98beb660..d0e4779a0dc19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1596,6 +1596,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "average" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a237a6822e1c3c98e700b6db5b293eb341b7524dcb8d227941245702b7431dc" +dependencies = [ + "easy-cast", + "float-ord", + "num-traits", +] + [[package]] name = "backoff" version = "0.4.0" @@ -2252,7 
+2263,7 @@ dependencies = [ "sp-state-machine 0.35.0", "sp-std 14.0.0", "sp-trie 29.0.0", - "trie-db", + "trie-db 0.30.0", ] [[package]] @@ -2374,6 +2385,7 @@ dependencies = [ "sp-core 28.0.0", "sp-runtime 31.0.1", "staging-xcm", + "staging-xcm-builder", "staging-xcm-executor", "testnet-parachains-constants", "xcm-runtime-apis", @@ -2569,6 +2581,7 @@ dependencies = [ "sp-core 28.0.0", "sp-runtime 31.0.1", "staging-xcm", + "staging-xcm-builder", "staging-xcm-executor", "testnet-parachains-constants", "xcm-runtime-apis", @@ -2978,9 +2991,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", @@ -2988,7 +3001,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.48.5", + "windows-link", ] [[package]] @@ -4174,6 +4187,16 @@ dependencies = [ "cipher 0.4.4", ] +[[package]] +name = "ctrlc" +version = "3.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +dependencies = [ + "nix 0.29.0", + "windows-sys 0.59.0", +] + [[package]] name = "cumulus-client-cli" version = "0.7.0" @@ -4549,7 +4572,7 @@ dependencies = [ "sp-version 29.0.0", "staging-xcm", "staging-xcm-builder", - "trie-db", + "trie-db 0.30.0", "trie-standardmap", ] @@ -5626,6 +5649,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "drawille" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64e461c3f1e69d99372620640b3fd5f0309eeda2e26e4af69f6760c0e1df845" +dependencies = [ + "colored", + "fnv", +] + [[package]] name = "dtoa" version = "1.0.9" @@ -5665,6 +5698,15 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +[[package]] +name = "easy-cast" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72852736692ec862655eca398c9bb1b476161b563c9f80f45f4808b9629750d6" +dependencies = [ + "libm", +] + [[package]] name = "ecdsa" version = "0.16.8" @@ -6321,6 +6363,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "float-ord" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d" + [[package]] name = "flume" version = "0.11.1" @@ -6488,8 +6536,8 @@ dependencies = [ "sp-version 29.0.0", "sp-wasm-interface 20.0.0", "substrate-test-runtime", - "subxt", - "subxt-signer", + "subxt 0.38.1", + "subxt-signer 0.38.0", "thiserror 1.0.65", "thousands", "westend-runtime", @@ -6522,6 +6570,20 @@ dependencies = [ "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "frame-decode" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7af3d1149d6063985bb62d97f3ea83060ce4d6f2d04c21f551d270e8d84a27c" +dependencies = [ + "frame-metadata 18.0.0", + "parity-scale-codec", + "scale-decode 0.16.0", + "scale-info", + "scale-type-resolver", + "sp-crypto-hashing 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "frame-election-provider-solution-type" version = "13.0.0" @@ -6619,6 +6681,18 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-metadata" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "frame-metadata" version = "20.0.0" @@ -7787,7 +7861,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -8537,9 +8611,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.7" +version = "0.24.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" +checksum = "834af00800e962dee8f7bfc0f60601de215e73e78e5497d733a2919da837d3c8" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -12461,16 +12535,11 @@ dependencies = [ name = "pallet-nis" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", "pallet-balances", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", - "sp-arithmetic 23.0.0", - "sp-core 28.0.0", "sp-io 30.0.0", - "sp-runtime 31.0.1", ] [[package]] @@ -12811,7 +12880,7 @@ dependencies = [ "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-builder", - "subxt-signer", + "subxt-signer 0.38.0", ] [[package]] @@ -12844,8 +12913,8 @@ dependencies = [ "static_init", "substrate-cli-test-utils", "substrate-prometheus-endpoint", - "subxt", - "subxt-signer", + "subxt 0.38.1", + "subxt-signer 0.38.0", "thiserror 1.0.65", "tokio", ] @@ -15415,7 +15484,7 @@ dependencies = [ "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-state-trie-migration-rpc", - "subxt-metadata", + "subxt-metadata 0.38.0", "tokio", "wait-timeout", ] @@ -16689,8 +16758,8 @@ dependencies = [ "serde_json", "substrate-build-script-utils", "subwasmlib", - "subxt", - "subxt-signer", + "subxt 0.38.1", + "subxt-signer 0.38.0", "tokio", "tokio-util", "zombienet-sdk", @@ -20206,7 +20275,7 @@ dependencies = [ "sp-state-machine 0.35.0", "sp-version 29.0.0", "sp-wasm-interface 20.0.0", - "subxt", + "subxt 0.38.1", "thiserror 1.0.65", ] @@ -20452,6 +20521,7 @@ dependencies = [ name = "sc-transaction-pool" version = "28.0.0" dependencies = [ + "anyhow", "array-bytes", "assert_matches", "async-trait", @@ -20481,10 +20551,14 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", + "substrate-txtesttool", "thiserror 1.0.65", "tokio", "tokio-stream", "tracing", + "tracing-subscriber 0.3.18", + "zombienet-configuration", + "zombienet-sdk", ] [[package]] @@ -20530,6 +20604,18 @@ dependencies = [ "serde", ] +[[package]] +name = "scale-bits" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27243ab0d2d6235072b017839c5f0cd1a3b1ce45c0f7a715363b0c7d36c76c94" +dependencies = [ + "parity-scale-codec", + "scale-info", + "scale-type-resolver", + "serde", +] + [[package]] name = "scale-decode" version = "0.13.1" @@ -20538,7 +20624,7 @@ checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" dependencies = [ "derive_more 0.99.17", "parity-scale-codec", - "scale-bits", + "scale-bits 0.6.0", "scale-type-resolver", "smallvec", ] @@ -20552,12 +20638,27 @@ 
dependencies = [ "derive_more 1.0.0", "parity-scale-codec", "primitive-types 0.13.1", - "scale-bits", - "scale-decode-derive", + "scale-bits 0.6.0", + "scale-decode-derive 0.14.0", "scale-type-resolver", "smallvec", ] +[[package]] +name = "scale-decode" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d78196772d25b90a98046794ce0fe2588b39ebdfbdc1e45b4c6c85dd43bebad" +dependencies = [ + "parity-scale-codec", + "primitive-types 0.13.1", + "scale-bits 0.7.0", + "scale-decode-derive 0.16.0", + "scale-type-resolver", + "smallvec", + "thiserror 2.0.11", +] + [[package]] name = "scale-decode-derive" version = "0.14.0" @@ -20570,6 +20671,18 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "scale-decode-derive" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4b54a1211260718b92832b661025d1f1a4b6930fbadd6908e00edd265fa5f7" +dependencies = [ + "darling", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + [[package]] name = "scale-encode" version = "0.8.0" @@ -20579,12 +20692,27 @@ dependencies = [ "derive_more 1.0.0", "parity-scale-codec", "primitive-types 0.13.1", - "scale-bits", - "scale-encode-derive", + "scale-bits 0.6.0", + "scale-encode-derive 0.8.0", "scale-type-resolver", "smallvec", ] +[[package]] +name = "scale-encode" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64901733157f9d25ef86843bd783eda439fac7efb0ad5a615d12d2cf3a29464b" +dependencies = [ + "parity-scale-codec", + "primitive-types 0.13.1", + "scale-bits 0.7.0", + "scale-encode-derive 0.10.0", + "scale-type-resolver", + "smallvec", + "thiserror 2.0.11", +] + [[package]] name = "scale-encode-derive" version = "0.8.0" @@ -20598,6 +20726,19 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "scale-encode-derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78a3993a13b4eafa89350604672c8757b7ea84c7c5947d4b3691e3169c96379b" +dependencies = [ + "darling", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", +] + [[package]] name = "scale-info" version = "2.11.6" @@ -20647,6 +20788,19 @@ dependencies = [ "thiserror 1.0.65", ] +[[package]] +name = "scale-typegen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc3173be608895eb117cf397ab4f31f00e2ed2c7af1c6e0b8f5d51d0a0967053" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "scale-info", + "syn 2.0.98", + "thiserror 2.0.11", +] + [[package]] name = "scale-value" version = "0.17.0" @@ -20658,13 +20812,32 @@ dependencies = [ "derive_more 1.0.0", "either", "parity-scale-codec", - "scale-bits", + "scale-bits 0.6.0", "scale-decode 0.14.0", - "scale-encode", + "scale-encode 0.8.0", "scale-info", "scale-type-resolver", "serde", - "yap", + "yap 0.11.0", +] + +[[package]] +name = "scale-value" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca8b26b451ecb7fd7b62b259fa28add63d12ec49bbcac0e01fcb4b5ae0c09aa" +dependencies = [ + "base58", + "blake2 0.10.6", + "either", + "parity-scale-codec", + "scale-bits 0.7.0", + "scale-decode 0.16.0", + "scale-encode 0.10.0", + "scale-type-resolver", + "serde", + "thiserror 2.0.11", + "yap 0.12.0", ] [[package]] @@ -21793,6 +21966,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "staging-xcm", + "staging-xcm-builder", "staging-xcm-executor", ] @@ -23297,7 +23471,7 @@ dependencies = 
[ "sp-trie 29.0.0", "thiserror 1.0.65", "tracing", - "trie-db", + "trie-db 0.30.0", ] [[package]] @@ -23318,7 +23492,7 @@ dependencies = [ "sp-trie 34.0.0", "thiserror 1.0.65", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] @@ -23339,7 +23513,7 @@ dependencies = [ "sp-trie 35.0.0", "thiserror 1.0.65", "tracing", - "trie-db", + "trie-db 0.29.1", ] [[package]] @@ -23470,6 +23644,7 @@ name = "sp-tracing" version = "16.0.0" dependencies = [ "parity-scale-codec", + "regex", "tracing", "tracing-core", "tracing-subscriber 0.3.18", @@ -23529,7 +23704,7 @@ dependencies = [ "thiserror 1.0.65", "tracing", "trie-bench", - "trie-db", + "trie-db 0.30.0", "trie-root", "trie-standardmap", ] @@ -23554,7 +23729,7 @@ dependencies = [ "sp-externalities 0.28.0", "thiserror 1.0.65", "tracing", - "trie-db", + "trie-db 0.29.1", "trie-root", ] @@ -23578,7 +23753,7 @@ dependencies = [ "sp-externalities 0.28.0", "thiserror 1.0.65", "tracing", - "trie-db", + "trie-db 0.29.1", "trie-root", ] @@ -24035,7 +24210,7 @@ dependencies = [ "sp-keyring", "staging-node-inspect", "substrate-cli-test-utils", - "subxt-signer", + "subxt-signer 0.38.0", "tempfile", "tokio", "tokio-util", @@ -24534,7 +24709,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", - "trie-db", + "trie-db 0.30.0", ] [[package]] @@ -24611,7 +24786,7 @@ dependencies = [ "substrate-test-runtime-client", "substrate-wasm-builder", "tracing", - "trie-db", + "trie-db 0.30.0", ] [[package]] @@ -24651,6 +24826,39 @@ dependencies = [ name = "substrate-test-utils" version = "4.0.0-dev" +[[package]] +name = "substrate-txtesttool" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40d15497539242639a640a5a03c160a6695c3d699d8a37035e8cc83aa6dec576" +dependencies = [ + "async-trait", + "average", + "chrono", + "clap 4.5.13", + "clap_derive 4.5.13", + "ctrlc", + "futures", + "futures-util", + "hex", + "jsonrpsee", + "parity-scale-codec", + "parking_lot 0.12.3", + "rand 0.9.0", + "serde", + "serde_json", + "subxt 0.40.0", + "subxt-core 0.40.0", + "subxt-signer 0.40.0", + "termplot", + "thiserror 2.0.11", + "time", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber 0.3.18", +] + [[package]] name = "substrate-wasm-builder" version = "17.0.0" @@ -24742,17 +24950,17 @@ dependencies = [ "parity-scale-codec", "polkadot-sdk 0.7.0", "primitive-types 0.13.1", - "scale-bits", + "scale-bits 0.6.0", "scale-decode 0.14.0", - "scale-encode", + "scale-encode 0.8.0", "scale-info", - "scale-value", + "scale-value 0.17.0", "serde", "serde_json", - "subxt-core", - "subxt-lightclient", - "subxt-macro", - "subxt-metadata", + "subxt-core 0.38.0", + "subxt-lightclient 0.38.0", + "subxt-macro 0.38.0", + "subxt-metadata 0.38.0", "thiserror 1.0.65", "tokio", "tokio-util", @@ -24762,6 +24970,43 @@ dependencies = [ "web-time", ] +[[package]] +name = "subxt" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ffca95192207f6dbaf68a032f7915cfc8670f062df0464bdddf05d35a09bcf8" +dependencies = [ + "async-trait", + "derive-where", + "either", + "frame-metadata 18.0.0", + "futures", + "hex", + "impl-serde 0.5.0", + "jsonrpsee", + "parity-scale-codec", + "primitive-types 0.13.1", + "scale-bits 0.7.0", + "scale-decode 0.16.0", + "scale-encode 0.10.0", + "scale-info", + "scale-value 0.18.0", + "serde", + "serde_json", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "subxt-core 0.40.0", + "subxt-lightclient 0.40.0", + 
"subxt-macro 0.40.0", + "subxt-metadata 0.40.0", + "thiserror 2.0.11", + "tokio", + "tokio-util", + "tracing", + "url", + "wasm-bindgen-futures", + "web-time", +] + [[package]] name = "subxt-codegen" version = "0.38.0" @@ -24773,12 +25018,29 @@ dependencies = [ "proc-macro2 1.0.93", "quote 1.0.38", "scale-info", - "scale-typegen", - "subxt-metadata", + "scale-typegen 0.9.0", + "subxt-metadata 0.38.0", "syn 2.0.98", "thiserror 1.0.65", ] +[[package]] +name = "subxt-codegen" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de8786ebc4be0905fac861c8ce1845e677a96b8ddb209e5c3e0e1f6b804d62f" +dependencies = [ + "heck 0.5.0", + "parity-scale-codec", + "proc-macro2 1.0.93", + "quote 1.0.38", + "scale-info", + "scale-typegen 0.10.0", + "subxt-metadata 0.40.0", + "syn 2.0.98", + "thiserror 2.0.11", +] + [[package]] name = "subxt-core" version = "0.38.0" @@ -24788,7 +25050,7 @@ dependencies = [ "base58", "blake2 0.10.6", "derive-where", - "frame-decode", + "frame-decode 0.5.0", "frame-metadata 17.0.0", "hashbrown 0.14.5", "hex", @@ -24797,14 +25059,44 @@ dependencies = [ "parity-scale-codec", "polkadot-sdk 0.7.0", "primitive-types 0.13.1", - "scale-bits", + "scale-bits 0.6.0", "scale-decode 0.14.0", - "scale-encode", + "scale-encode 0.8.0", + "scale-info", + "scale-value 0.17.0", + "serde", + "serde_json", + "subxt-metadata 0.38.0", + "tracing", +] + +[[package]] +name = "subxt-core" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daa812fea5d2a104d3253aa722daa51f222cf951304f4d47eda70c268bf99921" +dependencies = [ + "base58", + "blake2 0.10.6", + "derive-where", + "frame-decode 0.6.1", + "frame-metadata 18.0.0", + "hashbrown 0.14.5", + "hex", + "impl-serde 0.5.0", + "keccak-hash", + "parity-scale-codec", + "primitive-types 0.13.1", + "scale-bits 0.7.0", + "scale-decode 0.16.0", + "scale-encode 0.10.0", "scale-info", - "scale-value", + "scale-value 0.18.0", "serde", "serde_json", - "subxt-metadata", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "subxt-metadata 0.40.0", + "thiserror 2.0.11", "tracing", ] @@ -24825,6 +25117,23 @@ dependencies = [ "tracing", ] +[[package]] +name = "subxt-lightclient" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbf1a87957918fcfde2cc2150d792c752acb933f0f83262a8b42d96b8dfcc52" +dependencies = [ + "futures", + "futures-util", + "serde", + "serde_json", + "smoldot-light 0.16.2", + "thiserror 2.0.11", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "subxt-macro" version = "0.38.0" @@ -24835,9 +25144,25 @@ dependencies = [ "parity-scale-codec", "proc-macro-error2", "quote 1.0.38", - "scale-typegen", - "subxt-codegen", - "subxt-utils-fetchmetadata", + "scale-typegen 0.9.0", + "subxt-codegen 0.38.0", + "subxt-utils-fetchmetadata 0.38.0", + "syn 2.0.98", +] + +[[package]] +name = "subxt-macro" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0a120bac6a4a07d477e736f42a388eebef47a277d2a76c8cddcf90e71ee4aa2" +dependencies = [ + "darling", + "parity-scale-codec", + "proc-macro-error2", + "quote 1.0.38", + "scale-typegen 0.10.0", + "subxt-codegen 0.40.0", + "subxt-utils-fetchmetadata 0.40.0", "syn 2.0.98", ] @@ -24847,7 +25172,7 @@ version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee13e6862eda035557d9a2871955306aff540d2b89c06e0a62a1136a700aed28" dependencies = [ - "frame-decode", 
+ "frame-decode 0.5.0", "frame-metadata 17.0.0", "hashbrown 0.14.5", "parity-scale-codec", @@ -24855,6 +25180,21 @@ dependencies = [ "scale-info", ] +[[package]] +name = "subxt-metadata" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcee71f170e496294434c7c8646befc05e9a3a27a771712b4f3008d0c4d0ee7" +dependencies = [ + "frame-decode 0.6.1", + "frame-metadata 18.0.0", + "hashbrown 0.14.5", + "parity-scale-codec", + "scale-info", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror 2.0.11", +] + [[package]] name = "subxt-signer" version = "0.38.0" @@ -24880,7 +25220,37 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "subxt-core", + "subxt-core 0.38.0", + "zeroize", +] + +[[package]] +name = "subxt-signer" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02ea55baf5930de2ad53d2cd1371b083cbeecad1fd2f7b6eb9f16db1496053fa" +dependencies = [ + "base64 0.22.1", + "bip32", + "bip39", + "cfg-if", + "crypto_secretbox", + "hex", + "hmac 0.12.1", + "keccak-hash", + "parity-scale-codec", + "pbkdf2", + "regex", + "schnorrkel 0.11.4", + "scrypt", + "secp256k1 0.30.0", + "secrecy 0.10.3", + "serde", + "serde_json", + "sha2 0.10.8", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "subxt-core 0.40.0", + "thiserror 2.0.11", "zeroize", ] @@ -24895,6 +25265,17 @@ dependencies = [ "thiserror 1.0.65", ] +[[package]] +name = "subxt-utils-fetchmetadata" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721556230144393c58ff3dd2660f734cc792fe19167823b957d6858b15e12362" +dependencies = [ + "hex", + "parity-scale-codec", + "thiserror 2.0.11", +] + [[package]] name = "sval" version = "2.6.1" @@ -25177,6 +25558,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "termplot" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a71a52e300301c16267a50aca0ad95361a31222061091b80ee8988d1aad87618" +dependencies = [ + "drawille", +] + [[package]] name = "termtree" version = "0.4.1" @@ -25956,16 +26346,16 @@ dependencies = [ [[package]] name = "trie-bench" -version = "0.39.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3092f400e9f7e3ce8c1756016a8b6287163ab7a11dd47d82169260cb4cc2d680" +checksum = "eaafa99707db4419f193b97e825aca722c6910a9dff31d8f41df304b6091ef17" dependencies = [ "criterion", "hash-db", "keccak-hasher", "memory-db", "parity-scale-codec", - "trie-db", + "trie-db 0.30.0", "trie-root", "trie-standardmap", ] @@ -25982,6 +26372,18 @@ dependencies = [ "smallvec", ] +[[package]] +name = "trie-db" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c0670ab45a6b7002c7df369fee950a27cf29ae0474343fd3a15aa15f691e7a6" +dependencies = [ + "hash-db", + "log", + "rustc-hex", + "smallvec", +] + [[package]] name = "trie-root" version = "0.18.0" @@ -27277,6 +27679,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" + [[package]] name = "windows-registry" version = "0.2.0" @@ -27886,6 +28294,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" +[[package]] +name = "yap" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe269e7b803a5e8e20cbd97860e136529cd83bf2c9c6d37b142467e7e1f051f" + [[package]] name = "yasna" version = "0.5.2" @@ -28080,8 +28494,8 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sp-core 35.0.0", - "subxt", - "subxt-signer", + "subxt 0.38.1", + "subxt-signer 0.38.0", "thiserror 1.0.65", "tokio", "tracing", @@ -28143,8 +28557,8 @@ dependencies = [ "async-trait", "futures", "lazy_static", - "subxt", - "subxt-signer", + "subxt 0.38.1", + "subxt-signer 0.38.0", "tokio", "zombienet-configuration", "zombienet-orchestrator", diff --git a/Cargo.toml b/Cargo.toml index 733c10b60b724..fcf00b5f2c8fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1366,8 +1366,8 @@ tracing-futures = { version = "0.2.4" } tracing-log = { version = "0.2.0" } tracing-subscriber = { version = "0.3.18" } tracking-allocator = { path = "polkadot/node/tracking-allocator", default-features = false, package = "staging-tracking-allocator" } -trie-bench = { version = "0.39.0" } -trie-db = { version = "0.29.1", default-features = false } +trie-bench = { version = "0.40.0" } +trie-db = { version = "0.30.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } trie-standardmap = { version = "0.16.0" } trybuild = { version = "1.0.103" } @@ -1400,7 +1400,8 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } -zombienet-sdk = { version = "0.2.20" } +zombienet-configuration = { version = "0.2.22" } +zombienet-sdk = { version = "0.2.22" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/bridges/modules/relayers/src/benchmarking.rs b/bridges/modules/relayers/src/benchmarking.rs index 1d6ee56639e05..5d4acb60e6138 100644 --- a/bridges/modules/relayers/src/benchmarking.rs +++ b/bridges/modules/relayers/src/benchmarking.rs @@ -20,9 +20,7 @@ use crate::*; -use frame_benchmarking::{ - benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, BenchmarkResult, -}; +use frame_benchmarking::v2::*; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use sp_runtime::traits::One; @@ -52,89 +50,107 @@ fn assert_last_event, I: 'static>( frame_system::Pallet::::assert_last_event(generic_event.into()); } -benchmarks_instance_pallet! { - where_clause { where +#[instance_benchmarks( + where BeneficiaryOf: From<::AccountId>, - } +)] +mod benchmarks { + use super::*; - // Benchmark `claim_rewards` call. 
- claim_rewards { - let reward_kind = T::bench_reward(); + #[benchmark] + fn claim_rewards() { let relayer: T::AccountId = whitelisted_caller(); + let reward_kind = T::bench_reward(); let reward_balance = T::RewardBalance::from(REWARD_AMOUNT); - let _ = T::prepare_rewards_account(reward_kind, reward_balance); RelayerRewards::::insert(&relayer, reward_kind, reward_balance); - }: _(RawOrigin::Signed(relayer.clone()), reward_kind) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(relayer.clone()), reward_kind); + // we can't check anything here, because `PaymentProcedure` is responsible for // payment logic, so we assume that if call has succeeded, the procedure has // also completed successfully - assert_last_event::(Event::RewardPaid { - relayer: relayer.clone(), - reward_kind, - reward_balance, - beneficiary: relayer.into(), - }.into()); + assert_last_event::( + Event::RewardPaid { + relayer: relayer.clone(), + reward_kind, + reward_balance, + beneficiary: relayer.into(), + } + .into(), + ); } - // Benchmark `claim_rewards_to` call. - claim_rewards_to { - let reward_kind = T::bench_reward(); + #[benchmark] + fn claim_rewards_to() -> Result<(), BenchmarkError> { let relayer: T::AccountId = whitelisted_caller(); + let reward_kind = T::bench_reward(); let reward_balance = T::RewardBalance::from(REWARD_AMOUNT); - let Some(alternative_beneficiary) = T::prepare_rewards_account(reward_kind, reward_balance) else { + let Some(alternative_beneficiary) = T::prepare_rewards_account(reward_kind, reward_balance) + else { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))); }; RelayerRewards::::insert(&relayer, reward_kind, reward_balance); - }: _(RawOrigin::Signed(relayer.clone()), reward_kind, alternative_beneficiary.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(relayer.clone()), reward_kind, alternative_beneficiary.clone()); + // we can't check anything here, because `PaymentProcedure` is responsible for // payment logic, so we assume that if call has succeeded, the procedure has // also completed successfully - assert_last_event::(Event::RewardPaid { - relayer, - reward_kind, - reward_balance, - beneficiary: alternative_beneficiary, - }.into()); + assert_last_event::( + Event::RewardPaid { + relayer: relayer.clone(), + reward_kind, + reward_balance, + beneficiary: alternative_beneficiary, + } + .into(), + ); + + Ok(()) } - // Benchmark `register` call. - register { + #[benchmark] + fn register() { let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() .saturating_add(crate::Pallet::::required_registration_lease()) .saturating_add(One::one()) .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - }: _(RawOrigin::Signed(relayer.clone()), valid_till) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(relayer.clone()), valid_till); + assert!(crate::Pallet::::is_registration_active(&relayer)); } - // Benchmark `deregister` call. 
- deregister { + #[benchmark] + fn deregister() { let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() .saturating_add(crate::Pallet::::required_registration_lease()) .saturating_add(One::one()) .saturating_add(One::one()); T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); - + crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till) + .unwrap(); frame_system::Pallet::::set_block_number(valid_till.saturating_add(One::one())); - }: _(RawOrigin::Signed(relayer.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(relayer.clone())); + assert!(!crate::Pallet::::is_registration_active(&relayer)); } // Benchmark `slash_and_deregister` method of the pallet. We are adding this weight to // the weight of message delivery call if `BridgeRelayersTransactionExtension` signed extension // is deployed at runtime level. - slash_and_deregister { + #[benchmark] + fn slash_and_deregister() { // prepare and register relayer account let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() @@ -142,31 +158,41 @@ benchmarks_instance_pallet! { .saturating_add(One::one()) .saturating_add(One::one()); T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - assert_ok!(crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till)); + assert_ok!(crate::Pallet::::register( + RawOrigin::Signed(relayer.clone()).into(), + valid_till + )); // create slash destination account let slash_destination: T::AccountId = whitelisted_caller(); T::deposit_account(slash_destination.clone(), Zero::zero()); - }: { - crate::Pallet::::slash_and_deregister(&relayer, bp_relayers::ExplicitOrAccountParams::Explicit::<_, ()>(slash_destination)) - } - verify { + + #[block] + { + crate::Pallet::::slash_and_deregister( + &relayer, + bp_relayers::ExplicitOrAccountParams::Explicit::<_, ()>(slash_destination), + ); + } + assert!(!crate::Pallet::::is_registration_active(&relayer)); } // Benchmark `register_relayer_reward` method of the pallet. We are adding this weight to // the weight of message delivery call if `BridgeRelayersTransactionExtension` signed extension // is deployed at runtime level. - register_relayer_reward { + #[benchmark] + fn register_relayer_reward() { let reward_kind = T::bench_reward(); let relayer: T::AccountId = whitelisted_caller(); - }: { - crate::Pallet::::register_relayer_reward(reward_kind, &relayer, One::one()); - } - verify { + #[block] + { + crate::Pallet::::register_relayer_reward(reward_kind, &relayer, One::one()); + } + assert_eq!(RelayerRewards::::get(relayer, &reward_kind), Some(One::one())); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime); } diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index 05d48982a938f..0d0aa1b2ddf50 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -18,7 +18,6 @@ //! coordinate relations between relayers. #![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] extern crate alloc; @@ -367,6 +366,11 @@ pub mod pallet { ); }, } + + Self::deposit_event(Event::::SlashedAndDeregistered { + relayer: relayer.clone(), + registration, + }); } /// Register reward for given relayer. 
@@ -553,6 +557,8 @@ mod tests {
 	use super::*;
 	use mock::{RuntimeEvent as TestEvent, *};

+	use bp_messages::{HashedLaneId, LaneIdType};
+	use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
 	use frame_support::{assert_noop, assert_ok, traits::fungible::Mutate};
 	use frame_system::{EventRecord, Pallet as System, Phase};
 	use sp_runtime::DispatchError;
@@ -589,6 +595,43 @@ mod tests {
 		});
 	}

+	#[test]
+	fn slash_and_deregister_works() {
+		run_test(|| {
+			get_ready_for_events();
+
+			// register
+			assert_ok!(Pallet::<TestRuntime>::register(
+				RuntimeOrigin::signed(REGISTER_RELAYER),
+				150,
+			));
+			// check if registered
+			let registration = Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER).unwrap();
+			assert_eq!(registration, Registration { valid_till: 150, stake: Stake::get() });
+
+			// slash and deregister
+			let slash_destination = RewardsAccountParams::new(
+				HashedLaneId::try_new(1, 2).unwrap(),
+				*b"test",
+				RewardsAccountOwner::ThisChain,
+			);
+			let slash_destination = bp_relayers::ExplicitOrAccountParams::Params(slash_destination);
+			Pallet::<TestRuntime>::slash_and_deregister(&REGISTER_RELAYER, slash_destination);
+			// check if event emitted
+			assert_eq!(
+				System::<TestRuntime>::events().last(),
+				Some(&EventRecord {
+					phase: Phase::Initialization,
+					event: TestEvent::BridgeRelayers(Event::SlashedAndDeregistered {
+						relayer: REGISTER_RELAYER,
+						registration,
+					}),
+					topics: vec![],
+				})
+			)
+		});
+	}
+
 	#[test]
 	fn root_cant_claim_anything() {
 		run_test(|| {
diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml
index e44cca077ef32..44e8cbdfc30c2 100644
--- a/bridges/snowbridge/primitives/router/Cargo.toml
+++ b/bridges/snowbridge/primitives/router/Cargo.toml
@@ -23,6 +23,7 @@ sp-runtime = { workspace = true }
 sp-std = { workspace = true }
 xcm = { workspace = true }
+xcm-builder = { workspace = true }
 xcm-executor = { workspace = true }

 snowbridge-core = { workspace = true }
@@ -43,6 +44,7 @@ std = [
	"sp-io/std",
	"sp-runtime/std",
	"sp-std/std",
+	"xcm-builder/std",
	"xcm-executor/std",
	"xcm/std",
 ]
@@ -50,6 +52,7 @@ runtime-benchmarks = [
	"frame-support/runtime-benchmarks",
	"snowbridge-core/runtime-benchmarks",
	"sp-runtime/runtime-benchmarks",
+	"xcm-builder/runtime-benchmarks",
	"xcm-executor/runtime-benchmarks",
	"xcm/runtime-benchmarks",
 ]
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index f2d5b02e8bbdf..98a563f1894ee 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -466,6 +466,7 @@ where
 	}
 }

+/// DEPRECATED in favor of [xcm_builder::ExternalConsensusLocationsConverterFor]
 pub struct EthereumLocationsConverterFor<AccountId>(PhantomData<AccountId>);
 impl<AccountId> ConvertLocation<AccountId> for EthereumLocationsConverterFor<AccountId>
 where
diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
index 11d7928602c6e..1d8ac7393cdd9 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
@@ -2,11 +2,16 @@ use super::EthereumLocationsConverterFor;
 use crate::inbound::{
 	mock::*, Command, ConvertMessage, Destination, MessageV1, VersionedMessage, H160,
 };
-use frame_support::assert_ok;
+use frame_support::{assert_ok, parameter_types};
 use hex_literal::hex;
 use xcm::prelude::*;
+use xcm_builder::ExternalConsensusLocationsConverterFor;
 use xcm_executor::traits::ConvertLocation;

+parameter_types! {
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(ByGenesis([9; 32])), Parachain(1234)].into();
+}
+
 #[test]
 fn test_ethereum_network_converts_successfully() {
 	let expected_account: [u8; 32] =
@@ -15,7 +20,12 @@ fn test_ethereum_network_converts_successfully() {

 	let account =
 		EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap();
-
+	assert_eq!(account, expected_account);
+	let account =
+		ExternalConsensusLocationsConverterFor::<UniversalLocation, [u8; 32]>::convert_location(
+			&contract_location,
+		)
+		.unwrap();
 	assert_eq!(account, expected_account);
 }

@@ -30,7 +40,12 @@ fn test_contract_location_with_network_converts_successfully() {

 	let account =
 		EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap();
-
+	assert_eq!(account, expected_account);
+	let account =
+		ExternalConsensusLocationsConverterFor::<UniversalLocation, [u8; 32]>::convert_location(
+			&contract_location,
+		)
+		.unwrap();
 	assert_eq!(account, expected_account);
 }
diff --git a/cumulus/bin/pov-validator/src/main.rs b/cumulus/bin/pov-validator/src/main.rs
index 1c08f218f6b8a..f04c5938f631b 100644
--- a/cumulus/bin/pov-validator/src/main.rs
+++ b/cumulus/bin/pov-validator/src/main.rs
@@ -18,7 +18,7 @@
 use clap::Parser;
 use codec::{Decode, Encode};
-use polkadot_node_primitives::{BlockData, PoV, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT};
+use polkadot_node_primitives::{BlockData, PoV, POV_BOMB_LIMIT};
 use polkadot_parachain_primitives::primitives::ValidationParams;
 use polkadot_primitives::{BlockNumber as RBlockNumber, Hash as RHash, HeadData};
 use sc_executor::WasmExecutor;
@@ -26,6 +26,10 @@ use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode
 use std::{fs, path::PathBuf, time::Instant};
 use tracing::level_filters::LevelFilter;

+// This is now determined by the chain, call `validation_code_bomb_limit` API.
+// max_code_size * 10 = 30MB currently. Update constant if needed.
+const VALIDATION_CODE_BOMB_LIMIT: usize = 30 * 1024 * 1024;
+
 /// Tool for validating a `PoV` locally.
 #[derive(Parser)]
 struct Cli {
diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
index a30608224ab9e..313568745dbc3 100644
--- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
@@ -469,6 +469,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient {
 	async fn scheduling_lookahead(&self, at: Hash) -> Result<u32, ApiError> {
 		Ok(self.rpc_client.parachain_host_scheduling_lookahead(at).await?)
 	}
+
+	async fn validation_code_bomb_limit(&self, at: Hash) -> Result<u32, ApiError> {
+		Ok(self.rpc_client.parachain_host_validation_code_bomb_limit(at).await?)
+	}
 }

 #[async_trait::async_trait]
diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
index b2fd5a4e6089c..113bc557499f0 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
@@ -715,6 +715,18 @@ impl RelayChainRpcClient {
 		.await
 	}

+	pub async fn parachain_host_validation_code_bomb_limit(
+		&self,
+		at: RelayHash,
+	) -> Result<u32, RelayChainError> {
+		self.call_remote_runtime_function(
+			"ParachainHost_validation_code_bomb_limit",
+			at,
+			None::<()>,
+		)
+		.await
+	}
+
 	pub async fn validation_code_hash(
 		&self,
 		at: RelayHash,
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 0a4a29539b2cc..5750c1c4dc18e 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -752,10 +752,6 @@ pub mod pallet {
 		HostConfigurationNotAvailable,
 		/// No validation function upgrade is currently scheduled.
 		NotScheduled,
-		/// No code upgrade has been authorized.
-		NothingAuthorized,
-		/// The given code upgrade has not been authorized.
-		Unauthorized,
 	}

 	/// Latest included block descendants the runtime accepted. In other words, these are
diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs
index d3b65dcdc6390..1f9cb6d54de4f 100755
--- a/cumulus/pallets/parachain-system/src/tests.rs
+++ b/cumulus/pallets/parachain-system/src/tests.rs
@@ -1170,15 +1170,14 @@ fn receive_hrmp_many() {
 #[test]
 fn upgrade_version_checks_should_work() {
 	use codec::Encode;
-	use sp_runtime::DispatchErrorWithPostInfo;
 	use sp_version::RuntimeVersion;

 	let test_data = vec![
-		("test", 0, 1, Err(frame_system::Error::<Test>::SpecVersionNeedsToIncrease)),
-		("test", 1, 0, Err(frame_system::Error::<Test>::SpecVersionNeedsToIncrease)),
-		("test", 1, 1, Err(frame_system::Error::<Test>::SpecVersionNeedsToIncrease)),
-		("test", 1, 2, Err(frame_system::Error::<Test>::SpecVersionNeedsToIncrease)),
-		("test2", 1, 1, Err(frame_system::Error::<Test>::InvalidSpecName)),
+		("test", 0, 1, frame_system::Error::<Test>::SpecVersionNeedsToIncrease),
+		("test", 1, 0, frame_system::Error::<Test>::SpecVersionNeedsToIncrease),
+		("test", 1, 1, frame_system::Error::<Test>::SpecVersionNeedsToIncrease),
+		("test", 1, 2, frame_system::Error::<Test>::SpecVersionNeedsToIncrease),
+		("test2", 1, 1, frame_system::Error::<Test>::InvalidSpecName),
 	];

 	for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() {
@@ -1193,13 +1192,21 @@ fn upgrade_version_checks_should_work() {
 		let mut ext = new_test_ext();
 		ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(read_runtime_version));
 		ext.execute_with(|| {
+			System::set_block_number(1);
+
 			let new_code = vec![1, 2, 3, 4];
 			let new_code_hash = H256(sp_crypto_hashing::blake2_256(&new_code));

 			let _authorize = System::authorize_upgrade(RawOrigin::Root.into(), new_code_hash);
-			let res = System::apply_authorized_upgrade(RawOrigin::None.into(), new_code);
+			assert_ok!(System::apply_authorized_upgrade(RawOrigin::None.into(), new_code));

-			assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res);
+			System::assert_last_event(
+				frame_system::Event::RejectedInvalidAuthorizedUpgrade {
+					code_hash: new_code_hash,
+					error: expected.into(),
+				}
+				.into(),
+			);
 		});
 	}
 }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml
index 35ceffe4c6953..4b229fd311b4a 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml
@@ -27,6 +27,7 @@ sp-runtime = { workspace = true }
 # Polkadot
 pallet-xcm = { workspace = true }
 xcm = { workspace = true }
+xcm-builder = { workspace = true }
 xcm-executor = { workspace = true }
 xcm-runtime-apis = { workspace = true }

diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
index f84d42cb29f8e..f89880b284bcc 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
@@ -25,6 +25,7 @@ mod imports {
 		latest::{ParentThen, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH},
 		prelude::{AccountId32 as AccountId32Junction, *},
 	};
+	pub use xcm_builder::ExternalConsensusLocationsConverterFor;
 	pub use xcm_executor::traits::TransferType;

 	// Cumulus
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
index 7f242bab5a9da..9acdf20e2a955 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
@@ -13,6 +13,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 use crate::imports::*;
+use ahr_xcm_config::UniversalLocation as AssetHubRococoUniversalLocation;
 use codec::{Decode, Encode};
 use emulated_integration_tests_common::xcm_emulator::ConvertLocation;
 use frame_support::pallet_prelude::TypeInfo;
@@ -24,9 +25,7 @@ use snowbridge_pallet_inbound_queue_fixtures::{
 	send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message,
 };
 use snowbridge_pallet_system;
-use snowbridge_router_primitives::inbound::{
-	Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage,
-};
+use snowbridge_router_primitives::inbound::{Command, Destination, MessageV1, VersionedMessage};
 use sp_core::H256;
 use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable};
 use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork;
@@ -319,8 +318,13 @@ fn send_weth_from_ethereum_to_penpal() {
 	let origin_location = (Parent, Parent, ethereum_network_v5).into();

 	// Fund ethereum sovereign on AssetHub
-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	let ethereum_sovereign: AccountId = AssetHubRococo::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubRococoUniversalLocation,
+			AccountId,
+		>::convert_location(&origin_location)
+		.unwrap()
+	});
 	AssetHubRococo::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]);

 	// Create asset on the Penpal parachain.
@@ -526,8 +530,13 @@ fn send_eth_asset_from_asset_hub_to_ethereum_and_back() {
 	use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
 	let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id());
 	let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location);
-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	let ethereum_sovereign: AccountId = AssetHubRococo::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubRococoUniversalLocation,
+			AccountId,
+		>::convert_location(&origin_location)
+		.unwrap()
+	});

 	AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION));
 	BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION));
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
index 05c7021d380ae..facd837cbc080 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml
@@ -28,6 +28,7 @@ sp-runtime = { workspace = true }
 # Polkadot
 pallet-xcm = { workspace = true }
 xcm = { workspace = true }
+xcm-builder = { workspace = true }
 xcm-executor = { workspace = true }
 xcm-runtime-apis = { workspace = true }

diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
index cd5e22372f0e6..a61bc0d5adc6e 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
@@ -26,6 +26,7 @@ mod imports {
 		prelude::{AccountId32 as AccountId32Junction, *},
 		v5,
 	};
+	pub use xcm_builder::ExternalConsensusLocationsConverterFor;
 	pub use xcm_executor::traits::TransferType;

 	// Cumulus
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
index 6789aae83ffe4..ea3ede20590c8 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
@@ -13,7 +13,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 use crate::{imports::*, tests::penpal_emulated_chain::penpal_runtime};
-use asset_hub_westend_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
+use asset_hub_westend_runtime::xcm_config::{
+	bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee,
+	UniversalLocation as AssetHubWestendUniversalLocation,
+};
 use bridge_hub_westend_runtime::{
 	bridge_to_ethereum_config::EthereumGatewayAddress, EthereumBeaconClient, EthereumInboundQueue,
 };
@@ -24,9 +27,7 @@ use hex_literal::hex;
 use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner;
 use snowbridge_core::{inbound::InboundQueueFixture, AssetMetadata, TokenIdOf};
 use snowbridge_pallet_inbound_queue_fixtures::send_native_eth::make_send_native_eth_message;
-use snowbridge_router_primitives::inbound::{
-	Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage,
-};
+use snowbridge_router_primitives::inbound::{Command, Destination, MessageV1, VersionedMessage};
 use sp_core::H256;
 use testnet_parachains_constants::westend::snowbridge::EthereumNetwork;
 use xcm_executor::traits::ConvertLocation;
@@ -161,8 +162,13 @@ fn register_weth_token_from_ethereum_to_asset_hub() {
 fn send_weth_token_from_ethereum_to_asset_hub() {
 	let ethereum_network: NetworkId = EthereumNetwork::get().into();
 	let origin_location = Location::new(2, ethereum_network);
-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	let ethereum_sovereign: AccountId = AssetHubWestend::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubWestendUniversalLocation,
+			AccountId,
+		>::convert_location(&origin_location)
+		.unwrap()
+	});

 	BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id().into(), INITIAL_FUND);

@@ -280,8 +286,13 @@ fn send_weth_from_ethereum_to_penpal() {
 	let origin_location = (Parent, Parent, ethereum_network_v5).into();

 	// Fund ethereum sovereign on AssetHub
-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	let ethereum_sovereign: AccountId = AssetHubWestend::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubWestendUniversalLocation,
+			AccountId,
+		>::convert_location(&origin_location)
+		.unwrap()
+	});
 	AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]);

 	// Create asset on the Penpal parachain.
@@ -387,8 +398,13 @@ fn send_eth_asset_from_asset_hub_to_ethereum_and_back() {
 	use asset_hub_westend_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
 	let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id());
 	let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location);
-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	let ethereum_sovereign: AccountId = AssetHubWestend::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubWestendUniversalLocation,
+			AccountId,
+		>::convert_location(&origin_location)
+		.unwrap()
+	});

 	AssetHubWestend::force_default_xcm_version(Some(XCM_VERSION));
 	BridgeHubWestend::force_default_xcm_version(Some(XCM_VERSION));

@@ -934,13 +950,17 @@ fn transfer_relay_token() {
 	let expected_token_id = TokenIdOf::convert_location(&expected_asset_id).unwrap();

-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new(
+	let ethereum_sovereign: AccountId = AssetHubWestend::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubWestendUniversalLocation,
+			[u8; 32],
+		>::convert_location(&Location::new(
 			2,
 			[GlobalConsensus(EthereumNetwork::get())],
 		))
 		.unwrap()
-		.into();
+		.into()
+	});

 	// Register token
 	BridgeHubWestend::execute_with(|| {
@@ -1082,10 +1102,14 @@ fn transfer_ah_token() {
 	let ethereum_destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]);

-	let ethereum_sovereign: AccountId =
-		EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&ethereum_destination)
-			.unwrap()
-			.into();
+	let ethereum_sovereign: AccountId = AssetHubWestend::execute_with(|| {
+		ExternalConsensusLocationsConverterFor::<
+			AssetHubWestendUniversalLocation,
+			[u8; 32],
+		>::convert_location(&ethereum_destination)
+		.unwrap()
+		.into()
+	});
 	AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]);

 	let asset_id: Location =
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs
index 92a30e6b15b9e..35e122d335b30 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_proxy.rs
@@ -16,9 +16,9 @@
 //! Autogenerated weights for `pallet_proxy`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `ef4134d66388`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024

 // Executed Command:
@@ -57,11 +57,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 14_792_000 picoseconds.
-		Weight::from_parts(15_713_712, 0)
+		// Minimum execution time: 14_174_000 picoseconds.
+		Weight::from_parts(15_016_964, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_483
-			.saturating_add(Weight::from_parts(40_867, 0).saturating_mul(p.into()))
+			// Standard Error: 937
+			.saturating_add(Weight::from_parts(29_307, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Proxy::Proxies` (r:1 w:0)
@@ -76,13 +76,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `488 + a * (68 ±0) + p * (37 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 43_689_000 picoseconds.
-		Weight::from_parts(43_788_370, 0)
+		// Minimum execution time: 40_810_000 picoseconds.
+		Weight::from_parts(41_201_093, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 3_353
-			.saturating_add(Weight::from_parts(164_465, 0).saturating_mul(a.into()))
-			// Standard Error: 3_465
-			.saturating_add(Weight::from_parts(66_316, 0).saturating_mul(p.into()))
+			// Standard Error: 3_289
+			.saturating_add(Weight::from_parts(155_098, 0).saturating_mul(a.into()))
+			// Standard Error: 3_398
+			.saturating_add(Weight::from_parts(51_292, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -96,13 +96,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `403 + a * (68 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 31_017_000 picoseconds.
-		Weight::from_parts(28_951_731, 0)
+		// Minimum execution time: 29_268_000 picoseconds.
+		Weight::from_parts(27_279_884, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 6_151
-			.saturating_add(Weight::from_parts(131_476, 0).saturating_mul(a.into()))
-			// Standard Error: 6_355
-			.saturating_add(Weight::from_parts(115_777, 0).saturating_mul(p.into()))
+			// Standard Error: 5_927
+			.saturating_add(Weight::from_parts(118_689, 0).saturating_mul(a.into()))
+			// Standard Error: 6_124
+			.saturating_add(Weight::from_parts(112_018, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -116,13 +116,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `403 + a * (68 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 30_616_000 picoseconds.
-		Weight::from_parts(29_705_973, 0)
+		// Minimum execution time: 28_980_000 picoseconds.
+		Weight::from_parts(27_242_237, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 6_339
-			.saturating_add(Weight::from_parts(114_768, 0).saturating_mul(a.into()))
-			// Standard Error: 6_550
-			.saturating_add(Weight::from_parts(101_246, 0).saturating_mul(p.into()))
+			// Standard Error: 6_279
+			.saturating_add(Weight::from_parts(121_215, 0).saturating_mul(a.into()))
+			// Standard Error: 6_488
+			.saturating_add(Weight::from_parts(116_319, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -138,13 +138,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `420 + a * (68 ±0) + p * (37 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 38_359_000 picoseconds.
-		Weight::from_parts(39_155_898, 0)
+		// Minimum execution time: 37_394_000 picoseconds.
+		Weight::from_parts(37_283_951, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 2_535
-			.saturating_add(Weight::from_parts(157_477, 0).saturating_mul(a.into()))
-			// Standard Error: 2_619
-			.saturating_add(Weight::from_parts(62_098, 0).saturating_mul(p.into()))
+			// Standard Error: 2_431
+			.saturating_add(Weight::from_parts(150_859, 0).saturating_mul(a.into()))
+			// Standard Error: 2_511
+			.saturating_add(Weight::from_parts(59_816, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -155,11 +155,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 24_875_000 picoseconds.
-		Weight::from_parts(26_150_111, 0)
+		// Minimum execution time: 24_160_000 picoseconds.
+		Weight::from_parts(24_896_954, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_554
-			.saturating_add(Weight::from_parts(53_104, 0).saturating_mul(p.into()))
+			// Standard Error: 1_200
+			.saturating_add(Weight::from_parts(51_579, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -170,11 +170,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 24_962_000 picoseconds.
-		Weight::from_parts(26_475_274, 0)
+		// Minimum execution time: 23_957_000 picoseconds.
+		Weight::from_parts(24_867_217, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_919
-			.saturating_add(Weight::from_parts(46_473, 0).saturating_mul(p.into()))
+			// Standard Error: 1_157
+			.saturating_add(Weight::from_parts(46_274, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -185,11 +185,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 22_487_000 picoseconds.
-		Weight::from_parts(23_347_544, 0)
+		// Minimum execution time: 21_421_000 picoseconds.
+		Weight::from_parts(22_147_331, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_526
-			.saturating_add(Weight::from_parts(37_858, 0).saturating_mul(p.into()))
+			// Standard Error: 1_126
+			.saturating_add(Weight::from_parts(35_078, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -200,11 +200,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `173`
 		//  Estimated: `4706`
-		// Minimum execution time: 26_361_000 picoseconds.
-		Weight::from_parts(27_377_585, 0)
+		// Minimum execution time: 25_009_000 picoseconds.
+		Weight::from_parts(26_272_099, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_612
-			.saturating_add(Weight::from_parts(11_566, 0).saturating_mul(p.into()))
+			// Standard Error: 1_183
+			.saturating_add(Weight::from_parts(13_796, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -215,12 +215,28 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `198 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 23_371_000 picoseconds.
-		Weight::from_parts(24_589_016, 0)
+		// Minimum execution time: 22_450_000 picoseconds.
+		Weight::from_parts(23_280_004, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 2_453
-			.saturating_add(Weight::from_parts(31_019, 0).saturating_mul(p.into()))
+			// Standard Error: 958
+			.saturating_add(Weight::from_parts(31_635, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `Proxy::Proxies` (r:1 w:1)
+	/// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Proxy::Announcements` (r:1 w:1)
+	/// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
+	fn poke_deposit() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `487`
+		//  Estimated: `5698`
+		// Minimum execution time: 47_219_000 picoseconds.
+		Weight::from_parts(48_694_000, 0)
+			.saturating_add(Weight::from_parts(0, 5698))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
 }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
index 84526b2e4f2ee..bf44d2408d030 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
@@ -42,7 +42,6 @@ use parachains_common::{
 };
 use polkadot_parachain_primitives::primitives::Sibling;
 use polkadot_runtime_common::xcm_sender::ExponentialPrice;
-use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor;
 use sp_runtime::traits::{AccountIdConversion, ConvertInto, TryConvertInto};
 use testnet_parachains_constants::rococo::snowbridge::{
 	EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX,
@@ -52,16 +51,15 @@ use xcm_builder::{
 	AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom,
 	AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom,
 	AllowTopLevelPaidExecutionFrom, DenyRecursively, DenyReserveTransferToRelayChain, DenyThenTry,
-	DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor,
-	FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription,
-	IsConcrete, LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking,
-	NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative,
-	SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
-	SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter,
-	SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith,
-	StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
-	WeightInfoBounds, WithComputedOrigin, WithLatestLocationConverter, WithUniqueTopic,
-	XcmFeeManagerFromComponents,
+	DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, ExternalConsensusLocationsConverterFor,
+	FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, HashedDescription, IsConcrete,
+	LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, NonFungiblesAdapter,
+	ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount,
+	SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative,
+	SignedToAccountId32, SingleAssetExchangeAdapter, SovereignPaidRemoteExporter,
+	SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit,
+	TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin,
+	WithLatestLocationConverter, WithUniqueTopic, XcmFeeManagerFromComponents,
 };
 use xcm_executor::XcmExecutor;

@@ -105,12 +103,8 @@ pub type LocationToAccountId = (
 	AccountId32Aliases<RelayNetwork, AccountId>,
 	// Foreign locations alias into accounts according to a hash of their standard description.
 	HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
-	// Different global consensus parachain sovereign account.
-	// (Used for over-bridge transfers and reserve processing)
-	GlobalConsensusParachainConvertsFor<UniversalLocation, AccountId>,
-	// Ethereum contract sovereign account.
-	// (Used to get convert ethereum contract locations to sovereign account)
-	EthereumLocationsConverterFor<AccountId>,
+	// Different global consensus locations sovereign accounts.
+	ExternalConsensusLocationsConverterFor<UniversalLocation, AccountId>,
 );

 /// Means for transacting the native currency on this chain.
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
index 0057459fc93c6..934cf71e009b6 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
@@ -47,13 +47,17 @@ use frame_support::{
 	},
 	weights::{Weight, WeightToFee as WeightToFeeT},
 };
+use hex_literal::hex;
 use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance};
 use sp_consensus_aura::SlotDuration;
 use sp_core::crypto::Ss58Codec;
 use sp_runtime::traits::MaybeEquivalence;
 use std::convert::Into;
 use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee};
-use xcm::latest::prelude::{Assets as XcmAssets, *};
+use xcm::latest::{
+	prelude::{Assets as XcmAssets, *},
+	WESTEND_GENESIS_HASH,
+};
 use xcm_builder::WithLatestLocationConverter;
 use xcm_executor::traits::{JustTry, WeightTrader};
 use xcm_runtime_apis::conversions::LocationToAccountHelper;
@@ -1514,19 +1518,142 @@ fn location_conversion_works() {
 			),
 			expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH",
 		},
+		// ExternalConsensusLocationsConverterFor
+		TestCase {
+			description: "Describe Ethereum Location",
+			location: Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]),
+			expected_account_id_str: "5GjRnmh5o3usSYzVmsxBWzHEpvJyHK4tKNPhjpUR3ASrruBy",
+		},
+		TestCase {
+			description: "Describe Ethereum AccountKey",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(Ethereum { chain_id: 11155111 }),
+					AccountKey20 {
+						network: None,
+						key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"),
+					},
+				],
+			),
+			expected_account_id_str: "5HV4j4AsqT349oLRZmTjhGKDofPBWmWaPUfWGaRkuvzkjW9i",
+		},
+		TestCase {
+			description: "Describe Westend Location",
+			location: Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]),
+			expected_account_id_str: "5Fb4pyqFuYLZ43USEAcVUBhFTfTckG9zv9kUaVnmR79YgBCe",
+		},
+		TestCase {
+			description: "Describe Westend AccountID",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),
+					AccountId32 { network: None, id: AccountId::from(ALICE).into() },
+				],
+			),
+			expected_account_id_str: "5CpcvNFY6jkMJrd7XQt3yTweRD1WxUeHXvHnbWuVM1MHKHPe",
+		},
+		TestCase {
+			description: "Describe Westend AccountKey",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),
+					AccountKey20 { network: None, key: [0u8; 20] },
+				],
+			),
+			expected_account_id_str: "5FzaTcFwUMyX5Sfe7wRGuc3zw1cbpGAGZpmAsxS4tBX6x6U3",
"5FzaTcFwUMyX5Sfe7wRGuc3zw1cbpGAGZpmAsxS4tBX6x6U3", + }, + TestCase { + description: "Describe Westend Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5CpdRCmCYwnxS1mifwEddYHDJR8ydDfTpi1gwAQKQvfAjjzu", + }, + TestCase { + description: "Describe Westend Parachain Location", + location: Location::new( + 2, + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)], + ), + expected_account_id_str: "5CkWf1L181BiSbvoofnzfSg8ZLiBK3i1U4sknzETHk8QS2mA", + }, + TestCase { + description: "Describe Westend Parachain AccountID", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5G6JJUm6tgsxJhRn76VGme8WGukdUNiBBK6ABUtH9YXEjEk9", + }, + TestCase { + description: "Describe Westend Parachain AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + AccountKey20 { network: None, key: [0u8; 20] }, + ], + ), + expected_account_id_str: "5EFpSvq8BUAjdjY4tuGhGXZ66P16iQnX7nxsNoHy7TM6NhMa", + }, + TestCase { + description: "Describe Westend Parachain Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5GfwA4qaz9wpQPPHmf5MSKqvsPyrfx1yYeeZB1SUkqDuRuZ1", + }, + TestCase { + description: "Describe Westend USDT Location", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + PalletInstance(50), + GeneralIndex(1984), + ], + ), + expected_account_id_str: "5Hd77ZjbVRrYiRXER8qo9DRDB8ZzaKtRswZoypMnMLdixzMs", + }, ]; - for tc in test_cases { - let expected = - AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + ExtBuilder::::default() + .with_collators(collator_session_keys().collators()) + .with_session_keys(collator_session_keys().session_keys()) + .with_para_id(1000.into()) + .build() + .execute_with(|| { + for tc in test_cases { + let expected = AccountId::from_string(tc.expected_account_id_str) + .expect("Invalid AccountId string"); - let got = LocationToAccountHelper::::convert_location( - tc.location.into(), - ) - .unwrap(); + let got = + LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); - assert_eq!(got, expected, "{}", tc.description); - } + assert_eq!(got, expected, "{}", tc.description); + } + }); } #[test] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs index fcab49a6efe1b..09a94907f0d18 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `73b9817d6032`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
 //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024

 // Executed Command:
@@ -57,11 +57,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 14_571_000 picoseconds.
-		Weight::from_parts(15_152_428, 0)
+		// Minimum execution time: 14_242_000 picoseconds.
+		Weight::from_parts(14_960_578, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_064
-			.saturating_add(Weight::from_parts(38_023, 0).saturating_mul(p.into()))
+			// Standard Error: 1_137
+			.saturating_add(Weight::from_parts(35_649, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Proxy::Proxies` (r:1 w:0)
@@ -76,13 +76,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `488 + a * (68 ±0) + p * (37 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 42_038_000 picoseconds.
-		Weight::from_parts(42_516_107, 0)
+		// Minimum execution time: 41_637_000 picoseconds.
+		Weight::from_parts(41_169_266, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 2_461
-			.saturating_add(Weight::from_parts(153_613, 0).saturating_mul(a.into()))
-			// Standard Error: 2_543
-			.saturating_add(Weight::from_parts(51_289, 0).saturating_mul(p.into()))
+			// Standard Error: 2_978
+			.saturating_add(Weight::from_parts(172_008, 0).saturating_mul(a.into()))
+			// Standard Error: 3_077
+			.saturating_add(Weight::from_parts(68_505, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -96,13 +96,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `403 + a * (68 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 26_115_000 picoseconds.
-		Weight::from_parts(26_773_221, 0)
+		// Minimum execution time: 26_087_000 picoseconds.
+		Weight::from_parts(26_927_415, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 1_941
-			.saturating_add(Weight::from_parts(153_810, 0).saturating_mul(a.into()))
-			// Standard Error: 2_005
-			.saturating_add(Weight::from_parts(32_722, 0).saturating_mul(p.into()))
+			// Standard Error: 1_873
+			.saturating_add(Weight::from_parts(147_445, 0).saturating_mul(a.into()))
+			// Standard Error: 1_935
+			.saturating_add(Weight::from_parts(22_864, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -116,13 +116,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `403 + a * (68 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 26_576_000 picoseconds.
-		Weight::from_parts(26_798_549, 0)
+		// Minimum execution time: 26_473_000 picoseconds.
+		Weight::from_parts(26_682_318, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 1_759
-			.saturating_add(Weight::from_parts(152_069, 0).saturating_mul(a.into()))
-			// Standard Error: 1_818
-			.saturating_add(Weight::from_parts(30_009, 0).saturating_mul(p.into()))
+			// Standard Error: 1_651
+			.saturating_add(Weight::from_parts(153_500, 0).saturating_mul(a.into()))
+			// Standard Error: 1_706
+			.saturating_add(Weight::from_parts(25_850, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -138,13 +138,13 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `420 + a * (68 ±0) + p * (37 ±0)`
 		//  Estimated: `5698`
-		// Minimum execution time: 38_171_000 picoseconds.
-		Weight::from_parts(38_884_925, 0)
+		// Minimum execution time: 37_884_000 picoseconds.
+		Weight::from_parts(38_379_964, 0)
 			.saturating_add(Weight::from_parts(0, 5698))
-			// Standard Error: 1_992
-			.saturating_add(Weight::from_parts(156_025, 0).saturating_mul(a.into()))
-			// Standard Error: 2_058
-			.saturating_add(Weight::from_parts(47_982, 0).saturating_mul(p.into()))
+			// Standard Error: 1_965
+			.saturating_add(Weight::from_parts(151_226, 0).saturating_mul(a.into()))
+			// Standard Error: 2_030
+			.saturating_add(Weight::from_parts(51_251, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -155,11 +155,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 24_404_000 picoseconds.
-		Weight::from_parts(25_715_478, 0)
+		// Minimum execution time: 24_317_000 picoseconds.
+		Weight::from_parts(25_244_966, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_502
-			.saturating_add(Weight::from_parts(50_577, 0).saturating_mul(p.into()))
+			// Standard Error: 1_330
+			.saturating_add(Weight::from_parts(50_765, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -170,11 +170,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 24_375_000 picoseconds.
-		Weight::from_parts(25_794_656, 0)
+		// Minimum execution time: 23_865_000 picoseconds.
+		Weight::from_parts(25_111_800, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_353
-			.saturating_add(Weight::from_parts(41_072, 0).saturating_mul(p.into()))
+			// Standard Error: 1_501
+			.saturating_add(Weight::from_parts(45_002, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -185,11 +185,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `161 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 21_992_000 picoseconds.
-		Weight::from_parts(22_942_057, 0)
+		// Minimum execution time: 21_721_000 picoseconds.
+		Weight::from_parts(22_626_669, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_379
-			.saturating_add(Weight::from_parts(34_999, 0).saturating_mul(p.into()))
+			// Standard Error: 1_314
+			.saturating_add(Weight::from_parts(27_650, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -200,11 +200,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `173`
 		//  Estimated: `4706`
-		// Minimum execution time: 25_579_000 picoseconds.
-		Weight::from_parts(27_147_325, 0)
+		// Minimum execution time: 25_709_000 picoseconds.
+		Weight::from_parts(27_143_129, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_506
-			.saturating_add(Weight::from_parts(5_706, 0).saturating_mul(p.into()))
+			// Standard Error: 1_723
+			.saturating_add(Weight::from_parts(8_047, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -215,12 +215,28 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `198 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 22_786_000 picoseconds.
-		Weight::from_parts(23_900_996, 0)
+		// Minimum execution time: 22_817_000 picoseconds.
+		Weight::from_parts(23_755_496, 0)
 			.saturating_add(Weight::from_parts(0, 4706))
-			// Standard Error: 1_730
-			.saturating_add(Weight::from_parts(34_801, 0).saturating_mul(p.into()))
+			// Standard Error: 1_854
+			.saturating_add(Weight::from_parts(33_383, 0).saturating_mul(p.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `Proxy::Proxies` (r:1 w:1)
+	/// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Proxy::Announcements` (r:1 w:1)
+	/// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
+	fn poke_deposit() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `487`
+		//  Estimated: `5698`
+		// Minimum execution time: 43_822_000 picoseconds.
+		Weight::from_parts(45_419_000, 0)
+			.saturating_add(Weight::from_parts(0, 5698))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
 }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
index 46c4e4d0b28e4..cc093d6fa5752 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
@@ -42,7 +42,6 @@ use parachains_common::{
 };
 use polkadot_parachain_primitives::primitives::Sibling;
 use polkadot_runtime_common::xcm_sender::ExponentialPrice;
-use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor;
 use sp_runtime::traits::{AccountIdConversion, ConvertInto, TryConvertInto};
 use westend_runtime_constants::system_parachain::COLLECTIVES_ID;
 use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH};
@@ -50,16 +49,15 @@ use xcm_builder::{
 	AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom,
 	AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom,
 	AllowTopLevelPaidExecutionFrom, DenyRecursively, DenyReserveTransferToRelayChain, DenyThenTry,
-	DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor,
-	FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription,
-	IsConcrete, LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking,
-	NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative,
-	SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
-	SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter,
-	SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith,
-	StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
-	WeightInfoBounds, WithComputedOrigin, WithLatestLocationConverter, WithUniqueTopic,
-	XcmFeeManagerFromComponents,
+	DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, ExternalConsensusLocationsConverterFor,
+	FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, HashedDescription, IsConcrete,
+	LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, NonFungiblesAdapter,
+	ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount,
+	SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative,
+	SignedToAccountId32, SingleAssetExchangeAdapter, SovereignPaidRemoteExporter,
+	SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit,
+	TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin,
+	WithLatestLocationConverter, WithUniqueTopic, XcmFeeManagerFromComponents,
 };
 use xcm_executor::XcmExecutor;

@@ -99,12 +97,8 @@ pub type LocationToAccountId = (
 	AccountId32Aliases<RelayNetwork, AccountId>,
 	// Foreign locations alias into accounts according to a hash of their standard description.
 	HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
-	// Different global consensus parachain sovereign account.
-	// (Used for over-bridge transfers and reserve processing)
-	GlobalConsensusParachainConvertsFor<UniversalLocation, AccountId>,
-	// Ethereum contract sovereign account.
-	// (Used to get convert ethereum contract locations to sovereign account)
-	EthereumLocationsConverterFor<AccountId>,
+	// Different global consensus locations sovereign accounts.
+	ExternalConsensusLocationsConverterFor<UniversalLocation, AccountId>,
 );

 /// Means for transacting the native currency on this chain.
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
index 38673606541b0..58d4fe9b14023 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
@@ -47,6 +47,7 @@ use frame_support::{
 	},
 	weights::{Weight, WeightToFee as WeightToFeeT},
 };
+use hex_literal::hex;
 use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance};
 use sp_consensus_aura::SlotDuration;
 use sp_core::crypto::Ss58Codec;
@@ -1487,19 +1488,141 @@ fn location_conversion_works() {
 			),
 			expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH",
 		},
+		// ExternalConsensusLocationsConverterFor
+		TestCase {
+			description: "Describe Ethereum Location",
+			location: Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]),
+			expected_account_id_str: "5GjRnmh5o3usSYzVmsxBWzHEpvJyHK4tKNPhjpUR3ASrruBy",
+		},
+		TestCase {
+			description: "Describe Ethereum AccountKey",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(Ethereum { chain_id: 11155111 }),
+					AccountKey20 {
+						network: None,
+						key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"),
+					},
+				],
+			),
+			expected_account_id_str: "5HV4j4AsqT349oLRZmTjhGKDofPBWmWaPUfWGaRkuvzkjW9i",
+		},
+		TestCase {
+			description: "Describe Rococo Location",
+			location: Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]),
+			expected_account_id_str: "5FfpYGrFybJXFsQk7dabr1vEbQ5ycBBu85vrDjPJsF3q4A8P",
+		},
+		TestCase {
+			description: "Describe Rococo AccountID",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					AccountId32 { network: None, id: AccountId::from(ALICE).into() },
+				],
+			),
+			expected_account_id_str: "5CXVYinTeQKQGWAP9RqaPhitk7ybrqBZf66kCJmtAjV4Xwbg",
+		},
+		TestCase {
+			description: "Describe Rococo AccountKey",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					AccountKey20 { network: None, key: [0u8; 20] },
+				],
+			),
+			expected_account_id_str: "5GbRhbJWb2hZY7TCeNvTqZXaP3x3UY5xt4ccxpV1ZtJS1gFL",
+		},
+		TestCase {
+			description: "Describe Rococo Treasury Plurality",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					Plurality { id: BodyId::Treasury, part: BodyPart::Voice },
+				],
+			),
+			expected_account_id_str: "5EGi9NgJNGoMawY8ubnCDLmbdEW6nt2W2U2G3j9E3jXmspT7",
+		},
+		TestCase {
+			description: "Describe Rococo Parachain Location",
+			location: Location::new(
+				2,
+				[GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(1000)],
+			),
+			expected_account_id_str: "5CQeLKM7XC1xNBiQLp26Wa948cudjYRD5VzvaTG3BjnmUvLL",
+		},
+		TestCase {
+			description: "Describe Rococo Parachain AccountID",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					Parachain(1000),
+					AccountId32 { network: None, id: AccountId::from(ALICE).into() },
+				],
+			),
+			expected_account_id_str: "5H8HsK17dV7i7J8fZBNd438rvwd7rHviZxJqyZpLEGJn6vb6",
+		},
+		TestCase {
+			description: "Describe Rococo Parachain AccountKey",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					Parachain(1000),
+					AccountKey20 { network: None, key: [0u8; 20] },
+				],
+			),
+			expected_account_id_str: "5G121Rtddxn6zwMD2rZZGXxFHZ2xAgzFUgM9ki4A8wMGo4e2",
+		},
+		TestCase {
+			description: "Describe Rococo Parachain Treasury Plurality",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					Parachain(1000),
+					Plurality { id: BodyId::Treasury, part: BodyPart::Voice },
+				],
+			),
+			expected_account_id_str: "5FNk7za2pQ71NHnN1jA63hJxJwdQywiVGnK6RL3nYjCdkWDF",
+		},
+		TestCase {
+			description: "Describe Rococo USDT Location",
+			location: Location::new(
+				2,
+				[
+					GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
+					Parachain(1000),
+					PalletInstance(50),
+					GeneralIndex(1984),
+				],
+			),
+			expected_account_id_str: "5HNfT779KHeAL7PaVBTQDVxrT6dfJZJoQMTScxLSahBc9kxF",
+		},
 	];

-	for tc in test_cases {
-		let expected =
-			AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string");
-
-		let got = LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location(
-			tc.location.into(),
-		)
-		.unwrap();
-
-		assert_eq!(got, expected, "{}", tc.description);
-	}
+	ExtBuilder::<Runtime>::default()
+		.with_collators(collator_session_keys().collators())
+		.with_session_keys(collator_session_keys().session_keys())
+		.with_para_id(1000.into())
+		.build()
+		.execute_with(|| {
+			for tc in test_cases {
+				let expected = AccountId::from_string(tc.expected_account_id_str)
+					.expect("Invalid AccountId string");
+				let got =
+					LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location(
+						tc.location.into(),
+					)
+					.unwrap();

+				assert_eq!(got, expected, "{}", tc.description);
+			}
+		});
 }

 #[test]
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
index 13145a723baa6..31b14f9a0517d 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
@@ -45,11 +45,12 @@ use xcm_builder::{
 	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
 	AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
 	DenyRecursively, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal,
-	DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HandleFee,
-	HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative,
-	SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
-	SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
-	TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
+	DescribeFamily, EnsureXcmOrigin, ExternalConsensusLocationsConverterFor,
+	FrameTransactionalProcessor, FungibleAdapter, HandleFee, HashedDescription, IsConcrete,
+	ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount,
+	SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative,
+	SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId,
+	UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
 };
 use xcm_executor::{
 	traits::{FeeManager, FeeReason, FeeReason::Export},
@@ -82,6 +83,8 @@ pub type LocationToAccountId = (
 	AccountId32Aliases<RelayNetwork, AccountId>,
 	// Foreign locations alias into accounts according to a hash of their standard description.
 	HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
+	// Different global consensus locations sovereign accounts.
+	ExternalConsensusLocationsConverterFor<UniversalLocation, AccountId>,
 );

 /// Means for transacting the native currency on this chain.
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 111da98b65c7c..b6530d8e8a18d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -27,7 +27,9 @@ use bridge_hub_rococo_runtime::{ use bridge_hub_test_utils::{GovernanceOrigin, SlotDurations}; use codec::{Decode, Encode}; use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; +use hex_literal::hex; use parachains_common::{AccountId, AuraId, Balance}; +use parachains_runtimes_test_utils::ExtBuilder; use snowbridge_core::ChannelId; use sp_consensus_aura::SlotDuration; use sp_core::{crypto::Ss58Codec, H160}; @@ -37,7 +39,7 @@ use sp_runtime::{ AccountId32, Perbill, }; use testnet_parachains_constants::rococo::{consensus::*, fee::WeightToFee}; -use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; use xcm_runtime_apis::conversions::LocationToAccountHelper; parameter_types! { @@ -851,20 +853,142 @@ fn location_conversion_works() { ), expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", }, + // ExternalConsensusLocationsConverterFor + TestCase { + description: "Describe Ethereum Location", + location: Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]), + expected_account_id_str: "5GjRnmh5o3usSYzVmsxBWzHEpvJyHK4tKNPhjpUR3ASrruBy", + }, + TestCase { + description: "Describe Ethereum AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: 11155111 }), + AccountKey20 { + network: None, + key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"), + }, + ], + ), + expected_account_id_str: "5HV4j4AsqT349oLRZmTjhGKDofPBWmWaPUfWGaRkuvzkjW9i", + }, + TestCase { + description: "Describe Westend Location", + location: Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]), + expected_account_id_str: "5Fb4pyqFuYLZ43USEAcVUBhFTfTckG9zv9kUaVnmR79YgBCe", + }, + TestCase { + description: "Describe Westend AccountID", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Junction::AccountId32 { network: None, id: AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5EEB1syXCCSEFk26ZYjH47WMp1QjYHf3q5zcnqWWY9Tr6gUc", + }, + TestCase { + description: "Describe Westend AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + AccountKey20 { network: None, key: [0u8; 20] }, + ], + ), + expected_account_id_str: "5FzaTcFwUMyX5Sfe7wRGuc3zw1cbpGAGZpmAsxS4tBX6x6U3", + }, + TestCase { + description: "Describe Westend Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5CpdRCmCYwnxS1mifwEddYHDJR8ydDfTpi1gwAQKQvfAjjzu", + }, + TestCase { + description: "Describe Westend Parachain Location", + location: Location::new( + 2, + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)], + ), + expected_account_id_str: "5CkWf1L181BiSbvoofnzfSg8ZLiBK3i1U4sknzETHk8QS2mA", + }, + TestCase { + description: "Describe Westend Parachain AccountID", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + Junction::AccountId32 { network: None, id: 
AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5HBG915qTKYWzqEs4VocHLCa7ftC7JfJCpvSxk6LmXWJvhbU", + }, + TestCase { + description: "Describe Westend Parachain AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + AccountKey20 { network: None, key: [0u8; 20] }, + ], + ), + expected_account_id_str: "5EFpSvq8BUAjdjY4tuGhGXZ66P16iQnX7nxsNoHy7TM6NhMa", + }, + TestCase { + description: "Describe Westend Parachain Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5GfwA4qaz9wpQPPHmf5MSKqvsPyrfx1yYeeZB1SUkqDuRuZ1", + }, + TestCase { + description: "Describe Westend USDT Location", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(1000), + PalletInstance(50), + GeneralIndex(1984), + ], + ), + expected_account_id_str: "5Hd77ZjbVRrYiRXER8qo9DRDB8ZzaKtRswZoypMnMLdixzMs", + }, ]; - for tc in test_cases { - let expected = - AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); - - let got = LocationToAccountHelper::< - AccountId, - bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - >::convert_location(tc.location.into()) - .unwrap(); - - assert_eq!(got, expected, "{}", tc.description); - } + ExtBuilder::::default() + .with_collators(collator_session_keys().collators()) + .with_session_keys(collator_session_keys().session_keys()) + .with_para_id(1000.into()) + .build() + .execute_with(|| { + for tc in test_cases { + let expected = AccountId::from_string(tc.expected_account_id_str) + .expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::< + AccountId, + bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + >::convert_location(tc.location.into()) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } + }); } #[test] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 170376318dcbd..205fc6ed4b87a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -44,11 +44,12 @@ use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyRecursively, DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, - DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HandleFee, - HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + DescribeFamily, EnsureXcmOrigin, ExternalConsensusLocationsConverterFor, + FrameTransactionalProcessor, FungibleAdapter, HandleFee, HashedDescription, IsConcrete, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, 
TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{ traits::{FeeManager, FeeReason, FeeReason::Export}, @@ -81,6 +82,8 @@ pub type LocationToAccountId = ( AccountId32Aliases<RelayNetwork, AccountId>, // Foreign locations alias into accounts according to a hash of their standard description. HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>, + // Sovereign accounts for locations from other global consensus systems. + ExternalConsensusLocationsConverterFor<UniversalLocation, AccountId>, ); /// Means for transacting the native currency on this chain. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index d9b69c5480481..9332da3194c05 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -49,7 +49,9 @@ use frame_support::{ ConstU8, }, }; +use hex_literal::hex; use parachains_common::{AccountId, AuraId, Balance}; +use parachains_runtimes_test_utils::ExtBuilder; use sp_consensus_aura::SlotDuration; use sp_core::crypto::Ss58Codec; use sp_keyring::Sr25519Keyring::{Alice, Bob}; @@ -58,7 +60,7 @@ use sp_runtime::{ AccountId32, Either, Perbill, }; use testnet_parachains_constants::westend::{consensus::*, fee::WeightToFee}; -use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; use xcm_runtime_apis::conversions::LocationToAccountHelper; // Random para id of sibling chain used in tests. @@ -536,19 +538,142 @@ fn location_conversion_works() { ), expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", }, + // ExternalConsensusLocationsConverterFor + TestCase { + description: "Describe Ethereum Location", + location: Location::new(2, [GlobalConsensus(Ethereum { chain_id: 11155111 })]), + expected_account_id_str: "5GjRnmh5o3usSYzVmsxBWzHEpvJyHK4tKNPhjpUR3ASrruBy", + }, + TestCase { + description: "Describe Ethereum AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: 11155111 }), + AccountKey20 { + network: None, + key: hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"), + }, + ], + ), + expected_account_id_str: "5HV4j4AsqT349oLRZmTjhGKDofPBWmWaPUfWGaRkuvzkjW9i", + }, + TestCase { + description: "Describe Rococo Location", + location: Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]), + expected_account_id_str: "5FfpYGrFybJXFsQk7dabr1vEbQ5ycBBu85vrDjPJsF3q4A8P", + }, + TestCase { + description: "Describe Rococo AccountID", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + xcm::prelude::AccountId32 { network: None, id: AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5CYn32qPAc8FpQP55Br6AS2ZKhfCHD8Tt3v4CnCZo1rhDPd4", + }, + TestCase { + description: "Describe Rococo AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + AccountKey20 { network: None, key: [0u8; 20] }, + ], + ), + expected_account_id_str: "5GbRhbJWb2hZY7TCeNvTqZXaP3x3UY5xt4ccxpV1ZtJS1gFL", + }, + TestCase { + description: "Describe Rococo Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5EGi9NgJNGoMawY8ubnCDLmbdEW6nt2W2U2G3j9E3jXmspT7", + }, + TestCase { + description: "Describe Rococo Parachain Location", + location: Location::new( + 2, + 
[GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(1000)], + ), + expected_account_id_str: "5CQeLKM7XC1xNBiQLp26Wa948cudjYRD5VzvaTG3BjnmUvLL", + }, + TestCase { + description: "Describe Rococo Parachain AccountID", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(1000), + xcm::prelude::AccountId32 { network: None, id: AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5CWnqmyXccGPg27BTxGmycvdEs5HvQq2FQY61xsS8H7uAvmW", + }, + TestCase { + description: "Describe Rococo Parachain AccountKey", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(1000), + AccountKey20 { network: None, key: [0u8; 20] }, + ], + ), + expected_account_id_str: "5G121Rtddxn6zwMD2rZZGXxFHZ2xAgzFUgM9ki4A8wMGo4e2", + }, + TestCase { + description: "Describe Rococo Parachain Treasury Plurality", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(1000), + Plurality { id: BodyId::Treasury, part: BodyPart::Voice }, + ], + ), + expected_account_id_str: "5FNk7za2pQ71NHnN1jA63hJxJwdQywiVGnK6RL3nYjCdkWDF", + }, + TestCase { + description: "Describe Rococo USDT Location", + location: Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(1000), + PalletInstance(50), + GeneralIndex(1984), + ], + ), + expected_account_id_str: "5HNfT779KHeAL7PaVBTQDVxrT6dfJZJoQMTScxLSahBc9kxF", + }, ]; - for tc in test_cases { - let expected = - AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); - - let got = LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location( - tc.location.into(), - ) - .unwrap(); - - assert_eq!(got, expected, "{}", tc.description); - } + ExtBuilder::<Runtime>::default() + .with_collators(collator_session_keys().collators()) + .with_session_keys(collator_session_keys().session_keys()) + .with_para_id(1000.into()) + .build() + .execute_with(|| { + for tc in test_cases { + let expected = AccountId::from_string(tc.expected_account_id_str) + .expect("Invalid AccountId string"); + + let got = + LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } + }); } #[test] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs index 41f985811538d..5b8d1d860e159 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `e0f303704c84`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -57,11 +57,11 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_894_000 picoseconds. - Weight::from_parts(15_589_339, 0) + // Minimum execution time: 14_410_000 picoseconds. 
+ Weight::from_parts(15_193_802, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_607 - .saturating_add(Weight::from_parts(36_872, 0).saturating_mul(p.into())) + // Standard Error: 997 + .saturating_add(Weight::from_parts(30_486, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -76,13 +76,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `488 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 42_380_000 picoseconds. - Weight::from_parts(43_727_725, 0) + // Minimum execution time: 42_192_000 picoseconds. + Weight::from_parts(42_093_319, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_764 - .saturating_add(Weight::from_parts(144_220, 0).saturating_mul(a.into())) - // Standard Error: 2_855 - .saturating_add(Weight::from_parts(39_640, 0).saturating_mul(p.into())) + // Standard Error: 2_704 + .saturating_add(Weight::from_parts(158_968, 0).saturating_mul(a.into())) + // Standard Error: 2_794 + .saturating_add(Weight::from_parts(61_101, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -96,13 +96,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 29_880_000 picoseconds. - Weight::from_parts(29_370_254, 0) + // Minimum execution time: 30_338_000 picoseconds. + Weight::from_parts(28_843_418, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 5_994 - .saturating_add(Weight::from_parts(123_707, 0).saturating_mul(a.into())) - // Standard Error: 6_193 - .saturating_add(Weight::from_parts(89_910, 0).saturating_mul(p.into())) + // Standard Error: 6_326 + .saturating_add(Weight::from_parts(124_307, 0).saturating_mul(a.into())) + // Standard Error: 6_536 + .saturating_add(Weight::from_parts(97_949, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -116,13 +116,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 30_243_000 picoseconds. - Weight::from_parts(29_101_364, 0) + // Minimum execution time: 29_682_000 picoseconds. + Weight::from_parts(28_524_214, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 6_516 - .saturating_add(Weight::from_parts(130_667, 0).saturating_mul(a.into())) - // Standard Error: 6_733 - .saturating_add(Weight::from_parts(95_291, 0).saturating_mul(p.into())) + // Standard Error: 6_288 + .saturating_add(Weight::from_parts(127_666, 0).saturating_mul(a.into())) + // Standard Error: 6_497 + .saturating_add(Weight::from_parts(103_590, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -138,13 +138,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `420 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 39_270_000 picoseconds. - Weight::from_parts(39_500_844, 0) + // Minimum execution time: 37_999_000 picoseconds. 
+ Weight::from_parts(37_895_047, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_386 - .saturating_add(Weight::from_parts(144_415, 0).saturating_mul(a.into())) - // Standard Error: 2_465 - .saturating_add(Weight::from_parts(55_156, 0).saturating_mul(p.into())) + // Standard Error: 2_480 + .saturating_add(Weight::from_parts(165_564, 0).saturating_mul(a.into())) + // Standard Error: 2_562 + .saturating_add(Weight::from_parts(66_557, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -155,11 +155,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_633_000 picoseconds. - Weight::from_parts(26_804_755, 0) + // Minimum execution time: 25_113_000 picoseconds. + Weight::from_parts(26_024_377, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_374 - .saturating_add(Weight::from_parts(49_600, 0).saturating_mul(p.into())) + // Standard Error: 1_385 + .saturating_add(Weight::from_parts(45_544, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,11 +170,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_746_000 picoseconds. - Weight::from_parts(26_719_021, 0) + // Minimum execution time: 25_130_000 picoseconds. + Weight::from_parts(26_149_710, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_765 - .saturating_add(Weight::from_parts(47_725, 0).saturating_mul(p.into())) + // Standard Error: 1_578 + .saturating_add(Weight::from_parts(35_938, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -185,11 +185,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_240_000 picoseconds. - Weight::from_parts(23_427_919, 0) + // Minimum execution time: 22_244_000 picoseconds. + Weight::from_parts(23_072_656, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_532 - .saturating_add(Weight::from_parts(34_610, 0).saturating_mul(p.into())) + // Standard Error: 1_299 + .saturating_add(Weight::from_parts(34_781, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -200,11 +200,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 26_708_000 picoseconds. - Weight::from_parts(28_113_891, 0) + // Minimum execution time: 26_399_000 picoseconds. + Weight::from_parts(27_535_679, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_315 - .saturating_add(Weight::from_parts(14_632, 0).saturating_mul(p.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(10_041, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -215,12 +215,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_509_000 picoseconds. - Weight::from_parts(24_555_865, 0) + // Minimum execution time: 23_128_000 picoseconds. 
+ Weight::from_parts(24_202_796, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_385 - .saturating_add(Weight::from_parts(34_040, 0).saturating_mul(p.into())) + // Standard Error: 1_739 + .saturating_add(Weight::from_parts(27_659, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `487` + // Estimated: `5698` + // Minimum execution time: 49_692_000 picoseconds. + Weight::from_parts(51_121_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs index f24dd1c26b0ef..c0fd7f312191d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `731f893ee36e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -57,11 +57,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_371_000 picoseconds. - Weight::from_parts(15_478_815, 0) + // Minimum execution time: 13_890_000 picoseconds. + Weight::from_parts(14_690_357, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_762 - .saturating_add(Weight::from_parts(15_930, 0).saturating_mul(p.into())) + // Standard Error: 1_079 + .saturating_add(Weight::from_parts(35_620, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -76,13 +76,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `454 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_173_000 picoseconds. - Weight::from_parts(43_432_388, 0) + // Minimum execution time: 40_937_000 picoseconds. 
+ Weight::from_parts(41_413_996, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 6_381 - .saturating_add(Weight::from_parts(155_547, 0).saturating_mul(a.into())) - // Standard Error: 6_593 - .saturating_add(Weight::from_parts(55_795, 0).saturating_mul(p.into())) + // Standard Error: 2_304 + .saturating_add(Weight::from_parts(151_878, 0).saturating_mul(a.into())) + // Standard Error: 2_380 + .saturating_add(Weight::from_parts(49_552, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -96,13 +96,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_224_000 picoseconds. - Weight::from_parts(27_902_096, 0) + // Minimum execution time: 25_502_000 picoseconds. + Weight::from_parts(26_072_967, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_611 - .saturating_add(Weight::from_parts(133_140, 0).saturating_mul(a.into())) - // Standard Error: 3_731 - .saturating_add(Weight::from_parts(14_956, 0).saturating_mul(p.into())) + // Standard Error: 1_715 + .saturating_add(Weight::from_parts(150_032, 0).saturating_mul(a.into())) + // Standard Error: 1_772 + .saturating_add(Weight::from_parts(27_530, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -116,13 +116,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_947_000 picoseconds. - Weight::from_parts(27_442_525, 0) + // Minimum execution time: 25_381_000 picoseconds. + Weight::from_parts(25_796_690, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_438 - .saturating_add(Weight::from_parts(139_129, 0).saturating_mul(a.into())) - // Standard Error: 3_553 - .saturating_add(Weight::from_parts(22_162, 0).saturating_mul(p.into())) + // Standard Error: 1_798 + .saturating_add(Weight::from_parts(155_598, 0).saturating_mul(a.into())) + // Standard Error: 1_858 + .saturating_add(Weight::from_parts(31_967, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -138,13 +138,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `386 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_101_000 picoseconds. - Weight::from_parts(38_190_660, 0) + // Minimum execution time: 33_900_000 picoseconds. + Weight::from_parts(37_483_729, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 5_442 - .saturating_add(Weight::from_parts(173_059, 0).saturating_mul(a.into())) - // Standard Error: 5_623 - .saturating_add(Weight::from_parts(77_492, 0).saturating_mul(p.into())) + // Standard Error: 3_283 + .saturating_add(Weight::from_parts(166_328, 0).saturating_mul(a.into())) + // Standard Error: 3_392 + .saturating_add(Weight::from_parts(48_909, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -155,11 +155,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_706_000 picoseconds. - Weight::from_parts(25_739_083, 0) + // Minimum execution time: 24_003_000 picoseconds. 
+ Weight::from_parts(24_851_370, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 3_446 - .saturating_add(Weight::from_parts(62_969, 0).saturating_mul(p.into())) + // Standard Error: 1_101 + .saturating_add(Weight::from_parts(51_924, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,11 +170,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_440_000 picoseconds. - Weight::from_parts(25_773_227, 0) + // Minimum execution time: 23_865_000 picoseconds. + Weight::from_parts(24_891_590, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 3_212 - .saturating_add(Weight::from_parts(48_965, 0).saturating_mul(p.into())) + // Standard Error: 1_213 + .saturating_add(Weight::from_parts(51_884, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -185,11 +185,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_879_000 picoseconds. - Weight::from_parts(23_159_112, 0) + // Minimum execution time: 21_419_000 picoseconds. + Weight::from_parts(22_277_152, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_962 - .saturating_add(Weight::from_parts(26_919, 0).saturating_mul(p.into())) + // Standard Error: 1_286 + .saturating_add(Weight::from_parts(32_631, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -200,11 +200,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `139` // Estimated: `4706` - // Minimum execution time: 25_946_000 picoseconds. - Weight::from_parts(27_209_556, 0) + // Minimum execution time: 25_635_000 picoseconds. + Weight::from_parts(26_592_871, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 5_681 - .saturating_add(Weight::from_parts(34_902, 0).saturating_mul(p.into())) + // Standard Error: 1_635 + .saturating_add(Weight::from_parts(22_103, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -215,12 +215,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `164 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_900_000 picoseconds. - Weight::from_parts(24_094_563, 0) + // Minimum execution time: 22_150_000 picoseconds. 
+ Weight::from_parts(23_367_544, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 4_924 - .saturating_add(Weight::from_parts(54_861, 0).saturating_mul(p.into())) + // Standard Error: 1_500 + .saturating_add(Weight::from_parts(24_164, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `5698` + // Minimum execution time: 43_886_000 picoseconds. + Weight::from_parts(45_017_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs index e2bcbf11561ce..b18f24086e5bf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `050e4dc4313a`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -57,11 +57,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_409_000 picoseconds. - Weight::from_parts(15_053_784, 0) + // Minimum execution time: 13_924_000 picoseconds. + Weight::from_parts(14_790_514, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(33_384, 0).saturating_mul(p.into())) + // Standard Error: 1_262 + .saturating_add(Weight::from_parts(24_379, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -76,13 +76,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `454 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_927_000 picoseconds. - Weight::from_parts(42_129_298, 0) + // Minimum execution time: 40_753_000 picoseconds. 
+ Weight::from_parts(40_824_200, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_790 - .saturating_add(Weight::from_parts(147_133, 0).saturating_mul(a.into())) - // Standard Error: 2_883 - .saturating_add(Weight::from_parts(59_473, 0).saturating_mul(p.into())) + // Standard Error: 3_314 + .saturating_add(Weight::from_parts(156_665, 0).saturating_mul(a.into())) + // Standard Error: 3_424 + .saturating_add(Weight::from_parts(63_749, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -96,13 +96,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_048_000 picoseconds. - Weight::from_parts(26_452_311, 0) + // Minimum execution time: 25_557_000 picoseconds. + Weight::from_parts(26_150_713, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_860 - .saturating_add(Weight::from_parts(159_533, 0).saturating_mul(a.into())) - // Standard Error: 1_922 - .saturating_add(Weight::from_parts(32_811, 0).saturating_mul(p.into())) + // Standard Error: 2_113 + .saturating_add(Weight::from_parts(152_353, 0).saturating_mul(a.into())) + // Standard Error: 2_183 + .saturating_add(Weight::from_parts(30_158, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -116,13 +116,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_980_000 picoseconds. - Weight::from_parts(26_249_773, 0) + // Minimum execution time: 25_589_000 picoseconds. + Weight::from_parts(25_958_292, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_769 - .saturating_add(Weight::from_parts(163_533, 0).saturating_mul(a.into())) - // Standard Error: 1_828 - .saturating_add(Weight::from_parts(39_430, 0).saturating_mul(p.into())) + // Standard Error: 1_983 + .saturating_add(Weight::from_parts(156_891, 0).saturating_mul(a.into())) + // Standard Error: 2_049 + .saturating_add(Weight::from_parts(26_327, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -138,13 +138,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `386 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_077_000 picoseconds. - Weight::from_parts(38_101_638, 0) + // Minimum execution time: 33_748_000 picoseconds. + Weight::from_parts(36_450_227, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 4_328 - .saturating_add(Weight::from_parts(162_966, 0).saturating_mul(a.into())) - // Standard Error: 4_472 - .saturating_add(Weight::from_parts(64_885, 0).saturating_mul(p.into())) + // Standard Error: 3_480 + .saturating_add(Weight::from_parts(193_241, 0).saturating_mul(a.into())) + // Standard Error: 3_595 + .saturating_add(Weight::from_parts(73_997, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -155,11 +155,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_748_000 picoseconds. - Weight::from_parts(25_945_504, 0) + // Minimum execution time: 24_018_000 picoseconds. 
+ Weight::from_parts(25_033_148, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_780 - .saturating_add(Weight::from_parts(45_619, 0).saturating_mul(p.into())) + // Standard Error: 1_434 + .saturating_add(Weight::from_parts(53_810, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,11 +170,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_697_000 picoseconds. - Weight::from_parts(25_776_786, 0) + // Minimum execution time: 23_797_000 picoseconds. + Weight::from_parts(24_845_997, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_957 - .saturating_add(Weight::from_parts(50_166, 0).saturating_mul(p.into())) + // Standard Error: 1_369 + .saturating_add(Weight::from_parts(49_882, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -185,11 +185,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_912_000 picoseconds. - Weight::from_parts(22_823_643, 0) + // Minimum execution time: 21_311_000 picoseconds. + Weight::from_parts(22_123_383, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_396 - .saturating_add(Weight::from_parts(38_644, 0).saturating_mul(p.into())) + // Standard Error: 1_276 + .saturating_add(Weight::from_parts(32_718, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -200,11 +200,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `139` // Estimated: `4706` - // Minimum execution time: 25_848_000 picoseconds. - Weight::from_parts(27_293_823, 0) + // Minimum execution time: 25_458_000 picoseconds. + Weight::from_parts(26_445_470, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_451 - .saturating_add(Weight::from_parts(8_546, 0).saturating_mul(p.into())) + // Standard Error: 1_375 + .saturating_add(Weight::from_parts(19_575, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -215,12 +215,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `164 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_938_000 picoseconds. - Weight::from_parts(24_023_183, 0) + // Minimum execution time: 22_087_000 picoseconds. 
+ Weight::from_parts(23_104_943, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_563 - .saturating_add(Weight::from_parts(37_026, 0).saturating_mul(p.into())) + // Standard Error: 1_686 + .saturating_add(Weight::from_parts(35_333, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `5698` + // Minimum execution time: 43_537_000 picoseconds. + Weight::from_parts(44_096_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/genesis_config_presets.rs index 35b356f4421e4..64693c92c4e49 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/genesis_config_presets.rs @@ -28,10 +28,15 @@ use sp_keyring::Sr25519Keyring; /// However, the presets do not currently allow dynamic para-ids. pub const DEFAULT_GLUTTON_PARA_ID: ParaId = ParaId::new(1300); -pub fn glutton_westend_genesis(authorities: Vec<AuraId>, id: ParaId) -> serde_json::Value { +pub fn glutton_westend_genesis( + authorities: Vec<AuraId>, + sudo: Option<AccountId>, + id: ParaId, +) -> serde_json::Value { build_struct_json_patch!(RuntimeGenesisConfig { parachain_info: ParachainInfoConfig { parachain_id: id }, aura: AuraConfig { authorities }, + sudo: SudoConfig { key: sudo } }) } @@ -41,11 +46,13 @@ pub fn get_preset(id: &PresetId) -> Option<Vec<u8>> { sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => glutton_westend_genesis( // initial collators. vec![Sr25519Keyring::Alice.public().into(), Sr25519Keyring::Bob.public().into()], + Some(Sr25519Keyring::Alice.to_account_id()), DEFAULT_GLUTTON_PARA_ID, ), sp_genesis_builder::DEV_RUNTIME_PRESET => glutton_westend_genesis( // initial collators. vec![Sr25519Keyring::Alice.public().into()], + Some(Sr25519Keyring::Alice.to_account_id()), DEFAULT_GLUTTON_PARA_ID, ), _ => return None, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs index dd7ac3348d972..c971e1a96379b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `afc679a858d4`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -57,11 +57,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_117_000 picoseconds. - Weight::from_parts(15_104_234, 0) + // Minimum execution time: 14_193_000 picoseconds. + Weight::from_parts(14_814_540, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 908 - .saturating_add(Weight::from_parts(30_959, 0).saturating_mul(p.into())) + // Standard Error: 1_163 + .saturating_add(Weight::from_parts(25_891, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -76,13 +76,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `454 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_398_000 picoseconds. - Weight::from_parts(42_122_465, 0) + // Minimum execution time: 40_717_000 picoseconds. + Weight::from_parts(41_406_158, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_537 - .saturating_add(Weight::from_parts(154_452, 0).saturating_mul(a.into())) - // Standard Error: 2_622 - .saturating_add(Weight::from_parts(51_800, 0).saturating_mul(p.into())) + // Standard Error: 3_363 + .saturating_add(Weight::from_parts(149_287, 0).saturating_mul(a.into())) + // Standard Error: 3_475 + .saturating_add(Weight::from_parts(53_202, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -96,13 +96,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_783_000 picoseconds. - Weight::from_parts(26_897_251, 0) + // Minimum execution time: 25_574_000 picoseconds. + Weight::from_parts(25_943_471, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_851 - .saturating_add(Weight::from_parts(148_965, 0).saturating_mul(a.into())) - // Standard Error: 1_913 - .saturating_add(Weight::from_parts(25_587, 0).saturating_mul(p.into())) + // Standard Error: 1_934 + .saturating_add(Weight::from_parts(145_112, 0).saturating_mul(a.into())) + // Standard Error: 1_998 + .saturating_add(Weight::from_parts(31_322, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -116,13 +116,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_002_000 picoseconds. - Weight::from_parts(26_723_567, 0) + // Minimum execution time: 25_649_000 picoseconds. 
+ Weight::from_parts(25_882_341, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_593 - .saturating_add(Weight::from_parts(148_320, 0).saturating_mul(a.into())) - // Standard Error: 1_646 - .saturating_add(Weight::from_parts(32_877, 0).saturating_mul(p.into())) + // Standard Error: 2_025 + .saturating_add(Weight::from_parts(142_994, 0).saturating_mul(a.into())) + // Standard Error: 2_092 + .saturating_add(Weight::from_parts(34_199, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -138,13 +138,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `386 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 36_935_000 picoseconds. - Weight::from_parts(38_029_030, 0) + // Minimum execution time: 37_082_000 picoseconds. + Weight::from_parts(37_886_513, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_711 - .saturating_add(Weight::from_parts(157_336, 0).saturating_mul(a.into())) - // Standard Error: 2_801 - .saturating_add(Weight::from_parts(55_583, 0).saturating_mul(p.into())) + // Standard Error: 3_640 + .saturating_add(Weight::from_parts(144_359, 0).saturating_mul(a.into())) + // Standard Error: 3_760 + .saturating_add(Weight::from_parts(45_703, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -155,11 +155,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_665_000 picoseconds. - Weight::from_parts(25_465_824, 0) + // Minimum execution time: 23_957_000 picoseconds. + Weight::from_parts(24_927_975, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_219 - .saturating_add(Weight::from_parts(51_099, 0).saturating_mul(p.into())) + // Standard Error: 1_758 + .saturating_add(Weight::from_parts(43_725, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,11 +170,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_496_000 picoseconds. - Weight::from_parts(25_411_752, 0) + // Minimum execution time: 23_729_000 picoseconds. + Weight::from_parts(24_583_323, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_322 - .saturating_add(Weight::from_parts(52_961, 0).saturating_mul(p.into())) + // Standard Error: 1_400 + .saturating_add(Weight::from_parts(45_509, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -185,11 +185,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_726_000 picoseconds. - Weight::from_parts(22_667_897, 0) + // Minimum execution time: 21_192_000 picoseconds. 
+ Weight::from_parts(21_995_477, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_026 - .saturating_add(Weight::from_parts(30_007, 0).saturating_mul(p.into())) + // Standard Error: 1_926 + .saturating_add(Weight::from_parts(34_525, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -200,11 +200,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `139` // Estimated: `4706` - // Minimum execution time: 26_078_000 picoseconds. - Weight::from_parts(27_080_372, 0) + // Minimum execution time: 25_253_000 picoseconds. + Weight::from_parts(26_188_295, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_900 - .saturating_add(Weight::from_parts(16_097, 0).saturating_mul(p.into())) + // Standard Error: 1_659 + .saturating_add(Weight::from_parts(22_321, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -215,12 +215,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `164 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_856_000 picoseconds. - Weight::from_parts(23_811_973, 0) + // Minimum execution time: 22_278_000 picoseconds. + Weight::from_parts(23_226_079, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_116 - .saturating_add(Weight::from_parts(29_272, 0).saturating_mul(p.into())) + // Standard Error: 1_375 + .saturating_add(Weight::from_parts(23_729, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `5698` + // Minimum execution time: 43_833_000 picoseconds. + Weight::from_parts(44_489_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs index dde21332f10a9..28868c97fd6fc 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_proxy.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `b9a9df1fcddf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -57,11 +57,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_211_000 picoseconds. - Weight::from_parts(15_083_006, 0) + // Minimum execution time: 14_292_000 picoseconds. + Weight::from_parts(15_140_165, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_072 - .saturating_add(Weight::from_parts(34_377, 0).saturating_mul(p.into())) + // Standard Error: 1_184 + .saturating_add(Weight::from_parts(37_715, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -76,13 +76,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `454 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 42_275_000 picoseconds. - Weight::from_parts(42_416_930, 0) + // Minimum execution time: 41_703_000 picoseconds. + Weight::from_parts(42_146_045, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_511 - .saturating_add(Weight::from_parts(154_326, 0).saturating_mul(a.into())) - // Standard Error: 2_594 - .saturating_add(Weight::from_parts(55_214, 0).saturating_mul(p.into())) + // Standard Error: 3_906 + .saturating_add(Weight::from_parts(152_856, 0).saturating_mul(a.into())) + // Standard Error: 4_036 + .saturating_add(Weight::from_parts(63_199, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -96,13 +96,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_124_000 picoseconds. - Weight::from_parts(26_832_182, 0) + // Minimum execution time: 25_888_000 picoseconds. + Weight::from_parts(26_963_577, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_619 - .saturating_add(Weight::from_parts(152_291, 0).saturating_mul(a.into())) - // Standard Error: 1_673 - .saturating_add(Weight::from_parts(27_294, 0).saturating_mul(p.into())) + // Standard Error: 2_452 + .saturating_add(Weight::from_parts(142_225, 0).saturating_mul(a.into())) + // Standard Error: 2_534 + .saturating_add(Weight::from_parts(15_645, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -116,13 +116,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `369 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_147_000 picoseconds. - Weight::from_parts(27_079_986, 0) + // Minimum execution time: 25_867_000 picoseconds. 
+ Weight::from_parts(25_977_737, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_776 - .saturating_add(Weight::from_parts(146_088, 0).saturating_mul(a.into())) - // Standard Error: 1_835 - .saturating_add(Weight::from_parts(25_996, 0).saturating_mul(p.into())) + // Standard Error: 1_855 + .saturating_add(Weight::from_parts(157_924, 0).saturating_mul(a.into())) + // Standard Error: 1_916 + .saturating_add(Weight::from_parts(41_678, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -138,13 +138,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `386 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_992_000 picoseconds. - Weight::from_parts(38_894_678, 0) + // Minimum execution time: 37_343_000 picoseconds. + Weight::from_parts(37_754_114, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_213 - .saturating_add(Weight::from_parts(151_123, 0).saturating_mul(a.into())) - // Standard Error: 2_287 - .saturating_add(Weight::from_parts(53_141, 0).saturating_mul(p.into())) + // Standard Error: 2_112 + .saturating_add(Weight::from_parts(154_206, 0).saturating_mul(a.into())) + // Standard Error: 2_183 + .saturating_add(Weight::from_parts(65_528, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -155,11 +155,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_993_000 picoseconds. - Weight::from_parts(25_785_521, 0) + // Minimum execution time: 24_058_000 picoseconds. + Weight::from_parts(25_043_055, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_123 - .saturating_add(Weight::from_parts(48_508, 0).saturating_mul(p.into())) + // Standard Error: 1_548 + .saturating_add(Weight::from_parts(59_084, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,11 +170,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_674_000 picoseconds. - Weight::from_parts(25_625_536, 0) + // Minimum execution time: 24_027_000 picoseconds. + Weight::from_parts(25_013_643, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_365 - .saturating_add(Weight::from_parts(48_773, 0).saturating_mul(p.into())) + // Standard Error: 1_194 + .saturating_add(Weight::from_parts(52_428, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -185,11 +185,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `127 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_076_000 picoseconds. - Weight::from_parts(22_971_690, 0) + // Minimum execution time: 21_487_000 picoseconds. 
+ Weight::from_parts(22_407_585, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_069 - .saturating_add(Weight::from_parts(33_803, 0).saturating_mul(p.into())) + // Standard Error: 1_366 + .saturating_add(Weight::from_parts(35_206, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -200,11 +200,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `139` // Estimated: `4706` - // Minimum execution time: 26_289_000 picoseconds. - Weight::from_parts(27_283_065, 0) + // Minimum execution time: 25_488_000 picoseconds. + Weight::from_parts(26_421_963, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_211 - .saturating_add(Weight::from_parts(15_881, 0).saturating_mul(p.into())) + // Standard Error: 1_595 + .saturating_add(Weight::from_parts(23_770, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -215,12 +215,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `164 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_961_000 picoseconds. - Weight::from_parts(24_005_115, 0) + // Minimum execution time: 22_402_000 picoseconds. + Weight::from_parts(23_547_112, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_176 - .saturating_add(Weight::from_parts(29_506, 0).saturating_mul(p.into())) + // Standard Error: 2_281 + .saturating_add(Weight::from_parts(33_996, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `5698` + // Minimum execution time: 44_682_000 picoseconds. 
+ Weight::from_parts(45_638_000, 0)
+ .saturating_add(Weight::from_parts(0, 5698))
+ .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().writes(3))
+ }
}
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
index 82db8829e516a..65e2c7e738d30 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
@@ -56,15 +56,14 @@ use pallet_xcm::XcmPassthrough;
use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID};
use polkadot_parachain_primitives::primitives::Sibling;
use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice};
-use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor;
use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto};
use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH};
use xcm_builder::{
AccountId32Aliases, AliasOriginRootUsingFilter, AllowHrmpNotificationsFromRelayChain,
AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
AsPrefixedGeneralIndex, ConvertedConcreteId, DescribeAllTerminal, DescribeFamily,
- EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter,
- FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete,
+ EnsureXcmOrigin, ExternalConsensusLocationsConverterFor, FixedWeightBounds,
+ FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, HashedDescription, IsConcrete,
LocalMint, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative,
SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter,
@@ -106,12 +105,8 @@ pub type LocationToAccountId = (
AccountId32Aliases,
// Foreign locations alias into accounts according to a hash of their standard description.
HashedDescription>,
- // Different global consensus parachain sovereign account.
- // (Used for over-bridge transfers and reserve processing)
- GlobalConsensusParachainConvertsFor,
- // Ethereum contract sovereign account.
- // (Used to get convert ethereum contract locations to sovereign account)
- EthereumLocationsConverterFor,
+ // Sovereign accounts for different global consensus locations.
+ ExternalConsensusLocationsConverterFor,
);
/// Means for transacting assets on this chain.
diff --git a/cumulus/polkadot-omni-node/lib/src/cli.rs b/cumulus/polkadot-omni-node/lib/src/cli.rs
index 65d3fdecdee7a..3814192fb4a1c 100644
--- a/cumulus/polkadot-omni-node/lib/src/cli.rs
+++ b/cumulus/polkadot-omni-node/lib/src/cli.rs
@@ -76,6 +76,19 @@ pub enum Subcommand {
Key(sc_cli::KeySubcommand),
/// Build a chain specification.
+ ///
+ /// The `build-spec` command relies on the chain specification built (hard-coded) into the node
+ /// binary, and may use the genesis presets of the runtimes embedded in the node binaries
+ /// that support this command. Since `polkadot-omni-node` does not contain any embedded
+ /// runtime, and requires a `chain-spec` path to be passed to its `--chain` flag, the command
+ /// doesn't bring the significant value it does for other node binaries (e.g. the
+ /// `polkadot` binary).
+ ///
+ /// For a more versatile `chain-spec` manipulation experience, please check out the
+ /// `polkadot-omni-node chain-spec-builder` subcommand.
+ #[deprecated(
+ note = "build-spec will be removed after 1/06/2025. Use chain-spec-builder instead"
+ )]
BuildSpec(sc_cli::BuildSpecCmd),
/// Validate blocks.
@@ -95,10 +108,15 @@ pub enum Subcommand {
/// Subcommand for generating and managing chain specifications.
///
- /// Unlike `build-spec`, which generates a chain specification based on existing
- /// configurations, `chain-spec-builder` provides a more interactive and customizable approach
- /// to defining a chain spec. It allows users to create specifications with additional
- /// parameters and validation steps before finalizing the output.
+ /// The `chain-spec-builder` subcommand corresponds to the existing `chain-spec-builder` tool
+ /// (), which can already be used standalone.
+ /// It provides the same functionality as the tool, but bundled with `polkadot-omni-node` it
+ /// enables easier access to chain-spec generation, patching, conversion to raw format, and
+ /// validation from a single binary, which can also be used as a parachain node tool.
+ /// For a detailed usage guide, please check out the standalone tool's crates.io or docs.rs
+ /// pages:
+ /// -
+ /// -
ChainSpecBuilder(ChainSpecBuilder),
/// Remove the whole chain.
diff --git a/cumulus/polkadot-omni-node/lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs
index a5b474a89f570..28a7b712b8236 100644
--- a/cumulus/polkadot-omni-node/lib/src/command.rs
+++ b/cumulus/polkadot-omni-node/lib/src/command.rs
@@ -103,6 +103,7 @@ pub fn run(cmd_config: RunConfig) -> Result<()
let mut cli = Cli::::from_args();
cli.chain_spec_loader = Some(cmd_config.chain_spec_loader);
+ #[allow(deprecated)]
match &cli.subcommand {
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
index 8553218d34e54..4414c92eda005 100644
--- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
+++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs
@@ -44,6 +44,7 @@ pub fn glutton_westend_config(
.with_genesis_config_patch(
glutton_westend_runtime::genesis_config_presets::glutton_westend_genesis(
authorities,
+ Some(Sr25519Keyring::Alice.to_account_id()),
para_id,
),
)
diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs
index 90fa8639f7577..e289734d3df8b 100644
--- a/docs/sdk/src/guides/your_first_node.rs
+++ b/docs/sdk/src/guides/your_first_node.rs
@@ -256,9 +256,14 @@ mod tests {
let expected_blocks = (10_000 / block_time).saturating_div(2);
assert!(expected_blocks > 0, "test configuration is bad, should give it more time");
- assert!(String::from_utf8(output.stderr)
- .unwrap()
- .contains(format!("Imported #{}", expected_blocks).to_string().as_str()));
+ let output = String::from_utf8(output.stderr).unwrap();
+ let want = format!("Imported #{}", expected_blocks);
+ if !output.contains(&want) {
+ panic!(
+ "Output did not contain the pattern:\n\npattern: {}\n\noutput: {}\n",
+ want, output
+ );
+ }
}
#[test]
@@ -282,6 +287,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn parachain_runtime_works() {
// TODO: None doesn't work. But maybe it should? it would be misleading as many users might
// use it.
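Aside: the deprecation wiring above pairs a `#[deprecated]` attribute on the enum variant with an `#[allow(deprecated)]` at the one match site that must keep handling it. A minimal self-contained sketch of that interplay, with illustrative names rather than the PR's actual types:

enum Subcommand {
    /// Using this variant anywhere now emits a deprecation warning.
    #[deprecated(note = "use `chain-spec-builder` instead")]
    BuildSpec,
    ChainSpecBuilder,
}

fn run(cmd: &Subcommand) {
    // The match must still handle the deprecated variant, so the lint is
    // silenced for this statement, mirroring the `command.rs` hunk above.
    #[allow(deprecated)]
    match cmd {
        Subcommand::BuildSpec => println!("deprecated path"),
        Subcommand::ChainSpecBuilder => println!("preferred path"),
    }
}

fn main() {
    run(&Subcommand::ChainSpecBuilder);
    // Constructing the deprecated variant warns too, unless allowed.
    #[allow(deprecated)]
    run(&Subcommand::BuildSpec);
}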
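In the same spirit, the `your_first_node.rs` hunk above swaps an opaque `assert!` for a failure that prints both the expected pattern and the full captured output. The same idea as a free-standing helper (the `assert_contains` name is hypothetical, not part of this diff):

/// Panics with both the pattern and the full text when the match fails,
/// so a failing CI run shows what was actually printed.
fn assert_contains(output: &str, pattern: &str) {
    if !output.contains(pattern) {
        panic!(
            "Output did not contain the pattern:\n\npattern: {}\n\noutput: {}\n",
            pattern, output
        );
    }
}

fn main() {
    let logs = "2024-01-01 12:00:00 Imported #12 (0xabcd)";
    assert_contains(logs, "Imported #12");
}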
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 24590fe0c90ea..a40db8439f3ad 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -195,9 +195,43 @@ where
let relay_parent = candidate_receipt.descriptor.relay_parent();
let maybe_claim_queue = claim_queue(relay_parent, &mut sender).await;
+ let Some(session_index) = get_session_index(&mut sender, relay_parent).await else {
+ let error = "cannot fetch session index from the runtime";
+ gum::warn!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ error,
+ );
+
+ let _ = response_sender
+ .send(Err(ValidationFailed("Session index not found".to_string())));
+ return
+ };
+
+ // This will return a default value for the limit if the runtime API is not available.
+ // However, we still error out if there is an unexpected runtime API error.
+ let Ok(validation_code_bomb_limit) = util::runtime::fetch_validation_code_bomb_limit(
+ relay_parent,
+ session_index,
+ &mut sender,
+ )
+ .await
+ else {
+ let error = "cannot fetch validation code bomb limit from the runtime";
+ gum::warn!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ error,
+ );
+
+ let _ = response_sender.send(Err(ValidationFailed(
+ "Validation code bomb limit not available".to_string(),
+ )));
+ return
+ };
let res = validate_candidate_exhaustive(
- get_session_index(&mut sender, relay_parent).await,
+ session_index,
validation_host,
validation_data,
validation_code,
@@ -207,6 +241,7 @@ where
exec_kind,
&metrics,
maybe_claim_queue,
+ validation_code_bomb_limit,
)
.await;
@@ -220,9 +255,46 @@ where
response_sender,
..
} => async move {
- let precheck_result =
- precheck_pvf(&mut sender, validation_host, relay_parent, validation_code_hash)
- .await;
+ let Some(session_index) = get_session_index(&mut sender, relay_parent).await else {
+ let error = "cannot fetch session index from the runtime";
+ gum::warn!(
+ target: LOG_TARGET,
+ ?relay_parent,
+ error,
+ );
+
+ let _ = response_sender.send(PreCheckOutcome::Failed);
+ return
+ };
+
+ // This will return a default value for the limit if the runtime API is not available.
+ // However, we still error out if there is an unexpected runtime API error.
+ let Ok(validation_code_bomb_limit) = util::runtime::fetch_validation_code_bomb_limit( + relay_parent, + session_index, + &mut sender, + ) + .await + else { + let error = "cannot fetch validation code bomb limit from the runtime"; + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + error, + ); + + let _ = response_sender.send(PreCheckOutcome::Failed); + return + }; + + let precheck_result = precheck_pvf( + &mut sender, + validation_host, + relay_parent, + validation_code_hash, + validation_code_bomb_limit, + ) + .await; let _ = response_sender.send(precheck_result); } @@ -533,11 +605,33 @@ where continue; }; + let Some(session_index) = get_session_index(sender, relay_parent).await else { continue }; + + let validation_code_bomb_limit = match util::runtime::fetch_validation_code_bomb_limit( + relay_parent, + session_index, + sender, + ) + .await + { + Ok(limit) => limit, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + ?err, + "cannot fetch validation code bomb limit from runtime API", + ); + continue; + }, + }; + let pvf = PvfPrepData::from_code( validation_code.0, executor_params.clone(), timeout, PrepareJobKind::Prechecking, + validation_code_bomb_limit, ); active_pvfs.push(pvf); @@ -690,6 +784,7 @@ async fn precheck_pvf( mut validation_backend: impl ValidationBackend, relay_parent: Hash, validation_code_hash: ValidationCodeHash, + validation_code_bomb_limit: u32, ) -> PreCheckOutcome where Sender: SubsystemSender, @@ -739,6 +834,7 @@ where executor_params, timeout, PrepareJobKind::Prechecking, + validation_code_bomb_limit, ); match validation_backend.precheck_pvf(pvf).await { @@ -753,7 +849,7 @@ where } async fn validate_candidate_exhaustive( - maybe_expected_session_index: Option, + expected_session_index: SessionIndex, mut validation_backend: impl ValidationBackend + Send, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, @@ -763,6 +859,7 @@ async fn validate_candidate_exhaustive( exec_kind: PvfExecKind, metrics: &Metrics, maybe_claim_queue: Option, + validation_code_bomb_limit: u32, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); let validation_code_hash = validation_code.hash(); @@ -778,22 +875,10 @@ async fn validate_candidate_exhaustive( // We only check the session index for backing. match (exec_kind, candidate_receipt.descriptor.session_index()) { - (PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), Some(session_index)) => { - let Some(expected_session_index) = maybe_expected_session_index else { - let error = "cannot fetch session index from the runtime"; - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - error, - ); - - return Err(ValidationFailed(error.into())) - }; - + (PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), Some(session_index)) => if session_index != expected_session_index { return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)) - } - }, + }, (_, _) => {}, }; @@ -819,6 +904,7 @@ async fn validate_candidate_exhaustive( executor_params, prep_timeout, PrepareJobKind::Compilation, + validation_code_bomb_limit, ); validation_backend @@ -843,6 +929,7 @@ async fn validate_candidate_exhaustive( PVF_APPROVAL_EXECUTION_RETRY_DELAY, exec_kind.into(), exec_kind, + validation_code_bomb_limit, ) .await, }; @@ -1005,6 +1092,7 @@ trait ValidationBackend { prepare_priority: polkadot_node_core_pvf::Priority, // The kind for the execution job. 
exec_kind: PvfExecKind,
+ validation_code_bomb_limit: u32,
) -> Result {
let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare);
// Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap.
@@ -1013,6 +1101,7 @@ trait ValidationBackend {
executor_params,
prep_timeout,
PrepareJobKind::Compilation,
+ validation_code_bomb_limit,
);
// We keep track of the total time that has passed and stop retrying if we are taking too
// long.
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index ee72daa1f86eb..a6938df7f0148 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -24,7 +24,7 @@ use crate::PvfExecKind;
use assert_matches::assert_matches;
use futures::executor;
use polkadot_node_core_pvf::PrepareError;
-use polkadot_node_primitives::{BlockData, VALIDATION_CODE_BOMB_LIMIT};
+use polkadot_node_primitives::BlockData;
use polkadot_node_subsystem::messages::AllMessages;
use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
use polkadot_node_subsystem_util::reexports::SubsystemContext;
@@ -46,6 +46,8 @@ use sp_core::{sr25519::Public, testing::TaskExecutor};
use sp_keyring::Sr25519Keyring;
use sp_keystore::{testing::MemoryKeystore, Keystore};
+const VALIDATION_CODE_BOMB_LIMIT: u32 = 30 * 1024 * 1024;
+
#[derive(Debug)]
enum AssumptionCheckOutcome {
Matches(PersistedValidationData, ValidationCode),
@@ -518,7 +520,7 @@ fn session_index_checked_only_in_backing() {
// The session index is invalid
let v = executor::block_on(validate_candidate_exhaustive(
- Some(1),
+ 1,
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())),
validation_data.clone(),
validation_code.clone(),
@@ -528,6 +530,7 @@
PvfExecKind::Backing(dummy_hash()),
&Default::default(),
Default::default(),
+ VALIDATION_CODE_BOMB_LIMIT,
))
.unwrap();
@@ -535,7 +538,7 @@
// Approval doesn't fail since the check is omitted.
let v = executor::block_on(validate_candidate_exhaustive(
- Some(1),
+ 1,
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())),
validation_data.clone(),
validation_code.clone(),
@@ -545,6 +548,7 @@
PvfExecKind::Approval,
&Default::default(),
Default::default(),
+ VALIDATION_CODE_BOMB_LIMIT,
))
.unwrap();
@@ -559,7 +563,7 @@
// Approval doesn't fail since the check is omitted.
let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data.clone(), validation_code, @@ -569,6 +573,7 @@ fn session_index_checked_only_in_backing() { PvfExecKind::Dispute, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -657,7 +662,7 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { let _ = cq.insert(CoreIndex(1), vec![1.into(), 1.into()].into()); let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data.clone(), validation_code, @@ -667,6 +672,7 @@ fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq)), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -735,7 +741,7 @@ fn invalid_session_or_core_index() { CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let err = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -745,13 +751,14 @@ fn invalid_session_or_core_index() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); assert_matches!(err, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); let err = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -761,6 +768,7 @@ fn invalid_session_or_core_index() { PvfExecKind::BackingSystemParas(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -769,7 +777,7 @@ fn invalid_session_or_core_index() { candidate_receipt.descriptor.set_session_index(1); let result = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -779,12 +787,13 @@ fn invalid_session_or_core_index() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(Default::default()), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); let result = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -794,12 +803,13 @@ fn invalid_session_or_core_index() { PvfExecKind::BackingSystemParas(dummy_hash()), &Default::default(), Some(Default::default()), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -809,6 +819,7 @@ fn invalid_session_or_core_index() { PvfExecKind::Approval, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -824,7 +835,7 @@ fn invalid_session_or_core_index() { // Dispute check passes because we don't check core or session index let v = 
executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -834,6 +845,7 @@ fn invalid_session_or_core_index() { PvfExecKind::Dispute, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -853,7 +865,7 @@ fn invalid_session_or_core_index() { let _ = cq.insert(CoreIndex(1), vec![1.into(), 2.into()].into()); let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -863,6 +875,7 @@ fn invalid_session_or_core_index() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq.clone())), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -876,7 +889,7 @@ fn invalid_session_or_core_index() { }); let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -886,6 +899,7 @@ fn invalid_session_or_core_index() { PvfExecKind::BackingSystemParas(dummy_hash()), &Default::default(), Some(ClaimQueueSnapshot(cq)), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -920,7 +934,7 @@ fn invalid_session_or_core_index() { [PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())] { let result = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -930,6 +944,7 @@ fn invalid_session_or_core_index() { exec_kind, &Default::default(), Some(Default::default()), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); @@ -938,7 +953,7 @@ fn invalid_session_or_core_index() { // Validation doesn't fail for approvals and disputes, core/session index is not checked. 
for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] { let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), validation_data.clone(), validation_code.clone(), @@ -948,6 +963,7 @@ fn invalid_session_or_core_index() { exec_kind, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -992,7 +1008,7 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1004,6 +1020,7 @@ fn candidate_validation_bad_return_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1075,7 +1092,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), @@ -1088,6 +1105,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { PvfExecKind::Approval, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1118,7 +1136,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), @@ -1131,6 +1149,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { PvfExecKind::Approval, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1238,7 +1257,7 @@ fn candidate_validation_retry_on_error_helper( let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; return executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, @@ -1248,6 +1267,7 @@ fn candidate_validation_retry_on_error_helper( exec_kind, &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) } @@ -1281,7 +1301,7 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1293,6 +1313,7 @@ fn candidate_validation_timeout_is_internal_error() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); @@ -1332,7 +1353,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { }; let result = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, 
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1342,6 +1363,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1382,7 +1404,7 @@ fn candidate_validation_code_mismatch_is_invalid() { let (_ctx, _ctx_handle) = make_subsystem_context::(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -1394,6 +1416,7 @@ fn candidate_validation_code_mismatch_is_invalid() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Default::default(), + VALIDATION_CODE_BOMB_LIMIT, )) .unwrap(); @@ -1407,9 +1430,10 @@ fn compressed_code_works() { let head_data = HeadData(vec![1, 1, 1]); let raw_code = vec![2u8; 16]; - let validation_code = sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT) - .map(ValidationCode) - .unwrap(); + let validation_code = + sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT as usize) + .map(ValidationCode) + .unwrap(); let descriptor = make_valid_candidate_descriptor( ParaId::from(1_u32), @@ -1444,7 +1468,7 @@ fn compressed_code_works() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( - Some(1), + 1, MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, @@ -1454,6 +1478,7 @@ fn compressed_code_works() { PvfExecKind::Backing(dummy_hash()), &Default::default(), Some(Default::default()), + VALIDATION_CODE_BOMB_LIMIT, )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); @@ -1514,6 +1539,7 @@ fn precheck_works() { MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, + VALIDATION_CODE_BOMB_LIMIT, ) .remote_handle(); @@ -1571,6 +1597,7 @@ fn precheck_properly_classifies_outcomes() { MockPreCheckBackend::with_hardcoded_result(prepare_result), relay_parent, validation_code_hash, + VALIDATION_CODE_BOMB_LIMIT, ) .remote_handle(); @@ -1821,6 +1848,21 @@ fn maybe_prepare_validation_golden_path() { let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); } ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => { + let _ = tx.send(Ok(1)); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeBombLimit(session, tx))) => { + assert_eq!(session, 1); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); }; let test_fut = future::join(test_fut, check_fut); @@ -1980,6 +2022,21 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_valida let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); } ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => { + let _ = tx.send(Ok(1)); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeBombLimit(session, tx))) => { + assert_eq!(session, 1); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); }; let test_fut = future::join(test_fut, check_fut); 
@@ -2131,21 +2188,30 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() { } ); - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))) => { - assert_eq!(hash, ValidationCode(vec![0; 16]).hash()); - let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); - } - ); + for c in 0..2 { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))) => { + assert_eq!(hash, ValidationCode(vec![c; 16]).hash()); + let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); + } + ); - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx))) => { - assert_eq!(hash, ValidationCode(vec![1; 16]).hash()); - let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); - } - ); + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => { + let _ = tx.send(Ok(1)); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeBombLimit(session, tx))) => { + assert_eq!(session, 1); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); + } }; let test_fut = future::join(test_fut, check_fut); @@ -2216,6 +2282,21 @@ fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() { let _ = tx.send(Ok(Some(ValidationCode(Vec::new())))); } ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => { + let _ = tx.send(Ok(1)); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeBombLimit(session, tx))) => { + assert_eq!(session, 1); + let _ = tx.send(Ok(VALIDATION_CODE_BOMB_LIMIT)); + } + ); }; let test_fut = future::join(test_fut, check_fut); diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index 342128b7cca21..05becf87a3251 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -77,6 +77,7 @@ impl TestHost { executor_params, TEST_PREPARATION_TIMEOUT, PrepareJobKind::Prechecking, + 16 * 1024 * 1024, ), result_tx, ) @@ -98,6 +99,7 @@ fn host_prepare_rococo_runtime(c: &mut Criterion) { ExecutorParams::default(), Duration::from_secs(360), PrepareJobKind::Compilation, + 64 * 1024 * 1024, ), Err(e) => { panic!("Cannot decompress blob: {:?}", e); diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 4019a8d8b0d00..6bfe9331224fb 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -28,6 +28,8 @@ use std::{fmt, sync::Arc, time::Duration}; pub struct PvfPrepData { /// Wasm code (maybe compressed) maybe_compressed_code: Arc>, + /// Maximum uncompressed code size. + validation_code_bomb_limit: u32, /// Wasm code hash. 
code_hash: ValidationCodeHash, /// Executor environment parameters for the session for which artifact is prepared @@ -45,11 +47,19 @@ impl PvfPrepData { executor_params: ExecutorParams, prep_timeout: Duration, prep_kind: PrepareJobKind, + validation_code_bomb_limit: u32, ) -> Self { let maybe_compressed_code = Arc::new(code); let code_hash = sp_crypto_hashing::blake2_256(&maybe_compressed_code).into(); let executor_params = Arc::new(executor_params); - Self { maybe_compressed_code, code_hash, executor_params, prep_timeout, prep_kind } + Self { + maybe_compressed_code, + code_hash, + executor_params, + prep_timeout, + prep_kind, + validation_code_bomb_limit, + } } /// Returns validation code hash @@ -77,6 +87,11 @@ impl PvfPrepData { self.prep_kind } + /// Returns validation code bomb limit. + pub fn validation_code_bomb_limit(&self) -> u32 { + self.validation_code_bomb_limit + } + /// Creates a structure for tests. #[cfg(feature = "test-utils")] pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { @@ -86,6 +101,7 @@ impl PvfPrepData { ExecutorParams::default(), timeout, PrepareJobKind::Compilation, + 30 * 1024 * 1024, ) } diff --git a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs index 49b30dc33ceb7..9e6947655365e 100644 --- a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs @@ -47,6 +47,7 @@ fn prepare_rococo_runtime(c: &mut Criterion) { ExecutorParams::default(), Duration::from_secs(360), PrepareJobKind::Compilation, + 64 * 1024 * 1024, ), Err(e) => { panic!("Cannot decompress blob: {:?}", e); diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index f8ebb6effcecd..533abe414a0a9 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -38,7 +38,6 @@ use polkadot_node_core_pvf_common::{ executor_interface::{prepare, prevalidate}, worker::{pipe2_cloexec, PipeFd, WorkerInfo}, }; -use polkadot_node_primitives::VALIDATION_CODE_BOMB_LIMIT; use codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ @@ -303,9 +302,11 @@ pub fn worker_entrypoint( fn prepare_artifact(pvf: PvfPrepData) -> Result { let maybe_compressed_code = pvf.maybe_compressed_code(); - let raw_validation_code = - sp_maybe_compressed_blob::decompress(&maybe_compressed_code, VALIDATION_CODE_BOMB_LIMIT) - .map_err(|e| PrepareError::CouldNotDecompressCodeBlob(e.to_string()))?; + let raw_validation_code = sp_maybe_compressed_blob::decompress( + &maybe_compressed_code, + pvf.validation_code_bomb_limit() as usize, + ) + .map_err(|e| PrepareError::CouldNotDecompressCodeBlob(e.to_string()))?; let observed_wasm_code_len = raw_validation_code.len() as u32; let blob = match prevalidate(&raw_validation_code) { diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index cfb78fd530d23..9b24e7b64c89c 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -24,7 +24,7 @@ use polkadot_node_core_pvf::{ PossiblyInvalidError, PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; -use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use 
polkadot_node_subsystem::messages::PvfExecKind; use polkadot_parachain_primitives::primitives::{BlockData, ValidationResult}; use polkadot_primitives::{ @@ -33,6 +33,8 @@ use polkadot_primitives::{ }; use sp_core::H256; +const VALIDATION_CODE_BOMB_LIMIT: u32 = 30 * 1024 * 1024; + use std::{io::Write, sync::Arc, time::Duration}; use tokio::sync::Mutex; @@ -94,6 +96,7 @@ impl TestHost { executor_params, TEST_PREPARATION_TIMEOUT, PrepareJobKind::Prechecking, + VALIDATION_CODE_BOMB_LIMIT, ), result_tx, ) @@ -121,6 +124,7 @@ impl TestHost { executor_params, TEST_PREPARATION_TIMEOUT, PrepareJobKind::Compilation, + VALIDATION_CODE_BOMB_LIMIT, ), TEST_EXECUTION_TIMEOUT, Arc::new(pvd), @@ -682,9 +686,10 @@ async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() { #[tokio::test] async fn invalid_compressed_code_fails_prechecking() { let host = TestHost::new().await; - let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; + let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT as usize + 1]; let validation_code = - sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1).unwrap(); + sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT as usize + 1) + .unwrap(); let res = host.precheck_pvf(&validation_code, Default::default()).await; @@ -703,9 +708,10 @@ async fn invalid_compressed_code_fails_validation() { }; let pov = PoV { block_data: BlockData(Vec::new()) }; - let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; + let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT as usize + 1]; let validation_code = - sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1).unwrap(); + sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT as usize + 1) + .unwrap(); let result = host .validate_candidate(&validation_code, pvd, pov, Default::default(), H256::default()) diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 4ed42626d88ee..4e16264145b06 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -77,6 +77,7 @@ pub(crate) struct RequestResultCache { claim_queue: LruMap>>, backing_constraints: LruMap<(Hash, ParaId), Option>, scheduling_lookahead: LruMap, + validation_code_bomb_limits: LruMap, } impl Default for RequestResultCache { @@ -116,6 +117,7 @@ impl Default for RequestResultCache { claim_queue: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), backing_constraints: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), scheduling_lookahead: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + validation_code_bomb_limits: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -590,6 +592,16 @@ impl RequestResultCache { ) { self.scheduling_lookahead.insert(session_index, scheduling_lookahead); } + + /// Cache the validation code bomb limit for a session + pub(crate) fn cache_validation_code_bomb_limit(&mut self, session: SessionIndex, limit: u32) { + self.validation_code_bomb_limits.insert(session, limit); + } + + /// Get the validation code bomb limit for a session if cached + pub(crate) fn validation_code_bomb_limit(&mut self, session: SessionIndex) -> Option { + self.validation_code_bomb_limits.get(&session).copied() + } } pub(crate) enum RequestResult { @@ -642,4 +654,5 @@ pub(crate) enum RequestResult { CandidatesPendingAvailability(Hash, ParaId, Vec), BackingConstraints(Hash, ParaId, Option), SchedulingLookahead(SessionIndex, u32), + ValidationCodeBombLimit(SessionIndex, u32), } diff --git 
a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 2d864c8cf2f4c..eefc1be106d30 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -189,6 +189,8 @@ where SchedulingLookahead(session_index, scheduling_lookahead) => self .requests_cache .cache_scheduling_lookahead(session_index, scheduling_lookahead), + ValidationCodeBombLimit(session_index, limit) => + self.requests_cache.cache_validation_code_bomb_limit(session_index, limit), } } @@ -357,6 +359,15 @@ where Some(Request::SchedulingLookahead(index, sender)) } }, + Request::ValidationCodeBombLimit(index, sender) => { + if let Some(value) = self.requests_cache.validation_code_bomb_limit(index) { + self.metrics.on_cached_request(); + let _ = sender.send(Ok(value)); + None + } else { + Some(Request::ValidationCodeBombLimit(index, sender)) + } + }, } } @@ -684,5 +695,12 @@ where sender, result = (index) ), + Request::ValidationCodeBombLimit(index, sender) => query!( + ValidationCodeBombLimit, + validation_code_bomb_limit(), + ver = Request::VALIDATION_CODE_BOMB_LIMIT_RUNTIME_REQUIREMENT, + sender, + result = (index) + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index bbc5801290022..bb75417a278fb 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -319,6 +319,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { ) -> Result, ApiError> { todo!("Not required for tests") } + + async fn validation_code_bomb_limit(&self, _: Hash) -> Result { + todo!("Not required for tests") + } } #[test] diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 26ec2d883783c..2abaa0ff312d3 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -69,6 +69,10 @@ const MERKLE_NODE_MAX_SIZE: usize = 512 + 100; const MERKLE_PROOF_MAX_DEPTH: usize = 8; /// The bomb limit for decompressing code blobs. +#[deprecated( + note = "`VALIDATION_CODE_BOMB_LIMIT` will be removed. Use `validation_code_bomb_limit` + runtime API to retrieve the value from the runtime" +)] pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize; /// The bomb limit for decompressing PoV blobs. diff --git a/polkadot/node/service/src/builder/mod.rs b/polkadot/node/service/src/builder/mod.rs new file mode 100644 index 0000000000000..dc33af673ae07 --- /dev/null +++ b/polkadot/node/service/src/builder/mod.rs @@ -0,0 +1,869 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Polkadot service builder. 
+
+#![cfg(feature = "full-node")]
+
+mod partial;
+use partial::PolkadotPartialComponents;
+pub(crate) use partial::{new_partial, new_partial_basics};
+
+use crate::{
+ grandpa_support, open_database,
+ overseer::{ExtendedOverseerGenArgs, OverseerGen, OverseerGenArgs},
+ parachains_db,
+ relay_chain_selection::SelectRelayChain,
+ workers, Chain, Error, FullBackend, FullClient, IdentifyVariant, IsParachainNode,
+ GRANDPA_JUSTIFICATION_PERIOD, KEEP_FINALIZED_FOR_LIVE_NETWORKS,
+};
+use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
+use gum::info;
+use mmr_gadget::MmrGadget;
+use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD;
+use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
+use polkadot_node_core_av_store::Config as AvailabilityConfig;
+use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
+use polkadot_node_core_chain_selection::{
+ self as chain_selection_subsystem, Config as ChainSelectionConfig,
+};
+use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
+use polkadot_node_network_protocol::{
+ peer_set::{PeerSet, PeerSetProtocolNames},
+ request_response::{IncomingRequest, ReqProtocolNames},
+};
+use polkadot_node_subsystem_types::DefaultSubsystemClient;
+use polkadot_overseer::{Handle, OverseerConnector};
+use polkadot_primitives::Block;
+use sc_client_api::Backend;
+use sc_network::config::FullNetworkConfiguration;
+use sc_network_sync::WarpSyncConfig;
+use sc_service::{Configuration, RpcHandlers, TaskManager};
+use sc_sysinfo::Metric;
+use sc_telemetry::TelemetryWorkerHandle;
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use sp_consensus_beefy::ecdsa_crypto;
+use sp_runtime::traits::Block as BlockT;
+use std::{collections::HashMap, sync::Arc, time::Duration};
+
+/// Polkadot node service initialization parameters.
+pub struct NewFullParams {
+ pub is_parachain_node: IsParachainNode,
+ pub enable_beefy: bool,
+ /// Whether to enable the block authoring backoff on production networks
+ /// where it isn't enabled by default.
+ pub force_authoring_backoff: bool,
+ pub telemetry_worker_handle: Option,
+ /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version
+ /// check, both on startup and in the workers.
+ pub node_version: Option,
+ /// Whether the node is attempting to run as a secure validator.
+ pub secure_validator_mode: bool,
+ /// An optional path to a directory containing the workers.
+ pub workers_path: Option,
+ /// Optional custom names for the prepare and execute workers.
+ pub workers_names: Option<(String, String)>,
+ /// An optional maximum number of PVF execute workers.
+ pub execute_workers_max_num: Option,
+ /// An optional maximum number of PVF workers that can be spawned in the PVF prepare pool for
+ /// tasks with priority below critical.
+ pub prepare_workers_soft_max_num: Option,
+ /// An optional absolute maximum number of PVF workers that can be spawned in the PVF prepare
+ /// pool.
+ pub prepare_workers_hard_max_num: Option,
+ /// How long finalized data should be kept in the availability store (in hours).
+ pub keep_finalized_for: Option,
+ pub overseer_gen: OverseerGenerator,
+ pub overseer_message_channel_capacity_override: Option,
+ #[allow(dead_code)]
+ pub malus_finality_delay: Option,
+ pub hwbench: Option,
+ /// Enable approval voting processing in parallel.
+ pub enable_approval_voting_parallel: bool,
+}
+
+/// Completely built polkadot node service.
+pub struct NewFull { + pub task_manager: TaskManager, + pub client: Arc, + pub overseer_handle: Option, + pub network: Arc, + pub sync_service: Arc>, + pub rpc_handlers: RpcHandlers, + pub backend: Arc, +} + +pub struct PolkadotServiceBuilder +where + OverseerGenerator: OverseerGen, + Network: sc_network::NetworkBackend::Hash>, +{ + config: Configuration, + params: NewFullParams, + overseer_connector: OverseerConnector, + partial_components: PolkadotPartialComponents>, + net_config: FullNetworkConfiguration::Hash, Network>, +} + +impl PolkadotServiceBuilder +where + OverseerGenerator: OverseerGen, + Network: sc_network::NetworkBackend::Hash>, +{ + /// Create new polkadot service builder. + pub fn new( + mut config: Configuration, + params: NewFullParams, + ) -> Result, Error> { + let basics = new_partial_basics(&mut config, params.telemetry_worker_handle.clone())?; + + let prometheus_registry = config.prometheus_registry().cloned(); + let overseer_connector = OverseerConnector::default(); + let overseer_handle = Handle::new(overseer_connector.handle()); + let auth_or_collator = config.role.is_authority() || params.is_parachain_node.is_collator(); + + let select_chain = if auth_or_collator { + let metrics = polkadot_node_subsystem_util::metrics::Metrics::register( + prometheus_registry.as_ref(), + )?; + + SelectRelayChain::new_with_overseer( + basics.backend.clone(), + overseer_handle.clone(), + metrics, + Some(basics.task_manager.spawn_handle()), + params.enable_approval_voting_parallel, + ) + } else { + SelectRelayChain::new_longest_chain(basics.backend.clone()) + }; + + let partial_components = + new_partial::>(&mut config, basics, select_chain)?; + + let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new( + &config.network, + config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()), + ); + + Ok(PolkadotServiceBuilder { + config, + params, + overseer_connector, + partial_components, + net_config, + }) + } + + /// Get the genesis hash of the polkadot service being built. + pub fn genesis_hash(&self) -> ::Hash { + self.partial_components.client.chain_info().genesis_hash + } + + /// Add extra request-response protocol to the polkadot service. + pub fn add_extra_request_response_protocol( + &mut self, + config: Network::RequestResponseProtocolConfig, + ) { + self.net_config.add_request_response_protocol(config); + } + + /// Build polkadot service. 
+ pub fn build(self) -> Result { + let Self { + config, + params: + NewFullParams { + is_parachain_node, + enable_beefy, + force_authoring_backoff, + telemetry_worker_handle: _, + node_version, + secure_validator_mode, + workers_path, + workers_names, + overseer_gen, + overseer_message_channel_capacity_override, + malus_finality_delay: _malus_finality_delay, + hwbench, + execute_workers_max_num, + prepare_workers_soft_max_num, + prepare_workers_hard_max_num, + keep_finalized_for, + enable_approval_voting_parallel, + }, + overseer_connector, + partial_components: + sc_service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> { + client, + backend, + mut task_manager, + keystore_container, + select_chain, + import_queue, + transaction_pool, + other: + (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry), + }, + mut net_config, + } = self; + + let role = config.role; + let auth_or_collator = config.role.is_authority() || is_parachain_node.is_collator(); + let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; + let force_authoring = config.force_authoring; + let disable_grandpa = config.disable_grandpa; + let name = config.network.node_name.clone(); + let backoff_authoring_blocks = if !force_authoring_backoff && + (config.chain_spec.is_polkadot() || config.chain_spec.is_kusama()) + { + // the block authoring backoff is disabled by default on production networks + None + } else { + let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default(); + + if config.chain_spec.is_rococo() || + config.chain_spec.is_versi() || + config.chain_spec.is_dev() + { + // on testnets that are in flux (like rococo or versi), finality has stalled + // sometimes due to operational issues and it's annoying to slow down block + // production to 1 block per hour. + backoff.max_interval = 10; + } + + Some(backoff) + }; + let shared_voter_state = rpc_setup; + let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + let auth_disc_public_addresses = config.network.public_addresses.clone(); + + let genesis_hash = client.chain_info().genesis_hash; + let peer_store_handle = net_config.peer_store_handle(); + + let prometheus_registry = config.prometheus_registry().cloned(); + let metrics = Network::register_notification_metrics( + config.prometheus_config.as_ref().map(|cfg| &cfg.registry), + ); + + // Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change + // anything in terms of behaviour, but makes the logs more consistent with the other + // Substrate nodes. + let grandpa_protocol_name = + sc_consensus_grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); + let (grandpa_protocol_config, grandpa_notification_service) = + sc_consensus_grandpa::grandpa_peers_set_config::<_, Network>( + grandpa_protocol_name.clone(), + metrics.clone(), + Arc::clone(&peer_store_handle), + ); + net_config.add_notification_protocol(grandpa_protocol_config); + + let beefy_gossip_proto_name = + sc_consensus_beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); + // `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run, + // while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`. 
+ let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) = + sc_consensus_beefy::communication::request_response::BeefyJustifsRequestHandler::new::< + _, + Network, + >( + &genesis_hash, + config.chain_spec.fork_id(), + client.clone(), + prometheus_registry.clone(), + ); + let beefy_notification_service = match enable_beefy { + false => None, + true => { + let (beefy_notification_config, beefy_notification_service) = + sc_consensus_beefy::communication::beefy_peers_set_config::<_, Network>( + beefy_gossip_proto_name.clone(), + metrics.clone(), + Arc::clone(&peer_store_handle), + ); + + net_config.add_notification_protocol(beefy_notification_config); + net_config.add_request_response_protocol(beefy_req_resp_cfg); + Some(beefy_notification_service) + }, + }; + + // validation/collation protocols are enabled only if `Overseer` is enabled + let peerset_protocol_names = + PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); + + // If this is a validator or running alongside a parachain node, we need to enable the + // networking protocols. + // + // Collators and parachain full nodes require the collator and validator networking to send + // collations and to be able to recover PoVs. + let notification_services = if role.is_authority() || + is_parachain_node.is_running_alongside_parachain_node() + { + use polkadot_network_bridge::{peer_sets_info, IsAuthority}; + let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; + + peer_sets_info::<_, Network>( + is_authority, + &peerset_protocol_names, + metrics.clone(), + Arc::clone(&peer_store_handle), + ) + .into_iter() + .map(|(config, (peerset, service))| { + net_config.add_notification_protocol(config); + (peerset, service) + }) + .collect::>>() + } else { + std::collections::HashMap::new() + }; + + let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id()); + + let (collation_req_v1_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (collation_req_v2_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (available_data_req_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (pov_req_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (chunk_req_v1_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (chunk_req_v2_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + + let grandpa_hard_forks = if config.chain_spec.is_kusama() { + grandpa_support::kusama_hard_forks() + } else { + Vec::new() + }; + + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + import_setup.1.shared_authority_set().clone(), + grandpa_hard_forks, + )); + + let ext_overseer_args = if is_parachain_node.is_running_alongside_parachain_node() { + None + } else { + let parachains_db = open_database(&config.database)?; + let candidate_validation_config = if role.is_authority() { + let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths( + workers_path, + workers_names, + 
node_version.clone(), + )?; + log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path); + log::info!("🚀 Using execute-worker binary at: {:?}", exec_worker_path); + + Some(CandidateValidationConfig { + artifacts_cache_path: config + .database + .path() + .ok_or(Error::DatabasePathRequired)? + .join("pvf-artifacts"), + node_version, + secure_validator_mode, + prep_worker_path, + exec_worker_path, + // Default execution workers is 4 because we have 8 cores on the reference + // hardware, and this accounts for 50% of that cpu capacity. + pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4), + pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1), + pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2), + }) + } else { + None + }; + let (statement_req_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (candidate_req_v2_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (dispute_req_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let approval_voting_config = ApprovalVotingConfig { + col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, + slot_duration_millis: slot_duration.as_millis() as u64, + }; + let dispute_coordinator_config = DisputeCoordinatorConfig { + col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, + }; + let chain_selection_config = ChainSelectionConfig { + col_data: parachains_db::REAL_COLUMNS.col_chain_selection_data, + stagnant_check_interval: Default::default(), + stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly, + }; + + // Kusama + testnets get a higher threshold, we are conservative on Polkadot for now. 
+ let fetch_chunks_threshold = + if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) }; + + let availability_config = AvailabilityConfig { + col_data: parachains_db::REAL_COLUMNS.col_availability_data, + col_meta: parachains_db::REAL_COLUMNS.col_availability_meta, + keep_finalized_for: if matches!(config.chain_spec.identify_chain(), Chain::Rococo) { + keep_finalized_for.unwrap_or(1) + } else { + KEEP_FINALIZED_FOR_LIVE_NETWORKS + }, + }; + + Some(ExtendedOverseerGenArgs { + keystore: keystore_container.local_keystore(), + parachains_db, + candidate_validation_config, + availability_config, + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + statement_req_receiver, + candidate_req_v2_receiver, + approval_voting_config, + dispute_req_receiver, + dispute_coordinator_config, + chain_selection_config, + fetch_chunks_threshold, + enable_approval_voting_parallel, + }) + }; + + let (network, system_rpc_tx, tx_handler_controller, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), + block_relay: None, + metrics, + })?; + + if config.offchain_worker.enabled { + use futures::FutureExt; + + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: Arc::new(network.clone()), + is_validator: role.is_authority(), + enable_http_requests: false, + custom_extensions: move |_| vec![], + })? + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore_container.keystore(), + network: network.clone(), + sync_service: sync_service.clone(), + rpc_builder: Box::new(rpc_extensions_builder), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + system_rpc_tx, + tx_handler_controller, + telemetry: telemetry.as_mut(), + })?; + + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench, role.is_authority()) { + Err(err) if role.is_authority() => { + if err + .0 + .iter() + .any(|failure| matches!(failure.metric, Metric::Blake2256Parallel { .. })) + { + log::warn!( + "⚠️ Starting January 2025 the hardware will fail the minimal physical CPU cores requirements {} for role 'Authority',\n\ + find out more when this will become mandatory at:\n\ + https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware", + err + ); + } + if err + .0 + .iter() + .any(|failure| !matches!(failure.metric, Metric::Blake2256Parallel { .. 
}))
+				{
+					log::warn!(
+						"⚠️  The hardware does not meet the minimal requirements {} for role 'Authority', find out more at:\n\
+						https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
+						err
+					);
+				}
+			},
+			_ => {},
+		}
+
+		if let Some(ref mut telemetry) = telemetry {
+			let telemetry_handle = telemetry.handle();
+			task_manager.spawn_handle().spawn(
+				"telemetry_hwbench",
+				None,
+				sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+			);
+		}
+	}
+
+	let (block_import, link_half, babe_link, beefy_links) = import_setup;
+
+	let overseer_client = client.clone();
+	let spawner = task_manager.spawn_handle();
+
+	let authority_discovery_service =
+		// We need authority discovery if this node is either a validator or is running
+		// alongside a parachain node. Parachain nodes require authority discovery to find
+		// relay chain validators for sending their PoVs or recovering PoVs.
+		if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() {
+			use futures::StreamExt;
+			use sc_network::{Event, NetworkEventStream};
+
+			let authority_discovery_role = if role.is_authority() {
+				sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore())
+			} else {
+				// Don't publish our addresses when we're not an authority (collator, cumulus, ..).
+				sc_authority_discovery::Role::Discover
+			};
+			let dht_event_stream =
+				network.event_stream("authority-discovery").filter_map(|e| async move {
+					match e {
+						Event::Dht(e) => Some(e),
+						_ => None,
+					}
+				});
+			let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
+				sc_authority_discovery::WorkerConfig {
+					publish_non_global_ips: auth_disc_publish_non_global_ips,
+					public_addresses: auth_disc_public_addresses,
+					// Require that authority discovery records are signed.
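+					// With strict validation, unsigned (legacy) DHT address records are
+					// rejected outright instead of being accepted for backwards compatibility.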
+ strict_record_validation: true, + ..Default::default() + }, + client.clone(), + Arc::new(network.clone()), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager.spawn_handle().spawn( + "authority-discovery-worker", + Some("authority-discovery"), + Box::pin(worker.run()), + ); + Some(service) + } else { + None + }; + + let runtime_client = Arc::new(DefaultSubsystemClient::new( + overseer_client.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )); + + let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service + { + let (overseer, overseer_handle) = overseer_gen + .generate::>( + overseer_connector, + OverseerGenArgs { + runtime_client, + network_service: network.clone(), + sync_service: sync_service.clone(), + authority_discovery_service, + collation_req_v1_receiver, + collation_req_v2_receiver, + available_data_req_receiver, + registry: prometheus_registry.as_ref(), + spawner, + is_parachain_node, + overseer_message_channel_capacity_override, + req_protocol_names, + peerset_protocol_names, + notification_services, + }, + ext_overseer_args, + ) + .map_err(|e| { + gum::error!("Failed to init overseer: {}", e); + e + })?; + let handle = Handle::new(overseer_handle.clone()); + + { + let handle = handle.clone(); + task_manager.spawn_essential_handle().spawn_blocking( + "overseer", + None, + Box::pin(async move { + use futures::{pin_mut, select, FutureExt}; + + let forward = polkadot_overseer::forward_events(overseer_client, handle); + + let forward = forward.fuse(); + let overseer_fut = overseer.run().fuse(); + + pin_mut!(overseer_fut); + pin_mut!(forward); + + select! { + () = forward => (), + () = overseer_fut => (), + complete => (), + } + }), + ); + } + Some(handle) + } else { + assert!( + !auth_or_collator, + "Precondition congruence (false) is guaranteed by manual checking. 
qed" + ); + None + }; + + if role.is_authority() { + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let client_clone = client.clone(); + let overseer_handle = + overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(); + let slot_duration = babe_link.config().slot_duration(); + let babe_config = sc_consensus_babe::BabeParams { + keystore: keystore_container.keystore(), + client: client.clone(), + select_chain, + block_import, + env: proposer, + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), + create_inherent_data_providers: move |parent, ()| { + let client_clone = client_clone.clone(); + let overseer_handle = overseer_handle.clone(); + + async move { + let parachain = + polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::new( + client_clone, + overseer_handle, + parent, + ); + + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp, parachain)) + } + }, + force_authoring, + backoff_authoring_blocks, + babe_link, + block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + let babe = sc_consensus_babe::start_babe(babe_config)?; + task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore_opt = + if role.is_authority() { Some(keystore_container.keystore()) } else { None }; + + // beefy is enabled if its notification service exists + if let Some(notification_service) = beefy_notification_service { + let justifications_protocol_name = + beefy_on_demand_justifications_handler.protocol_name(); + let network_params = sc_consensus_beefy::BeefyNetworkParams { + network: Arc::new(network.clone()), + sync: sync_service.clone(), + gossip_protocol_name: beefy_gossip_proto_name, + justifications_protocol_name, + notification_service, + _phantom: core::marker::PhantomData::, + }; + let payload_provider = sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone()); + let beefy_params = sc_consensus_beefy::BeefyParams { + client: client.clone(), + backend: backend.clone(), + payload_provider, + runtime: client.clone(), + key_store: keystore_opt.clone(), + network_params, + min_block_delta: 8, + prometheus_registry: prometheus_registry.clone(), + links: beefy_links, + on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), + }; + + let gadget = sc_consensus_beefy::start_beefy_gadget::< + _, + _, + _, + _, + _, + _, + _, + ecdsa_crypto::AuthorityId, + >(beefy_params); + + // BEEFY is part of consensus, if it fails we'll bring the node down with it to make + // sure it is noticed. + task_manager + .spawn_essential_handle() + .spawn_blocking("beefy-gadget", None, gadget); + } + // When offchain indexing is enabled, MMR gadget should also run. 
+		if is_offchain_indexing_enabled {
+			task_manager.spawn_essential_handle().spawn_blocking(
+				"mmr-gadget",
+				None,
+				MmrGadget::start(
+					client.clone(),
+					backend.clone(),
+					sp_mmr_primitives::INDEXING_PREFIX.to_vec(),
+				),
+			);
+		}
+
+		let config = sc_consensus_grandpa::Config {
+			// FIXME substrate#1578 make this available through chainspec
+			// Grandpa performance can be improved a bit by tuning this parameter, see:
+			// https://github.com/paritytech/polkadot/issues/5464
+			gossip_duration: Duration::from_millis(1000),
+			justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
+			name: Some(name),
+			observer_enabled: false,
+			keystore: keystore_opt,
+			local_role: role,
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+			protocol_name: grandpa_protocol_name,
+		};
+
+		let enable_grandpa = !disable_grandpa;
+		if enable_grandpa {
+			// Start the full GRANDPA voter.
+			// NOTE: unlike in substrate we are currently running the full
+			// GRANDPA voter protocol for all full nodes (regardless of whether
+			// they're validators or not). At this point the full voter should
+			// provide better guarantees of block and vote data availability than
+			// the observer.
+
+			let mut voting_rules_builder = sc_consensus_grandpa::VotingRulesBuilder::default();
+
+			#[cfg(not(feature = "malus"))]
+			let _malus_finality_delay = None;
+
+			if let Some(delay) = _malus_finality_delay {
+				info!(?delay, "Enabling malus finality delay",);
+				voting_rules_builder =
+					voting_rules_builder.add(sc_consensus_grandpa::BeforeBestBlockBy(delay));
+			};
+
+			let grandpa_config = sc_consensus_grandpa::GrandpaParams {
+				config,
+				link: link_half,
+				network: network.clone(),
+				sync: sync_service.clone(),
+				voting_rule: voting_rules_builder.build(),
+				prometheus_registry: prometheus_registry.clone(),
+				shared_voter_state,
+				telemetry: telemetry.as_ref().map(|x| x.handle()),
+				notification_service: grandpa_notification_service,
+				offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+					transaction_pool.clone(),
+				),
+			};
+
+			task_manager.spawn_essential_handle().spawn_blocking(
+				"grandpa-voter",
+				None,
+				sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
+			);
+		}
+
+		Ok(NewFull {
+			task_manager,
+			client,
+			overseer_handle,
+			network,
+			sync_service,
+			rpc_handlers,
+			backend,
+		})
+	}
+}
+
+/// Create a new full node of arbitrary runtime and executor.
+///
+/// This is an advanced feature and not recommended for general use. Generally, `build_full` is
+/// a better choice.
+///
+/// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside.
+/// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is
+/// searched. If the path points to an executable rather than a directory, that executable is used
+/// both as preparation and execution worker (intended for tests only).
+pub fn new_full<
+	OverseerGenerator: OverseerGen,
+	Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
+>(
+	config: Configuration,
+	params: NewFullParams<OverseerGenerator>,
+) -> Result<NewFull, Error> {
+	PolkadotServiceBuilder::<OverseerGenerator, Network>::new(config, params)?.build()
+}
diff --git a/polkadot/node/service/src/builder/partial.rs b/polkadot/node/service/src/builder/partial.rs
new file mode 100644
index 0000000000000..0926230bff1db
--- /dev/null
+++ b/polkadot/node/service/src/builder/partial.rs
@@ -0,0 +1,280 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Polkadot service partial builder.
+
+#![cfg(feature = "full-node")]
+
+use crate::{
+	fake_runtime_api::RuntimeApi, grandpa_support, relay_chain_selection, Error, FullBackend,
+	FullClient, IdentifyVariant, GRANDPA_JUSTIFICATION_PERIOD,
+};
+use polkadot_primitives::Block;
+use sc_consensus_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
+use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
+use sc_service::{Configuration, Error as SubstrateServiceError, KeystoreContainer, TaskManager};
+use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use sp_consensus::SelectChain;
+use sp_consensus_beefy::ecdsa_crypto;
+use std::sync::Arc;
+
+type FullSelectChain = relay_chain_selection::SelectRelayChain<FullBackend>;
+type FullGrandpaBlockImport<ChainSelection = FullSelectChain> =
+	sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, ChainSelection>;
+type FullBeefyBlockImport<InnerBlockImport, AuthorityId> =
+	sc_consensus_beefy::import::BeefyBlockImport<
+		Block,
+		FullBackend,
+		FullClient,
+		InnerBlockImport,
+		AuthorityId,
+	>;
+
+pub(crate) type PolkadotPartialComponents<ChainSelection> = sc_service::PartialComponents<
+	FullClient,
+	FullBackend,
+	ChainSelection,
+	sc_consensus::DefaultImportQueue<Block>,
+	sc_transaction_pool::TransactionPoolHandle<Block, FullClient>,
+	(
+		Box<
+			dyn Fn(
+				polkadot_rpc::SubscriptionTaskExecutor,
+			) -> Result<polkadot_rpc::RpcExtension, SubstrateServiceError>,
+		>,
+		(
+			sc_consensus_babe::BabeBlockImport<
+				Block,
+				FullClient,
+				FullBeefyBlockImport<
+					FullGrandpaBlockImport<ChainSelection>,
+					ecdsa_crypto::AuthorityId,
+				>,
+			>,
+			sc_consensus_grandpa::LinkHalf<Block, FullClient, ChainSelection>,
+			sc_consensus_babe::BabeLink<Block>,
+			sc_consensus_beefy::BeefyVoterLinks<Block, ecdsa_crypto::AuthorityId>,
+		),
+		sc_consensus_grandpa::SharedVoterState,
+		sp_consensus_babe::SlotDuration,
+		Option<Telemetry>,
+	),
+>;
+
+pub(crate) struct Basics {
+	pub(crate) task_manager: TaskManager,
+	pub(crate) client: Arc<FullClient>,
+	pub(crate) backend: Arc<FullBackend>,
+	pub(crate) keystore_container: KeystoreContainer,
+	pub(crate) telemetry: Option<Telemetry>,
+}
+
+pub(crate) fn new_partial_basics(
+	config: &mut Configuration,
+	telemetry_worker_handle: Option<TelemetryWorkerHandle>,
+) -> Result<Basics, Error> {
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
+		.filter(|x| !x.is_empty())
+		.map(move |endpoints| -> Result<_, sc_telemetry::Error> {
+			let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle {
+				(None, worker_handle)
+			} else {
+				let worker = TelemetryWorker::new(16)?;
+				let worker_handle = worker.handle();
+				(Some(worker), worker_handle)
+			};
+			let telemetry = worker_handle.new_telemetry(endpoints);
+			Ok((worker, telemetry))
+		})
+		.transpose()?;
+
+	let heap_pages = config
+		.executor
+		.default_heap_pages
+		.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });
+
+	let executor = WasmExecutor::builder()
+		.with_execution_method(config.executor.wasm_method)
+		.with_onchain_heap_alloc_strategy(heap_pages)
+		.with_offchain_heap_alloc_strategy(heap_pages)
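+		// The same static heap allocation strategy is applied to both on-chain and
+		// offchain runtime calls.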
.with_max_runtime_instances(config.executor.max_runtime_instances) + .with_runtime_cache_size(config.executor.runtime_cache_size) + .build(); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + if let Some(worker) = worker { + task_manager.spawn_handle().spawn( + "telemetry", + Some("telemetry"), + Box::pin(worker.run()), + ); + } + telemetry + }); + + Ok(Basics { task_manager, client, backend, keystore_container, telemetry }) +} + +pub(crate) fn new_partial( + config: &mut Configuration, + Basics { task_manager, backend, client, keystore_container, telemetry }: Basics, + select_chain: ChainSelection, +) -> Result, Error> +where + ChainSelection: 'static + SelectChain, +{ + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), + ); + + let grandpa_hard_forks = if config.chain_spec.is_kusama() { + grandpa_support::kusama_hard_forks() + } else { + Vec::new() + }; + + let (grandpa_block_import, grandpa_link) = + sc_consensus_grandpa::block_import_with_authority_set_hard_forks( + client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, + &client.clone(), + select_chain.clone(), + grandpa_hard_forks, + telemetry.as_ref().map(|x| x.handle()), + )?; + let justification_import = grandpa_block_import.clone(); + + let (beefy_block_import, beefy_voter_links, beefy_rpc_links) = + sc_consensus_beefy::beefy_block_import_and_links( + grandpa_block_import, + backend.clone(), + client.clone(), + config.prometheus_registry().cloned(), + ); + + let babe_config = sc_consensus_babe::configuration(&*client)?; + let (block_import, babe_link) = + sc_consensus_babe::block_import(babe_config.clone(), beefy_block_import, client.clone())?; + + let slot_duration = babe_link.config().slot_duration(); + let (import_queue, babe_worker_handle) = + sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { + link: babe_link.clone(), + block_import: block_import.clone(), + justification_import: Some(Box::new(justification_import)), + client: client.clone(), + select_chain: select_chain.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + }, + spawner: &task_manager.spawn_essential_handle(), + registry: config.prometheus_registry(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), + })?; + + let justification_stream = grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = sc_consensus_grandpa::SharedVoterState::empty(); + let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( + backend.clone(), + Some(shared_authority_set.clone()), + ); + + let import_setup = (block_import, grandpa_link, babe_link, beefy_voter_links); + let rpc_setup = shared_voter_state.clone(); + + let rpc_extensions_builder = { + let client = client.clone(); 
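+		// Like `client` above, the remaining handles are cloned so the returned closure
+		// owns everything it captures (the boxed `Fn` in `PolkadotPartialComponents`
+		// must be self-contained).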
+ let keystore = keystore_container.keystore(); + let transaction_pool = transaction_pool.clone(); + let select_chain = select_chain.clone(); + let chain_spec = config.chain_spec.cloned_box(); + let backend = backend.clone(); + + move |subscription_executor: polkadot_rpc::SubscriptionTaskExecutor| + -> Result { + let deps = polkadot_rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + select_chain: select_chain.clone(), + chain_spec: chain_spec.cloned_box(), + babe: polkadot_rpc::BabeDeps { + babe_worker_handle: babe_worker_handle.clone(), + keystore: keystore.clone(), + }, + grandpa: polkadot_rpc::GrandpaDeps { + shared_voter_state: shared_voter_state.clone(), + shared_authority_set: shared_authority_set.clone(), + justification_stream: justification_stream.clone(), + subscription_executor: subscription_executor.clone(), + finality_provider: finality_proof_provider.clone(), + }, + beefy: polkadot_rpc::BeefyDeps:: { + beefy_finality_proof_stream: beefy_rpc_links.from_voter_justif_stream.clone(), + beefy_best_block_stream: beefy_rpc_links.from_voter_best_beefy_stream.clone(), + subscription_executor, + }, + backend: backend.clone(), + }; + + polkadot_rpc::create_full(deps).map_err(Into::into) + } + }; + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + keystore_container, + select_chain, + import_queue, + transaction_pool, + other: ( + Box::new(rpc_extensions_builder), + import_setup, + rpc_setup, + slot_duration, + telemetry, + ), + }) +} diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 077fb5c578a83..349facb7dcf28 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -25,11 +25,16 @@ mod grandpa_support; mod parachains_db; mod relay_chain_selection; +#[cfg(feature = "full-node")] +pub mod builder; #[cfg(feature = "full-node")] pub mod overseer; #[cfg(feature = "full-node")] pub mod workers; +#[cfg(feature = "full-node")] +pub use crate::builder::{new_full, NewFull, NewFullParams}; + #[cfg(feature = "full-node")] pub use self::overseer::{ CollatorOverseerGen, ExtendedOverseerGenArgs, OverseerGen, OverseerGenArgs, @@ -39,26 +44,14 @@ pub use self::overseer::{ #[cfg(test)] mod tests; +#[cfg(feature = "full-node")] +use crate::builder::{new_partial, new_partial_basics}; + #[cfg(feature = "full-node")] use { - gum::info, - polkadot_node_core_approval_voting::{ - self as approval_voting_subsystem, Config as ApprovalVotingConfig, - }, - polkadot_node_core_av_store::Config as AvailabilityConfig, + polkadot_node_core_approval_voting as approval_voting_subsystem, polkadot_node_core_av_store::Error as AvailabilityError, - polkadot_node_core_candidate_validation::Config as CandidateValidationConfig, - polkadot_node_core_chain_selection::{ - self as chain_selection_subsystem, Config as ChainSelectionConfig, - }, - polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig, - polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames}, - request_response::ReqProtocolNames, - }, - sc_client_api::BlockBackend, - sc_consensus_grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}, - sc_transaction_pool_api::OffchainTransactionPoolFactory, + polkadot_node_core_chain_selection as chain_selection_subsystem, }; use polkadot_node_subsystem_util::database::Database; @@ -75,25 +68,17 @@ pub use { sp_consensus_babe::BabeApi, }; -use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; +use std::{path::PathBuf, sync::Arc}; use 
prometheus_endpoint::Registry; -#[cfg(feature = "full-node")] -use sc_service::KeystoreContainer; -use sc_service::{RpcHandlers, SpawnTaskHandle}; -use sc_telemetry::TelemetryWorker; -#[cfg(feature = "full-node")] -use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; +use sc_service::SpawnTaskHandle; pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; -use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; -use mmr_gadget::MmrGadget; -use polkadot_node_subsystem_types::DefaultSubsystemClient; pub use polkadot_primitives::{Block, BlockId, BlockNumber, CollatorPair, Hash, Id as ParaId}; pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; pub use sc_executor::NativeExecutionDispatch; -use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; +use sc_executor::WasmExecutor; pub use sc_service::{ config::{DatabaseSource, PrometheusConfig}, ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, TFullBackend, @@ -101,7 +86,6 @@ pub use sc_service::{ }; pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; pub use sp_consensus::{Proposal, SelectChain}; -use sp_consensus_beefy::ecdsa_crypto; pub use sp_runtime::{ generic, traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT, NumberFor}, @@ -361,298 +345,6 @@ pub fn open_database(db_source: &DatabaseSource) -> Result, Er Ok(parachains_db) } -#[cfg(feature = "full-node")] -type FullSelectChain = relay_chain_selection::SelectRelayChain; -#[cfg(feature = "full-node")] -type FullGrandpaBlockImport = - sc_consensus_grandpa::GrandpaBlockImport; -#[cfg(feature = "full-node")] -type FullBeefyBlockImport = - sc_consensus_beefy::import::BeefyBlockImport< - Block, - FullBackend, - FullClient, - InnerBlockImport, - AuthorityId, - >; - -#[cfg(feature = "full-node")] -struct Basics { - task_manager: TaskManager, - client: Arc, - backend: Arc, - keystore_container: KeystoreContainer, - telemetry: Option, -} - -#[cfg(feature = "full-node")] -fn new_partial_basics( - config: &mut Configuration, - telemetry_worker_handle: Option, -) -> Result { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(move |endpoints| -> Result<_, sc_telemetry::Error> { - let (worker, mut worker_handle) = if let Some(worker_handle) = telemetry_worker_handle { - (None, worker_handle) - } else { - let worker = TelemetryWorker::new(16)?; - let worker_handle = worker.handle(); - (Some(worker), worker_handle) - }; - let telemetry = worker_handle.new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let heap_pages = config - .executor - .default_heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - - let executor = WasmExecutor::builder() - .with_execution_method(config.executor.wasm_method) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .with_max_runtime_instances(config.executor.max_runtime_instances) - .with_runtime_cache_size(config.executor.runtime_cache_size) - .build(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - - let telemetry = telemetry.map(|(worker, telemetry)| { - if let Some(worker) = worker { - task_manager.spawn_handle().spawn( - "telemetry", - Some("telemetry"), 
- Box::pin(worker.run()), - ); - } - telemetry - }); - - Ok(Basics { task_manager, client, backend, keystore_container, telemetry }) -} - -#[cfg(feature = "full-node")] -fn new_partial( - config: &mut Configuration, - Basics { task_manager, backend, client, keystore_container, telemetry }: Basics, - select_chain: ChainSelection, -) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - ChainSelection, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::TransactionPoolHandle, - ( - impl Fn( - polkadot_rpc::SubscriptionTaskExecutor, - ) -> Result, - ( - sc_consensus_babe::BabeBlockImport< - Block, - FullClient, - FullBeefyBlockImport< - FullGrandpaBlockImport, - ecdsa_crypto::AuthorityId, - >, - >, - sc_consensus_grandpa::LinkHalf, - sc_consensus_babe::BabeLink, - sc_consensus_beefy::BeefyVoterLinks, - ), - sc_consensus_grandpa::SharedVoterState, - sp_consensus_babe::SlotDuration, - Option, - ), - >, - Error, -> -where - ChainSelection: 'static + SelectChain, -{ - let transaction_pool = Arc::from( - sc_transaction_pool::Builder::new( - task_manager.spawn_essential_handle(), - client.clone(), - config.role.is_authority().into(), - ) - .with_options(config.transaction_pool.clone()) - .with_prometheus(config.prometheus_registry()) - .build(), - ); - - let grandpa_hard_forks = if config.chain_spec.is_kusama() { - grandpa_support::kusama_hard_forks() - } else { - Vec::new() - }; - - let (grandpa_block_import, grandpa_link) = - sc_consensus_grandpa::block_import_with_authority_set_hard_forks( - client.clone(), - GRANDPA_JUSTIFICATION_PERIOD, - &(client.clone() as Arc<_>), - select_chain.clone(), - grandpa_hard_forks, - telemetry.as_ref().map(|x| x.handle()), - )?; - let justification_import = grandpa_block_import.clone(); - - let (beefy_block_import, beefy_voter_links, beefy_rpc_links) = - sc_consensus_beefy::beefy_block_import_and_links( - grandpa_block_import, - backend.clone(), - client.clone(), - config.prometheus_registry().cloned(), - ); - - let babe_config = sc_consensus_babe::configuration(&*client)?; - let (block_import, babe_link) = - sc_consensus_babe::block_import(babe_config.clone(), beefy_block_import, client.clone())?; - - let slot_duration = babe_link.config().slot_duration(); - let (import_queue, babe_worker_handle) = - sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { - link: babe_link.clone(), - block_import: block_import.clone(), - justification_import: Some(Box::new(justification_import)), - client: client.clone(), - select_chain: select_chain.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - spawner: &task_manager.spawn_essential_handle(), - registry: config.prometheus_registry(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), - })?; - - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = sc_consensus_grandpa::SharedVoterState::empty(); - let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( - backend.clone(), - Some(shared_authority_set.clone()), - ); - - let import_setup = (block_import, grandpa_link, babe_link, beefy_voter_links); - let 
rpc_setup = shared_voter_state.clone(); - - let rpc_extensions_builder = { - let client = client.clone(); - let keystore = keystore_container.keystore(); - let transaction_pool = transaction_pool.clone(); - let select_chain = select_chain.clone(); - let chain_spec = config.chain_spec.cloned_box(); - let backend = backend.clone(); - - move |subscription_executor: polkadot_rpc::SubscriptionTaskExecutor| - -> Result { - let deps = polkadot_rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - select_chain: select_chain.clone(), - chain_spec: chain_spec.cloned_box(), - babe: polkadot_rpc::BabeDeps { - babe_worker_handle: babe_worker_handle.clone(), - keystore: keystore.clone(), - }, - grandpa: polkadot_rpc::GrandpaDeps { - shared_voter_state: shared_voter_state.clone(), - shared_authority_set: shared_authority_set.clone(), - justification_stream: justification_stream.clone(), - subscription_executor: subscription_executor.clone(), - finality_provider: finality_proof_provider.clone(), - }, - beefy: polkadot_rpc::BeefyDeps:: { - beefy_finality_proof_stream: beefy_rpc_links.from_voter_justif_stream.clone(), - beefy_best_block_stream: beefy_rpc_links.from_voter_best_beefy_stream.clone(), - subscription_executor, - }, - backend: backend.clone(), - }; - - polkadot_rpc::create_full(deps).map_err(Into::into) - } - }; - - Ok(sc_service::PartialComponents { - client, - backend, - task_manager, - keystore_container, - select_chain, - import_queue, - transaction_pool, - other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry), - }) -} - -#[cfg(feature = "full-node")] -pub struct NewFullParams { - pub is_parachain_node: IsParachainNode, - pub enable_beefy: bool, - /// Whether to enable the block authoring backoff on production networks - /// where it isn't enabled by default. - pub force_authoring_backoff: bool, - pub telemetry_worker_handle: Option, - /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version - /// check, both on startup and in the workers. - pub node_version: Option, - /// Whether the node is attempting to run as a secure validator. - pub secure_validator_mode: bool, - /// An optional path to a directory containing the workers. - pub workers_path: Option, - /// Optional custom names for the prepare and execute workers. - pub workers_names: Option<(String, String)>, - /// An optional number of the maximum number of pvf execute workers. - pub execute_workers_max_num: Option, - /// An optional maximum number of pvf workers that can be spawned in the pvf prepare pool for - /// tasks with the priority below critical. - pub prepare_workers_soft_max_num: Option, - /// An optional absolute number of pvf workers that can be spawned in the pvf prepare pool. - pub prepare_workers_hard_max_num: Option, - /// How long finalized data should be kept in the availability store (in hours) - pub keep_finalized_for: Option, - pub overseer_gen: OverseerGenerator, - pub overseer_message_channel_capacity_override: Option, - #[allow(dead_code)] - pub malus_finality_delay: Option, - pub hwbench: Option, - /// Enable approval voting processing in parallel. - pub enable_approval_voting_parallel: bool, -} - -#[cfg(feature = "full-node")] -pub struct NewFull { - pub task_manager: TaskManager, - pub client: Arc, - pub overseer_handle: Option, - pub network: Arc, - pub sync_service: Arc>, - pub rpc_handlers: RpcHandlers, - pub backend: Arc, -} - /// Is this node running as in-process node for a parachain node? 
#[cfg(feature = "full-node")] #[derive(Clone)] @@ -696,703 +388,6 @@ impl IsParachainNode { } } -/// Create a new full node of arbitrary runtime and executor. -/// -/// This is an advanced feature and not recommended for general use. Generally, `build_full` is -/// a better choice. -/// -/// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. -/// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is -/// searched. If the path points to an executable rather then directory, that executable is used -/// both as preparation and execution worker (supposed to be used for tests only). -#[cfg(feature = "full-node")] -pub fn new_full< - OverseerGenerator: OverseerGen, - Network: sc_network::NetworkBackend::Hash>, ->( - mut config: Configuration, - NewFullParams { - is_parachain_node, - enable_beefy, - force_authoring_backoff, - telemetry_worker_handle, - node_version, - secure_validator_mode, - workers_path, - workers_names, - overseer_gen, - overseer_message_channel_capacity_override, - malus_finality_delay: _malus_finality_delay, - hwbench, - execute_workers_max_num, - prepare_workers_soft_max_num, - prepare_workers_hard_max_num, - keep_finalized_for, - enable_approval_voting_parallel, - }: NewFullParams, -) -> Result { - use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD; - use polkadot_node_network_protocol::request_response::IncomingRequest; - use sc_network_sync::WarpSyncConfig; - use sc_sysinfo::Metric; - - let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; - let role = config.role; - let force_authoring = config.force_authoring; - let backoff_authoring_blocks = if !force_authoring_backoff && - (config.chain_spec.is_polkadot() || config.chain_spec.is_kusama()) - { - // the block authoring backoff is disabled by default on production networks - None - } else { - let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default(); - - if config.chain_spec.is_rococo() || - config.chain_spec.is_versi() || - config.chain_spec.is_dev() - { - // on testnets that are in flux (like rococo or versi), finality has stalled - // sometimes due to operational issues and it's annoying to slow down block - // production to 1 block per hour. 
- backoff.max_interval = 10; - } - - Some(backoff) - }; - - let disable_grandpa = config.disable_grandpa; - let name = config.network.node_name.clone(); - - let basics = new_partial_basics(&mut config, telemetry_worker_handle)?; - - let prometheus_registry = config.prometheus_registry().cloned(); - - let overseer_connector = OverseerConnector::default(); - let overseer_handle = Handle::new(overseer_connector.handle()); - - let keystore = basics.keystore_container.local_keystore(); - let auth_or_collator = role.is_authority() || is_parachain_node.is_collator(); - - let select_chain = if auth_or_collator { - let metrics = - polkadot_node_subsystem_util::metrics::Metrics::register(prometheus_registry.as_ref())?; - - SelectRelayChain::new_with_overseer( - basics.backend.clone(), - overseer_handle.clone(), - metrics, - Some(basics.task_manager.spawn_handle()), - enable_approval_voting_parallel, - ) - } else { - SelectRelayChain::new_longest_chain(basics.backend.clone()) - }; - - let sc_service::PartialComponents::<_, _, SelectRelayChain<_>, _, _, _> { - client, - backend, - mut task_manager, - keystore_container, - select_chain, - import_queue, - transaction_pool, - other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry), - } = new_partial::>(&mut config, basics, select_chain)?; - - let metrics = Network::register_notification_metrics( - config.prometheus_config.as_ref().map(|cfg| &cfg.registry), - ); - let shared_voter_state = rpc_setup; - let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; - let auth_disc_public_addresses = config.network.public_addresses.clone(); - - let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new( - &config.network, - config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()), - ); - - let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); - let peer_store_handle = net_config.peer_store_handle(); - - // Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change - // anything in terms of behaviour, but makes the logs more consistent with the other - // Substrate nodes. - let grandpa_protocol_name = - sc_consensus_grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); - let (grandpa_protocol_config, grandpa_notification_service) = - sc_consensus_grandpa::grandpa_peers_set_config::<_, Network>( - grandpa_protocol_name.clone(), - metrics.clone(), - Arc::clone(&peer_store_handle), - ); - net_config.add_notification_protocol(grandpa_protocol_config); - - let beefy_gossip_proto_name = - sc_consensus_beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); - // `beefy_on_demand_justifications_handler` is given to `beefy-gadget` task to be run, - // while `beefy_req_resp_cfg` is added to `config.network.request_response_protocols`. 
- let (beefy_on_demand_justifications_handler, beefy_req_resp_cfg) = - sc_consensus_beefy::communication::request_response::BeefyJustifsRequestHandler::new::< - _, - Network, - >(&genesis_hash, config.chain_spec.fork_id(), client.clone(), prometheus_registry.clone()); - let beefy_notification_service = match enable_beefy { - false => None, - true => { - let (beefy_notification_config, beefy_notification_service) = - sc_consensus_beefy::communication::beefy_peers_set_config::<_, Network>( - beefy_gossip_proto_name.clone(), - metrics.clone(), - Arc::clone(&peer_store_handle), - ); - - net_config.add_notification_protocol(beefy_notification_config); - net_config.add_request_response_protocol(beefy_req_resp_cfg); - Some(beefy_notification_service) - }, - }; - - // validation/collation protocols are enabled only if `Overseer` is enabled - let peerset_protocol_names = - PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - - // If this is a validator or running alongside a parachain node, we need to enable the - // networking protocols. - // - // Collators and parachain full nodes require the collator and validator networking to send - // collations and to be able to recover PoVs. - let notification_services = - if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { - use polkadot_network_bridge::{peer_sets_info, IsAuthority}; - let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - - peer_sets_info::<_, Network>( - is_authority, - &peerset_protocol_names, - metrics.clone(), - Arc::clone(&peer_store_handle), - ) - .into_iter() - .map(|(config, (peerset, service))| { - net_config.add_notification_protocol(config); - (peerset, service) - }) - .collect::>>() - } else { - std::collections::HashMap::new() - }; - - let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id()); - - let (collation_req_v1_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (collation_req_v2_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (available_data_req_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (pov_req_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (chunk_req_v1_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (chunk_req_v2_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - - let grandpa_hard_forks = if config.chain_spec.is_kusama() { - grandpa_support::kusama_hard_forks() - } else { - Vec::new() - }; - - let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - import_setup.1.shared_authority_set().clone(), - grandpa_hard_forks, - )); - - let ext_overseer_args = if is_parachain_node.is_running_alongside_parachain_node() { - None - } else { - let parachains_db = open_database(&config.database)?; - let candidate_validation_config = if role.is_authority() { - let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths( - workers_path, - workers_names, - 
node_version.clone(), - )?; - log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path); - log::info!("🚀 Using execute-worker binary at: {:?}", exec_worker_path); - - Some(CandidateValidationConfig { - artifacts_cache_path: config - .database - .path() - .ok_or(Error::DatabasePathRequired)? - .join("pvf-artifacts"), - node_version, - secure_validator_mode, - prep_worker_path, - exec_worker_path, - // Default execution workers is 4 because we have 8 cores on the reference hardware, - // and this accounts for 50% of that cpu capacity. - pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4), - pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1), - pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2), - }) - } else { - None - }; - let (statement_req_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (candidate_req_v2_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let (dispute_req_receiver, cfg) = - IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); - net_config.add_request_response_protocol(cfg); - let approval_voting_config = ApprovalVotingConfig { - col_approval_data: parachains_db::REAL_COLUMNS.col_approval_data, - slot_duration_millis: slot_duration.as_millis() as u64, - }; - let dispute_coordinator_config = DisputeCoordinatorConfig { - col_dispute_data: parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, - }; - let chain_selection_config = ChainSelectionConfig { - col_data: parachains_db::REAL_COLUMNS.col_chain_selection_data, - stagnant_check_interval: Default::default(), - stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly, - }; - - // Kusama + testnets get a higher threshold, we are conservative on Polkadot for now. 
- let fetch_chunks_threshold = - if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) }; - - let availability_config = AvailabilityConfig { - col_data: parachains_db::REAL_COLUMNS.col_availability_data, - col_meta: parachains_db::REAL_COLUMNS.col_availability_meta, - keep_finalized_for: if matches!(config.chain_spec.identify_chain(), Chain::Rococo) { - keep_finalized_for.unwrap_or(1) - } else { - KEEP_FINALIZED_FOR_LIVE_NETWORKS - }, - }; - - Some(ExtendedOverseerGenArgs { - keystore, - parachains_db, - candidate_validation_config, - availability_config, - pov_req_receiver, - chunk_req_v1_receiver, - chunk_req_v2_receiver, - statement_req_receiver, - candidate_req_v2_receiver, - approval_voting_config, - dispute_req_receiver, - dispute_coordinator_config, - chain_selection_config, - fetch_chunks_threshold, - enable_approval_voting_parallel, - }) - }; - - let (network, system_rpc_tx, tx_handler_controller, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), - block_relay: None, - metrics, - })?; - - if config.offchain_worker.enabled { - use futures::FutureExt; - - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: Arc::new(network.clone()), - is_validator: role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - })? - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } - - let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - config, - backend: backend.clone(), - client: client.clone(), - keystore: keystore_container.keystore(), - network: network.clone(), - sync_service: sync_service.clone(), - rpc_builder: Box::new(rpc_extensions_builder), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - match SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench, role.is_authority()) { - Err(err) if role.is_authority() => { - if err - .0 - .iter() - .any(|failure| matches!(failure.metric, Metric::Blake2256Parallel { .. })) - { - log::warn!( - "⚠️ Starting January 2025 the hardware will fail the minimal physical CPU cores requirements {} for role 'Authority',\n\ - find out more when this will become mandatory at:\n\ - https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware", - err - ); - } - if err - .0 - .iter() - .any(|failure| !matches!(failure.metric, Metric::Blake2256Parallel { .. 
})) - { - log::warn!( - "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\ - https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware", - err - ); - } - }, - _ => {}, - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let (block_import, link_half, babe_link, beefy_links) = import_setup; - - let overseer_client = client.clone(); - let spawner = task_manager.spawn_handle(); - - let authority_discovery_service = - // We need the authority discovery if this node is either a validator or running alongside a parachain node. - // Parachains node require the authority discovery for finding relay chain validators for sending - // their PoVs or recovering PoVs. - if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { - use futures::StreamExt; - use sc_network::{Event, NetworkEventStream}; - - let authority_discovery_role = if role.is_authority() { - sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) - } else { - // don't publish our addresses when we're not an authority (collator, cumulus, ..) - sc_authority_discovery::Role::Discover - }; - let dht_event_stream = - network.event_stream("authority-discovery").filter_map(|e| async move { - match e { - Event::Dht(e) => Some(e), - _ => None, - } - }); - let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( - sc_authority_discovery::WorkerConfig { - publish_non_global_ips: auth_disc_publish_non_global_ips, - public_addresses: auth_disc_public_addresses, - // Require that authority discovery records are signed. 
- strict_record_validation: true, - ..Default::default() - }, - client.clone(), - Arc::new(network.clone()), - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); - - task_manager.spawn_handle().spawn( - "authority-discovery-worker", - Some("authority-discovery"), - Box::pin(worker.run()), - ); - Some(service) - } else { - None - }; - - let runtime_client = Arc::new(DefaultSubsystemClient::new( - overseer_client.clone(), - OffchainTransactionPoolFactory::new(transaction_pool.clone()), - )); - - let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { - let (overseer, overseer_handle) = overseer_gen - .generate::>( - overseer_connector, - OverseerGenArgs { - runtime_client, - network_service: network.clone(), - sync_service: sync_service.clone(), - authority_discovery_service, - collation_req_v1_receiver, - collation_req_v2_receiver, - available_data_req_receiver, - registry: prometheus_registry.as_ref(), - spawner, - is_parachain_node, - overseer_message_channel_capacity_override, - req_protocol_names, - peerset_protocol_names, - notification_services, - }, - ext_overseer_args, - ) - .map_err(|e| { - gum::error!("Failed to init overseer: {}", e); - e - })?; - let handle = Handle::new(overseer_handle.clone()); - - { - let handle = handle.clone(); - task_manager.spawn_essential_handle().spawn_blocking( - "overseer", - None, - Box::pin(async move { - use futures::{pin_mut, select, FutureExt}; - - let forward = polkadot_overseer::forward_events(overseer_client, handle); - - let forward = forward.fuse(); - let overseer_fut = overseer.run().fuse(); - - pin_mut!(overseer_fut); - pin_mut!(forward); - - select! { - () = forward => (), - () = overseer_fut => (), - complete => (), - } - }), - ); - } - Some(handle) - } else { - assert!( - !auth_or_collator, - "Precondition congruence (false) is guaranteed by manual checking. 
qed" - ); - None - }; - - if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|x| x.handle()), - ); - - let client_clone = client.clone(); - let overseer_handle = - overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(); - let slot_duration = babe_link.config().slot_duration(); - let babe_config = sc_consensus_babe::BabeParams { - keystore: keystore_container.keystore(), - client: client.clone(), - select_chain, - block_import, - env: proposer, - sync_oracle: sync_service.clone(), - justification_sync_link: sync_service.clone(), - create_inherent_data_providers: move |parent, ()| { - let client_clone = client_clone.clone(); - let overseer_handle = overseer_handle.clone(); - - async move { - let parachain = - polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::new( - client_clone, - overseer_handle, - parent, - ); - - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp, parachain)) - } - }, - force_authoring, - backoff_authoring_blocks, - babe_link, - block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32), - max_block_proposal_slot_portion: None, - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - let babe = sc_consensus_babe::start_babe(babe_config)?; - task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore_opt = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; - - // beefy is enabled if its notification service exists - if let Some(notification_service) = beefy_notification_service { - let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name(); - let network_params = sc_consensus_beefy::BeefyNetworkParams { - network: Arc::new(network.clone()), - sync: sync_service.clone(), - gossip_protocol_name: beefy_gossip_proto_name, - justifications_protocol_name, - notification_service, - _phantom: core::marker::PhantomData::, - }; - let payload_provider = sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone()); - let beefy_params = sc_consensus_beefy::BeefyParams { - client: client.clone(), - backend: backend.clone(), - payload_provider, - runtime: client.clone(), - key_store: keystore_opt.clone(), - network_params, - min_block_delta: 8, - prometheus_registry: prometheus_registry.clone(), - links: beefy_links, - on_demand_justifications_handler: beefy_on_demand_justifications_handler, - is_authority: role.is_authority(), - }; - - let gadget = sc_consensus_beefy::start_beefy_gadget::< - _, - _, - _, - _, - _, - _, - _, - ecdsa_crypto::AuthorityId, - >(beefy_params); - - // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it - // is noticed. - task_manager - .spawn_essential_handle() - .spawn_blocking("beefy-gadget", None, gadget); - } - // When offchain indexing is enabled, MMR gadget should also run. 
- if is_offchain_indexing_enabled { - task_manager.spawn_essential_handle().spawn_blocking( - "mmr-gadget", - None, - MmrGadget::start( - client.clone(), - backend.clone(), - sp_mmr_primitives::INDEXING_PREFIX.to_vec(), - ), - ); - } - - let config = sc_consensus_grandpa::Config { - // FIXME substrate#1578 make this available through chainspec - // Grandpa performance can be improved a bit by tuning this parameter, see: - // https://github.com/paritytech/polkadot/issues/5464 - gossip_duration: Duration::from_millis(1000), - justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, - name: Some(name), - observer_enabled: false, - keystore: keystore_opt, - local_role: role, - telemetry: telemetry.as_ref().map(|x| x.handle()), - protocol_name: grandpa_protocol_name, - }; - - let enable_grandpa = !disable_grandpa; - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: unlike in substrate we are currently running the full - // GRANDPA voter protocol for all full nodes (regardless of whether - // they're validators or not). at this point the full voter should - // provide better guarantees of block and vote data availability than - // the observer. - - let mut voting_rules_builder = sc_consensus_grandpa::VotingRulesBuilder::default(); - - #[cfg(not(feature = "malus"))] - let _malus_finality_delay = None; - - if let Some(delay) = _malus_finality_delay { - info!(?delay, "Enabling malus finality delay",); - voting_rules_builder = - voting_rules_builder.add(sc_consensus_grandpa::BeforeBestBlockBy(delay)); - }; - - let grandpa_config = sc_consensus_grandpa::GrandpaParams { - config, - link: link_half, - network: network.clone(), - sync: sync_service.clone(), - voting_rule: voting_rules_builder.build(), - prometheus_registry: prometheus_registry.clone(), - shared_voter_state, - telemetry: telemetry.as_ref().map(|x| x.handle()), - notification_service: grandpa_notification_service, - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), - }; - - task_manager.spawn_essential_handle().spawn_blocking( - "grandpa-voter", - None, - sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?, - ); - } - - Ok(NewFull { - task_manager, - client, - overseer_handle, - network, - sync_service, - rpc_handlers, - backend, - }) -} - #[cfg(feature = "full-node")] macro_rules! chain_ops { ($config:expr, $telemetry_worker_handle:expr) => {{ diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 4d4fc89a6addc..ed9300be249e1 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -778,6 +778,9 @@ pub enum RuntimeApiRequest { /// Get the lookahead from the scheduler params. /// `V12` SchedulingLookahead(SessionIndex, RuntimeApiSender), + /// Get the maximum uncompressed code size. + /// `V12` + ValidationCodeBombLimit(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -824,6 +827,9 @@ impl RuntimeApiRequest { /// `SchedulingLookahead` pub const SCHEDULING_LOOKAHEAD_RUNTIME_REQUIREMENT: u32 = 12; + + /// `ValidationCodeBombLimit` + pub const VALIDATION_CODE_BOMB_LIMIT_RUNTIME_REQUIREMENT: u32 = 12; } /// A message to the Runtime API subsystem. 
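A minimal usage sketch of the new request (assuming `relay_parent: Hash`, `session_index: SessionIndex`, and a mutable subsystem `sender` are in scope; subsystems would typically go through the `fetch_validation_code_bomb_limit` helper added to `subsystem-util` below):

```rust
// Query the limit, falling back transparently on old runtimes.
let bomb_limit = polkadot_node_subsystem_util::runtime::fetch_validation_code_bomb_limit(
	relay_parent,
	session_index,
	&mut sender,
)
.await?;
// Runtimes predating API v12 hit the `NotSupported` branch inside the helper and
// yield the hard-coded `VALIDATION_CODE_BOMB_LIMIT` constant instead.
```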
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7e3849c20694d..dd5f61b3bc7f0 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -353,6 +353,10 @@ pub trait RuntimeApiSubsystemClient { // === v12 === /// Fetch the scheduling lookahead value async fn scheduling_lookahead(&self, at: Hash) -> Result; + + // === v12 === + /// Fetch the maximum uncompressed code size. + async fn validation_code_bomb_limit(&self, at: Hash) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -642,6 +646,10 @@ where async fn scheduling_lookahead(&self, at: Hash) -> Result { self.client.runtime_api().scheduling_lookahead(at) } + + async fn validation_code_bomb_limit(&self, at: Hash) -> Result { + self.client.runtime_api().validation_code_bomb_limit(at) + } } impl HeaderBackend for DefaultSubsystemClient diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index dd843cfb01fa9..46a009ae5c817 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -637,3 +637,32 @@ pub async fn fetch_scheduling_lookahead( res } } + +/// Fetch the validation code bomb limit from the runtime. +pub async fn fetch_validation_code_bomb_limit( + parent: Hash, + session_index: SessionIndex, + sender: &mut impl overseer::SubsystemSender, +) -> Result { + let res = recv_runtime( + request_from_runtime(parent, sender, |tx| { + RuntimeApiRequest::ValidationCodeBombLimit(session_index, tx) + }) + .await, + ) + .await; + + if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res { + gum::trace!( + target: LOG_TARGET, + ?parent, + "Querying the validation code bomb limit from the runtime is not supported by the current Runtime API", + ); + + // TODO: Remove this once runtime API version 12 is released. + #[allow(deprecated)] + Ok(polkadot_node_primitives::VALIDATION_CODE_BOMB_LIMIT as u32) + } else { + res + } +} diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index e0516a2f77e42..2471dc1fc0738 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -308,5 +308,10 @@ sp_api::decl_runtime_apis! { /// Retrieve the scheduling lookahead #[api_version(12)] fn scheduling_lookahead() -> u32; + + /***** Added in v12 *****/ + /// Retrieve the maximum uncompressed code size. + #[api_version(12)] + fn validation_code_bomb_limit() -> u32; } } diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index e5cf7c4d276e8..8c63fb620418b 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -50,6 +50,9 @@ const LOG_TARGET: &str = "runtime::configuration"; // `polkadot_node_network_protocol::POV_RESPONSE_SIZE`. const POV_SIZE_HARD_LIMIT: u32 = 16 * 1024 * 1024; +// The maximum compression ratio that we use to compute the maximum uncompressed code size. +pub(crate) const MAX_VALIDATION_CODE_COMPRESSION_RATIO: u32 = 10; + /// All configuration of the runtime with respect to paras. 
#[derive( Clone, diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index adc7f31a7b291..49c1876adca00 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -13,45 +13,91 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] use crate::configuration::*; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use sp_runtime::traits::One; -benchmarks! { - set_config_with_block_number {}: set_code_retention_period(RawOrigin::Root, One::one()) +#[benchmarks] +mod benchmarks { + use super::*; - set_config_with_u32 {}: set_max_code_size(RawOrigin::Root, 100) + #[benchmark] + fn set_config_with_block_number() { + #[extrinsic_call] + set_code_retention_period(RawOrigin::Root, One::one()); + } - set_config_with_option_u32 {}: set_max_validators(RawOrigin::Root, Some(10)) + #[benchmark] + fn set_config_with_u32() { + #[extrinsic_call] + set_max_code_size(RawOrigin::Root, 100); + } - set_hrmp_open_request_ttl {}: { - Err(BenchmarkError::Override( - BenchmarkResult::from_weight(T::BlockWeights::get().max_block) - ))?; + #[benchmark] + fn set_config_with_option_u32() { + #[extrinsic_call] + set_max_validators(RawOrigin::Root, Some(10)); } - set_config_with_balance {}: set_hrmp_sender_deposit(RawOrigin::Root, 100_000_000_000) + #[benchmark] + fn set_hrmp_open_request_ttl() -> Result<(), BenchmarkError> { + #[block] + { + Err(BenchmarkError::Override(BenchmarkResult::from_weight( + T::BlockWeights::get().max_block, + )))?; + } + Ok(()) + } - set_config_with_executor_params {}: set_executor_params(RawOrigin::Root, ExecutorParams::from(&[ - ExecutorParam::MaxMemoryPages(2080), - ExecutorParam::StackLogicalMax(65536), - ExecutorParam::StackNativeMax(256 * 1024 * 1024), - ExecutorParam::WasmExtBulkMemory, - ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024), - ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000), - ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000), - ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000), - ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000), - ][..])) + #[benchmark] + fn set_config_with_balance() { + #[extrinsic_call] + set_hrmp_sender_deposit(RawOrigin::Root, 100_000_000_000); + } - set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + #[benchmark] + fn set_config_with_executor_params() { + #[extrinsic_call] + set_executor_params( + RawOrigin::Root, + ExecutorParams::from( + &[ + ExecutorParam::MaxMemoryPages(2080), + ExecutorParam::StackLogicalMax(65536), + ExecutorParam::StackNativeMax(256 * 1024 * 1024), + ExecutorParam::WasmExtBulkMemory, + ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000), + ][..], + ), + ); + } - set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + #[benchmark] + fn set_config_with_perbill() { + #[extrinsic_call] + 
set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)); + } + + #[benchmark] + fn set_node_feature() { + #[extrinsic_call] + set_node_feature(RawOrigin::Root, 255, true); + } - set_config_with_scheduler_params {} : set_scheduler_params(RawOrigin::Root, SchedulerParams::default()) + #[benchmark] + fn set_config_with_scheduler_params() { + #[extrinsic_call] + set_scheduler_params(RawOrigin::Root, SchedulerParams::default()); + } impl_benchmark_test_suite!( Pallet, diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 5a77af0d79731..e79f1bb0f283f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -50,3 +50,9 @@ pub fn backing_constraints( pub fn scheduling_lookahead() -> u32 { configuration::ActiveConfig::::get().scheduler_params.lookahead } + +/// Implementation for `validation_code_bomb_limit` function from the runtime API +pub fn validation_code_bomb_limit() -> u32 { + configuration::ActiveConfig::::get().max_code_size * + configuration::MAX_VALIDATION_CODE_COMPRESSION_RATIO +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 0a6f52890b272..4961d7bbad296 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2166,6 +2166,10 @@ sp_api::impl_runtime_apis! { fn scheduling_lookahead() -> u32 { parachains_staging_runtime_api_impl::scheduling_lookahead::() } + + fn validation_code_bomb_limit() -> u32 { + parachains_staging_runtime_api_impl::validation_code_bomb_limit::() + } } #[api_version(5)] diff --git a/polkadot/runtime/rococo/src/weights/pallet_nis.rs b/polkadot/runtime/rococo/src/weights/pallet_nis.rs index 531b9be0b8df3..1d1a6afab8eba 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_nis.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_nis.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_nis` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `d3a9aad6f7a3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `bd5e4dfa0790`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -64,11 +64,11 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `6213 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 51_535_000 picoseconds. - Weight::from_parts(48_054_720, 0) + // Minimum execution time: 49_715_000 picoseconds. + Weight::from_parts(47_437_903, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 1_637 - .saturating_add(Weight::from_parts(116_321, 0).saturating_mul(l.into())) + // Standard Error: 1_376 + .saturating_add(Weight::from_parts(103_572, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -84,8 +84,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `54215` // Estimated: `51487` - // Minimum execution time: 173_342_000 picoseconds. - Weight::from_parts(184_974_000, 0) + // Minimum execution time: 158_994_000 picoseconds. 
+ Weight::from_parts(163_192_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -101,11 +101,11 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 48_913_000 picoseconds. - Weight::from_parts(40_310_888, 0) + // Minimum execution time: 47_456_000 picoseconds. + Weight::from_parts(39_000_499, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 1_567 - .saturating_add(Weight::from_parts(98_720, 0).saturating_mul(l.into())) + // Standard Error: 1_374 + .saturating_add(Weight::from_parts(87_646, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -117,8 +117,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 33_430_000 picoseconds. - Weight::from_parts(34_693_000, 0) + // Minimum execution time: 33_224_000 picoseconds. + Weight::from_parts(34_527_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -137,8 +137,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 70_761_000 picoseconds. - Weight::from_parts(72_954_000, 0) + // Minimum execution time: 68_688_000 picoseconds. + Weight::from_parts(70_098_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -157,8 +157,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `543` // Estimated: `3593` - // Minimum execution time: 89_467_000 picoseconds. - Weight::from_parts(92_605_000, 0) + // Minimum execution time: 85_968_000 picoseconds. + Weight::from_parts(87_276_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -175,8 +175,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 56_865_000 picoseconds. - Weight::from_parts(57_749_000, 0) + // Minimum execution time: 53_366_000 picoseconds. + Weight::from_parts(54_742_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -193,8 +193,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `488` // Estimated: `3593` - // Minimum execution time: 91_240_000 picoseconds. - Weight::from_parts(93_106_000, 0) + // Minimum execution time: 88_287_000 picoseconds. + Weight::from_parts(90_235_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) @@ -209,8 +209,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 23_148_000 picoseconds. - Weight::from_parts(24_318_000, 0) + // Minimum execution time: 21_256_000 picoseconds. 
+ Weight::from_parts(22_594_000, 0) .saturating_add(Weight::from_parts(0, 7487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +221,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 5_213_000 picoseconds. - Weight::from_parts(5_487_000, 0) + // Minimum execution time: 5_026_000 picoseconds. + Weight::from_parts(5_304_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -233,8 +233,8 @@ impl pallet_nis::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_226_000 picoseconds. - Weight::from_parts(5_480_000, 0) + // Minimum execution time: 4_905_000 picoseconds. + Weight::from_parts(5_189_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs index e29329b4a4e2d..e41173ebe3362 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `d3a9aad6f7a3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -58,11 +58,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_003_000 picoseconds. - Weight::from_parts(14_850_553, 0) + // Minimum execution time: 13_806_000 picoseconds. + Weight::from_parts(14_554_351, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_115 - .saturating_add(Weight::from_parts(29_879, 0).saturating_mul(p.into())) + // Standard Error: 1_532 + .saturating_add(Weight::from_parts(34_377, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -77,13 +77,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 38_342_000 picoseconds. - Weight::from_parts(39_628_705, 0) + // Minimum execution time: 37_228_000 picoseconds. 
+ Weight::from_parts(38_313_529, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_133 - .saturating_add(Weight::from_parts(142_200, 0).saturating_mul(a.into())) - // Standard Error: 3_237 - .saturating_add(Weight::from_parts(32_673, 0).saturating_mul(p.into())) + // Standard Error: 2_504 + .saturating_add(Weight::from_parts(152_320, 0).saturating_mul(a.into())) + // Standard Error: 2_587 + .saturating_add(Weight::from_parts(31_543, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -97,13 +97,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 26_033_000 picoseconds. - Weight::from_parts(27_251_440, 0) + // Minimum execution time: 25_166_000 picoseconds. + Weight::from_parts(26_418_176, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_815 - .saturating_add(Weight::from_parts(147_524, 0).saturating_mul(a.into())) - // Standard Error: 1_876 - .saturating_add(Weight::from_parts(7_453, 0).saturating_mul(p.into())) + // Standard Error: 2_246 + .saturating_add(Weight::from_parts(153_564, 0).saturating_mul(a.into())) + // Standard Error: 2_320 + .saturating_add(Weight::from_parts(4_962, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -117,13 +117,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_640_000 picoseconds. - Weight::from_parts(26_979_431, 0) + // Minimum execution time: 25_037_000 picoseconds. + Weight::from_parts(26_468_002, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_485 - .saturating_add(Weight::from_parts(151_580, 0).saturating_mul(a.into())) - // Standard Error: 2_567 - .saturating_add(Weight::from_parts(15_525, 0).saturating_mul(p.into())) + // Standard Error: 2_411 + .saturating_add(Weight::from_parts(143_670, 0).saturating_mul(a.into())) + // Standard Error: 2_491 + .saturating_add(Weight::from_parts(6_595, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -139,13 +139,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_615_000 picoseconds. - Weight::from_parts(35_256_571, 0) + // Minimum execution time: 33_282_000 picoseconds. + Weight::from_parts(34_525_175, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_509 - .saturating_add(Weight::from_parts(161_986, 0).saturating_mul(a.into())) - // Standard Error: 2_593 - .saturating_add(Weight::from_parts(37_310, 0).saturating_mul(p.into())) + // Standard Error: 2_568 + .saturating_add(Weight::from_parts(153_824, 0).saturating_mul(a.into())) + // Standard Error: 2_654 + .saturating_add(Weight::from_parts(32_899, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -156,11 +156,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_334_000 picoseconds. - Weight::from_parts(25_719_163, 0) + // Minimum execution time: 23_623_000 picoseconds. 
+ Weight::from_parts(24_796_223, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_993 - .saturating_add(Weight::from_parts(51_224, 0).saturating_mul(p.into())) + // Standard Error: 1_528 + .saturating_add(Weight::from_parts(49_181, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,11 +171,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 24_600_000 picoseconds. - Weight::from_parts(25_587_987, 0) + // Minimum execution time: 23_507_000 picoseconds. + Weight::from_parts(24_816_973, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_434 - .saturating_add(Weight::from_parts(52_906, 0).saturating_mul(p.into())) + // Standard Error: 1_511 + .saturating_add(Weight::from_parts(38_405, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,11 +186,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_716_000 picoseconds. - Weight::from_parts(22_675_217, 0) + // Minimum execution time: 20_730_000 picoseconds. + Weight::from_parts(22_017_715, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_305 - .saturating_add(Weight::from_parts(30_903, 0).saturating_mul(p.into())) + // Standard Error: 1_790 + .saturating_add(Weight::from_parts(23_582, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -201,11 +201,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `4706` - // Minimum execution time: 26_034_000 picoseconds. - Weight::from_parts(27_334_391, 0) + // Minimum execution time: 25_202_000 picoseconds. + Weight::from_parts(26_459_004, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_440 - .saturating_add(Weight::from_parts(6_151, 0).saturating_mul(p.into())) + // Standard Error: 1_850 + .saturating_add(Weight::from_parts(3_579, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,12 +216,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_474_000 picoseconds. - Weight::from_parts(23_584_470, 0) + // Minimum execution time: 21_449_000 picoseconds. 
+ Weight::from_parts(22_621_565, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_258 - .saturating_add(Weight::from_parts(39_748, 0).saturating_mul(p.into())) + // Standard Error: 2_057 + .saturating_add(Weight::from_parts(41_291, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `414` + // Estimated: `5698` + // Minimum execution time: 44_155_000 picoseconds. + Weight::from_parts(45_402_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs index ef8cfb1ed51fc..6bbec592bb4cd 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `d3a9aad6f7a3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `13d69b199c54`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -61,8 +61,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 11_055_000 picoseconds. - Weight::from_parts(11_488_000, 0) + // Minimum execution time: 10_901_000 picoseconds. + Weight::from_parts(11_468_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 11_125_000 picoseconds. - Weight::from_parts(11_591_000, 0) + // Minimum execution time: 10_847_000 picoseconds. + Weight::from_parts(11_499_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 11_127_000 picoseconds. - Weight::from_parts(11_499_000, 0) + // Minimum execution time: 11_143_000 picoseconds. 
+ Weight::from_parts(11_488_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -119,8 +119,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_955_000 picoseconds. - Weight::from_parts(11_407_000, 0) + // Minimum execution time: 11_126_000 picoseconds. + Weight::from_parts(11_416_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -135,8 +135,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 13_162_000 picoseconds. - Weight::from_parts(13_403_000, 0) + // Minimum execution time: 13_119_000 picoseconds. + Weight::from_parts(13_612_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -151,8 +151,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_868_000 picoseconds. - Weight::from_parts(11_438_000, 0) + // Minimum execution time: 11_072_000 picoseconds. + Weight::from_parts(11_412_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,8 +167,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 13_354_000 picoseconds. - Weight::from_parts(14_407_000, 0) + // Minimum execution time: 13_268_000 picoseconds. + Weight::from_parts(13_664_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -183,8 +183,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 11_283_000 picoseconds. - Weight::from_parts(12_054_000, 0) + // Minimum execution time: 11_047_000 picoseconds. + Weight::from_parts(11_298_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 117aa849b133f..226e22c078359 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1110,6 +1110,10 @@ sp_api::impl_runtime_apis! { fn scheduling_lookahead() -> u32 { staging_runtime_impl::scheduling_lookahead::() } + + fn validation_code_bomb_limit() -> u32 { + staging_runtime_impl::validation_code_bomb_limit::() + } } impl sp_consensus_beefy::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 8d66ac2868d0b..f69c85e699e35 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -113,6 +113,8 @@ pub mod system_parachain { pub const PEOPLE_ID: u32 = 1004; /// Brokerage parachain ID. pub const BROKER_ID: u32 = 1005; + /// AH-next - temporary AH clone. + pub const ASSET_HUB_NEXT_ID: u32 = 1100; /// All system parachains of Westend. 
pub type SystemParachains = IsChildSystemParachain; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 741957d847377..b5dc9b8f55cd1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2213,6 +2213,10 @@ sp_api::impl_runtime_apis! { fn scheduling_lookahead() -> u32 { parachains_staging_runtime_api_impl::scheduling_lookahead::() } + + fn validation_code_bomb_limit() -> u32 { + parachains_staging_runtime_api_impl::validation_code_bomb_limit::() + } } #[api_version(5)] diff --git a/polkadot/runtime/westend/src/weights/pallet_proxy.rs b/polkadot/runtime/westend/src/weights/pallet_proxy.rs index 4f1ac72a482f2..af628d9bd66d3 100644 --- a/polkadot/runtime/westend/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/westend/src/weights/pallet_proxy.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -58,11 +58,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 17_743_000 picoseconds. - Weight::from_parts(18_436_629, 0) + // Minimum execution time: 17_270_000 picoseconds. + Weight::from_parts(18_200_528, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_056 - .saturating_add(Weight::from_parts(43_916, 0).saturating_mul(p.into())) + // Standard Error: 1_391 + .saturating_add(Weight::from_parts(26_966, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -77,13 +77,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_728_000 picoseconds. - Weight::from_parts(42_605_142, 0) + // Minimum execution time: 41_652_000 picoseconds. + Weight::from_parts(42_325_742, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_675 - .saturating_add(Weight::from_parts(173_815, 0).saturating_mul(a.into())) - // Standard Error: 2_764 - .saturating_add(Weight::from_parts(29_849, 0).saturating_mul(p.into())) + // Standard Error: 2_581 + .saturating_add(Weight::from_parts(153_418, 0).saturating_mul(a.into())) + // Standard Error: 2_667 + .saturating_add(Weight::from_parts(32_644, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -97,13 +97,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 29_396_000 picoseconds. - Weight::from_parts(31_069_465, 0) + // Minimum execution time: 29_407_000 picoseconds. 
+ Weight::from_parts(30_301_847, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_248 - .saturating_add(Weight::from_parts(134_192, 0).saturating_mul(a.into())) - // Standard Error: 2_322 - .saturating_add(Weight::from_parts(7_479, 0).saturating_mul(p.into())) + // Standard Error: 1_850 + .saturating_add(Weight::from_parts(146_134, 0).saturating_mul(a.into())) + // Standard Error: 1_912 + .saturating_add(Weight::from_parts(11_996, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -117,13 +117,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 28_816_000 picoseconds. - Weight::from_parts(30_383_460, 0) + // Minimum execution time: 28_928_000 picoseconds. + Weight::from_parts(30_241_050, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_128 - .saturating_add(Weight::from_parts(157_895, 0).saturating_mul(a.into())) - // Standard Error: 2_198 - .saturating_add(Weight::from_parts(10_169, 0).saturating_mul(p.into())) + // Standard Error: 2_174 + .saturating_add(Weight::from_parts(148_745, 0).saturating_mul(a.into())) + // Standard Error: 2_247 + .saturating_add(Weight::from_parts(9_802, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -139,13 +139,13 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_628_000 picoseconds. - Weight::from_parts(39_513_043, 0) + // Minimum execution time: 37_416_000 picoseconds. + Weight::from_parts(38_427_320, 0) .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_372 + .saturating_add(Weight::from_parts(150_795, 0).saturating_mul(a.into())) // Standard Error: 2_451 - .saturating_add(Weight::from_parts(149_654, 0).saturating_mul(a.into())) - // Standard Error: 2_533 - .saturating_add(Weight::from_parts(17_215, 0).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(33_626, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -156,11 +156,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_549_215, 0) + // Minimum execution time: 27_489_000 picoseconds. + Weight::from_parts(28_509_919, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 4_083 - .saturating_add(Weight::from_parts(61_848, 0).saturating_mul(p.into())) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(60_615, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,11 +171,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 28_231_000 picoseconds. - Weight::from_parts(29_589_594, 0) + // Minimum execution time: 27_232_000 picoseconds. 
+ Weight::from_parts(28_580_565, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_950 - .saturating_add(Weight::from_parts(54_339, 0).saturating_mul(p.into())) + // Standard Error: 1_717 + .saturating_add(Weight::from_parts(57_719, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,11 +186,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_116_000 picoseconds. - Weight::from_parts(26_314_944, 0) + // Minimum execution time: 24_690_000 picoseconds. + Weight::from_parts(25_944_249, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_968 - .saturating_add(Weight::from_parts(39_294, 0).saturating_mul(p.into())) + // Standard Error: 2_748 + .saturating_add(Weight::from_parts(30_286, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -201,11 +201,11 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `4706` - // Minimum execution time: 29_742_000 picoseconds. - Weight::from_parts(31_063_206, 0) + // Minimum execution time: 28_721_000 picoseconds. + Weight::from_parts(30_396_418, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_575 - .saturating_add(Weight::from_parts(22_471, 0).saturating_mul(p.into())) + // Standard Error: 2_107 + .saturating_add(Weight::from_parts(1_013, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,12 +216,28 @@ impl pallet_proxy::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 26_141_000 picoseconds. - Weight::from_parts(27_309_074, 0) + // Minimum execution time: 25_591_000 picoseconds. + Weight::from_parts(27_026_043, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_808 - .saturating_add(Weight::from_parts(37_564, 0).saturating_mul(p.into())) + // Standard Error: 1_719 + .saturating_add(Weight::from_parts(24_549, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `5698` + // Minimum execution time: 49_684_000 picoseconds. 
+ Weight::from_parts(50_366_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_configuration.rs index 6e27b729a361b..98b98b3c19cc2 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `13d69b199c54`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -61,8 +61,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_475_000 picoseconds. - Weight::from_parts(11_132_000, 0) + // Minimum execution time: 10_838_000 picoseconds. + Weight::from_parts(11_218_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_419_000 picoseconds. - Weight::from_parts(11_052_000, 0) + // Minimum execution time: 10_949_000 picoseconds. + Weight::from_parts(11_200_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_613_000 picoseconds. - Weight::from_parts(11_170_000, 0) + // Minimum execution time: 10_975_000 picoseconds. + Weight::from_parts(11_519_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -119,8 +119,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_818_000 picoseconds. - Weight::from_parts(11_183_000, 0) + // Minimum execution time: 10_729_000 picoseconds. + Weight::from_parts(11_195_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -135,8 +135,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 12_672_000 picoseconds. - Weight::from_parts(13_154_000, 0) + // Minimum execution time: 12_949_000 picoseconds. 
+ Weight::from_parts(13_251_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -151,8 +151,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_814_000 picoseconds. - Weight::from_parts(11_120_000, 0) + // Minimum execution time: 10_683_000 picoseconds. + Weight::from_parts(11_196_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,8 +167,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 12_919_000 picoseconds. - Weight::from_parts(13_455_000, 0) + // Minimum execution time: 12_855_000 picoseconds. + Weight::from_parts(13_484_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -183,8 +183,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_495_000 picoseconds. - Weight::from_parts(11_189_000, 0) + // Minimum execution time: 10_933_000 picoseconds. + Weight::from_parts(11_367_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 4235edf82b24d..c7615ef217724 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -111,6 +111,7 @@ pub type XcmRouter = WithUniqueTopic< parameter_types! { pub AssetHub: Location = Parachain(ASSET_HUB_ID).into_location(); + pub AssetHubNext: Location = Parachain(ASSET_HUB_NEXT_ID).into_location(); pub Collectives: Location = Parachain(COLLECTIVES_ID).into_location(); pub BridgeHub: Location = Parachain(BRIDGE_HUB_ID).into_location(); pub Encointer: Location = Parachain(ENCOINTER_ID).into_location(); @@ -118,6 +119,7 @@ parameter_types! { pub Broker: Location = Parachain(BROKER_ID).into_location(); pub Wnd: AssetFilter = Wild(AllOf { fun: WildFungible, id: AssetId(TokenLocation::get()) }); pub WndForAssetHub: (AssetFilter, Location) = (Wnd::get(), AssetHub::get()); + pub WndForAssetHubNext: (AssetFilter, Location) = (Wnd::get(), AssetHubNext::get()); pub WndForCollectives: (AssetFilter, Location) = (Wnd::get(), Collectives::get()); pub WndForBridgeHub: (AssetFilter, Location) = (Wnd::get(), BridgeHub::get()); pub WndForEncointer: (AssetFilter, Location) = (Wnd::get(), Encointer::get()); @@ -129,6 +131,7 @@ parameter_types! 
{ pub type TrustedTeleporters = ( xcm_builder::Case, + xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 1c08c875eb21c..8f6eb9d642962 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -83,8 +83,9 @@ pub use location_conversion::{ ChildParachainConvertsVia, DescribeAccountId32Terminal, DescribeAccountIdTerminal, DescribeAccountKey20Terminal, DescribeAllTerminal, DescribeBodyTerminal, DescribeFamily, DescribeLocation, DescribePalletTerminal, DescribeTerminus, DescribeTreasuryVoiceTerminal, - GlobalConsensusConvertsFor, GlobalConsensusParachainConvertsFor, HashedDescription, - LocalTreasuryVoiceConvertsVia, ParentIsPreset, SiblingParachainConvertsVia, + ExternalConsensusLocationsConverterFor, GlobalConsensusConvertsFor, + GlobalConsensusParachainConvertsFor, HashedDescription, LocalTreasuryVoiceConvertsVia, + ParentIsPreset, SiblingParachainConvertsVia, }; mod matches_location; diff --git a/polkadot/xcm/xcm-builder/src/location_conversion.rs b/polkadot/xcm/xcm-builder/src/location_conversion.rs index c7aa0c8b5041d..0f4703dca89ca 100644 --- a/polkadot/xcm/xcm-builder/src/location_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/location_conversion.rs @@ -427,6 +427,8 @@ impl GlobalConsensusConvertsFor( PhantomData<(UniversalLocation, AccountId)>, ); @@ -458,6 +460,55 @@ impl } } +/// Converts locations from external global consensus systems (e.g., Ethereum, other parachains) +/// into `AccountId`. +/// +/// Replaces `GlobalConsensusParachainConvertsFor` and `EthereumLocationsConverterFor` in a +/// backwards-compatible way, and extends them for also handling child locations (e.g., +/// `AccountId(Alice)`). 
+pub struct ExternalConsensusLocationsConverterFor( + PhantomData<(UniversalLocation, AccountId)>, +); + +impl, AccountId: From<[u8; 32]> + Clone> + ConvertLocation + for ExternalConsensusLocationsConverterFor +{ + fn convert_location(location: &Location) -> Option { + let universal_source = UniversalLocation::get(); + tracing::trace!( + target: "xcm::location_conversion", + "ExternalConsensusLocationsConverterFor universal_source: {:?}, location: {:?}", + universal_source, location, + ); + let (remote_network, remote_location) = + ensure_is_remote(universal_source, location.clone()).ok()?; + + // replaces and extends `EthereumLocationsConverterFor` and + // `GlobalConsensusParachainConvertsFor` + let acc_id: AccountId = if let Ethereum { chain_id } = &remote_network { + match remote_location.as_slice() { + // equivalent to `EthereumLocationsConverterFor` + [] => (b"ethereum-chain", chain_id).using_encoded(blake2_256).into(), + // equivalent to `EthereumLocationsConverterFor` + [AccountKey20 { network: _, key }] => + (b"ethereum-chain", chain_id, *key).using_encoded(blake2_256).into(), + // extends `EthereumLocationsConverterFor` + tail => (b"ethereum-chain", chain_id, tail).using_encoded(blake2_256).into(), + } + } else { + match remote_location.as_slice() { + // equivalent to `GlobalConsensusParachainConvertsFor` + [Parachain(para_id)] => + (b"glblcnsnss/prchn_", remote_network, para_id).using_encoded(blake2_256).into(), + // converts everything else based on hash of encoded location tail + tail => (b"glblcnsnss", remote_network, tail).using_encoded(blake2_256).into(), + } + }; + Some(acc_id) + } +} + #[cfg(test)] mod tests { use super::*; @@ -608,32 +659,32 @@ mod tests { GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_3)]), ) - .expect("conversion is ok"); + .unwrap(); let res_2_gc_network_3 = GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_3)]), ) - .expect("conversion is ok"); + .unwrap(); let res_1_gc_network_4 = GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_4)]), ) - .expect("conversion is ok"); + .unwrap(); let res_2_gc_network_4 = GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_4)]), ) - .expect("conversion is ok"); + .unwrap(); let res_1_gc_network_5 = GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_5)]), ) - .expect("conversion is ok"); + .unwrap(); let res_2_gc_network_5 = GlobalConsensusConvertsFor::::convert_location( &Location::new(2, [GlobalConsensus(network_5)]), ) - .expect("conversion is ok"); + .unwrap(); assert_ne!(res_1_gc_network_3, res_1_gc_network_4); assert_ne!(res_1_gc_network_4, res_1_gc_network_5); @@ -678,6 +729,10 @@ mod tests { GlobalConsensusParachainConvertsFor::::convert_location( &location, ); + let result2 = + ExternalConsensusLocationsConverterFor::::convert_location( + &location, + ); match result { Some(account) => { assert_eq!( @@ -707,29 +762,64 @@ mod tests { ); }, } + if expected_result { + assert_eq!(result, result2); + } } // all success + let location = Location::new(2, [GlobalConsensus(ByGenesis([3; 32])), Parachain(1000)]); let res_gc_a_p1000 = GlobalConsensusParachainConvertsFor::::convert_location( - &Location::new(2, [GlobalConsensus(ByGenesis([3; 32])), Parachain(1000)]), + &location, ) - .expect("conversion is ok"); + .unwrap(); + assert_eq!( + res_gc_a_p1000, + ExternalConsensusLocationsConverterFor::::convert_location( + 
&location, + ).unwrap() + ); + + let location = Location::new(2, [GlobalConsensus(ByGenesis([3; 32])), Parachain(1001)]); let res_gc_a_p1001 = GlobalConsensusParachainConvertsFor::::convert_location( - &Location::new(2, [GlobalConsensus(ByGenesis([3; 32])), Parachain(1001)]), + &location, ) - .expect("conversion is ok"); + .unwrap(); + assert_eq!( + res_gc_a_p1001, + ExternalConsensusLocationsConverterFor::::convert_location( + &location, + ).unwrap() + ); + + let location = Location::new(2, [GlobalConsensus(ByGenesis([4; 32])), Parachain(1000)]); let res_gc_b_p1000 = GlobalConsensusParachainConvertsFor::::convert_location( - &Location::new(2, [GlobalConsensus(ByGenesis([4; 32])), Parachain(1000)]), + &location, ) - .expect("conversion is ok"); + .unwrap(); + assert_eq!( + res_gc_b_p1000, + ExternalConsensusLocationsConverterFor::::convert_location( + &location, + ).unwrap() + ); + + let location = Location::new(2, [GlobalConsensus(ByGenesis([4; 32])), Parachain(1001)]); let res_gc_b_p1001 = GlobalConsensusParachainConvertsFor::::convert_location( - &Location::new(2, [GlobalConsensus(ByGenesis([4; 32])), Parachain(1001)]), + &location, ) - .expect("conversion is ok"); + .unwrap(); + assert_eq!( + res_gc_b_p1001, + ExternalConsensusLocationsConverterFor::::convert_location( + &location, + ).unwrap() + ); + assert_ne!(res_gc_a_p1000, res_gc_a_p1001); assert_ne!(res_gc_a_p1000, res_gc_b_p1000); assert_ne!(res_gc_a_p1000, res_gc_b_p1001); diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index 0808d209343f0..e6a996e4e5589 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -168,7 +168,7 @@ fn reserve_transfer_with_error() { MockNet::reset(); // Execute XCM Transfer and Capture Logs - let (log_capture, subscriber) = init_log_capture(Level::ERROR); + let (log_capture, subscriber) = init_log_capture(Level::ERROR, false); subscriber::with_default(subscriber, || { let invalid_dest = Box::new(Parachain(9999).into()); let withdraw_amount = 123; diff --git a/prdoc/pr_6293.prdoc b/prdoc/pr_6293.prdoc new file mode 100644 index 0000000000000..dfd4bdee45744 --- /dev/null +++ b/prdoc/pr_6293.prdoc @@ -0,0 +1,11 @@ +title: Migrate pallet-nis benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + - Refactor to use the `frame` crate. + - Use procedural macro version `construct_runtime` in mock. + - Expose `PalletId` to `frame::pallet_prelude`. + - Part of #6202. +crates: +- name: pallet-nis + bump: patch diff --git a/prdoc/pr_7313.prdoc b/prdoc/pr_7313.prdoc new file mode 100644 index 0000000000000..d1114534cc239 --- /dev/null +++ b/prdoc/pr_7313.prdoc @@ -0,0 +1,21 @@ +title: "[XCM] add generic location to account converter that also works with external ecosystems" + +doc: +- audience: Runtime Dev + description: | + Adds a new `ExternalConsensusLocationsConverterFor` struct to handle external global + consensus locations and their child locations. + This struct extends the functionality of existing converters (`GlobalConsensusParachainConvertsFor` + and `EthereumLocationsConverterFor`) while maintaining backward compatibility. 
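To make the conversion described above concrete, here is a minimal sketch of the account derivation for a bare Ethereum consensus location, mirroring the `[]` arm of `convert_location` in the `location_conversion.rs` hunk above (requires the `parity-scale-codec` and `sp-core` crates; the chain id value is an illustrative assumption):

```rust
use codec::Encode;
use sp_core::hashing::blake2_256;

fn main() {
    // Example chain id only; 1 is Ethereum mainnet.
    let chain_id: u64 = 1;
    // Same preimage as the converter's `[]` (root location) arm.
    let account: [u8; 32] = (b"ethereum-chain", chain_id).using_encoded(blake2_256);
    // Child locations extend the preimage with the remaining junctions, so
    // every distinct external location hashes to a distinct account.
    assert_ne!(account, [0u8; 32]);
}
```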
+
+crates:
+  - name: snowbridge-router-primitives
+    bump: minor
+  - name: staging-xcm-builder
+    bump: minor
+  - name: asset-hub-rococo-runtime
+    bump: minor
+  - name: asset-hub-westend-runtime
+    bump: minor
+  - name: penpal-runtime
+    bump: minor
diff --git a/prdoc/pr_7639.prdoc b/prdoc/pr_7639.prdoc
new file mode 100644
index 0000000000000..3a6013b28925e
--- /dev/null
+++ b/prdoc/pr_7639.prdoc
@@ -0,0 +1,12 @@
+title: '`fatxpool`: improved handling of finality stalls'
+doc:
+- audience: Node Dev
+  description: |-
+    This pull request introduces measures to handle finality stalls by:
+    - notifying outdated transactions with a `FinalityTimeout` event.
+    - removing outdated views from the `view_store`.
+
+    An item is considered _outdated_ when the difference between its associated block and the current block exceeds a pre-defined threshold.
+crates:
+- name: sc-transaction-pool
+  bump: minor
diff --git a/prdoc/pr_7708.prdoc b/prdoc/pr_7708.prdoc
new file mode 100644
index 0000000000000..a3577238ac379
--- /dev/null
+++ b/prdoc/pr_7708.prdoc
@@ -0,0 +1,10 @@
+title: Support adding extra request-response protocols to the node
+doc:
+- audience: Node Dev
+  description: Allow adding extra request-response protocols during polkadot service
+    initialization. This is required to add a request-response protocol described
+    in [RFC-0008](https://polkadot-fellows.github.io/RFCs/approved/0008-parachain-bootnodes-dht.html)
+    to the relay chain side of the parachain node.
+crates:
+- name: polkadot-service
+  bump: minor
diff --git a/prdoc/pr_7760.prdoc b/prdoc/pr_7760.prdoc
new file mode 100644
index 0000000000000..53df66430f86f
--- /dev/null
+++ b/prdoc/pr_7760.prdoc
@@ -0,0 +1,39 @@
+title: Dynamic uncompressed code size limit
+doc:
+- audience: Runtime Dev
+  description: |-
+    Deprecates the node constant `VALIDATION_CODE_BOMB_LIMIT` and introduces the
+    `validation_code_bomb_limit` runtime API that computes the maximum
+    uncompressed code size as the maximum code size multiplied by a
+    compression ratio of 10.
+crates:
+- name: polkadot-node-primitives
+  bump: patch
+- name: polkadot-node-subsystem-types
+  bump: major
+- name: polkadot-primitives
+  bump: minor
+- name: polkadot-runtime-parachains
+  bump: minor
+- name: rococo-runtime
+  bump: minor
+- name: westend-runtime
+  bump: minor
+- name: polkadot-node-core-runtime-api
+  bump: patch
+- name: polkadot-node-subsystem-util
+  bump: minor
+- name: polkadot-node-core-candidate-validation
+  bump: patch
+- name: polkadot-node-core-pvf-common
+  bump: major
+- name: polkadot-node-core-pvf-prepare-worker
+  bump: patch
+- name: polkadot-node-core-pvf
+  bump: patch
+- name: cumulus-relay-chain-minimal-node
+  bump: minor
+- name: cumulus-relay-chain-rpc-interface
+  bump: minor
+- name: cumulus-pov-validator
+  bump: minor
diff --git a/prdoc/pr_7769.prdoc b/prdoc/pr_7769.prdoc
new file mode 100644
index 0000000000000..16fb5c73a562b
--- /dev/null
+++ b/prdoc/pr_7769.prdoc
@@ -0,0 +1,10 @@
+title: Ensure Logs Are Captured for Assertions and Printed During Tests
+doc:
+- audience: Runtime Dev
+  description: |-
+    This PR enhances test_log_capture, ensuring logs are captured for assertions and printed to the console during test execution.
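The `FinalityTimeout` notification mentioned in the `fatxpool` entry above reaches submit-and-watch clients through `sc_transaction_pool_api::TransactionStatus`. A hedged sketch of a watcher reacting to it; the handler shape is an assumption, only the status variants come from that API:

```rust
use sc_transaction_pool_api::TransactionStatus;

// `H` is the pool's transaction-hash type, `BH` its block-hash type.
fn on_status<H, BH>(status: &TransactionStatus<H, BH>) {
    match status {
        // Terminal success: the containing block was finalized.
        TransactionStatus::Finalized(_) => { /* stop watching */ },
        // With this PR, outdated transactions receive this event when
        // finality stalls past the pool's threshold.
        TransactionStatus::FinalityTimeout(_) => { /* resubmit or alert */ },
        // Intermediate states: Ready, Broadcast, InBlock, Retracted, ...
        _ => {},
    }
}
```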
+crates:
+- name: sp-tracing
+  bump: minor
+- name: xcm-simulator-example
+  bump: patch
\ No newline at end of file
diff --git a/prdoc/pr_7784.prdoc b/prdoc/pr_7784.prdoc
new file mode 100644
index 0000000000000..cb8d5e8a42591
--- /dev/null
+++ b/prdoc/pr_7784.prdoc
@@ -0,0 +1,11 @@
+title: '[pallet-revive] block.timestamp should return seconds'
+doc:
+- audience: Runtime Dev
+  description: |-
+    In Solidity, `block.timestamp` should be expressed in seconds;
+    see https://docs.soliditylang.org/en/latest/units-and-global-variables.html#block-and-transaction-properties
+crates:
+- name: pallet-revive
+  bump: patch
+- name: pallet-revive-uapi
+  bump: patch
diff --git a/prdoc/pr_7786.prdoc b/prdoc/pr_7786.prdoc
new file mode 100644
index 0000000000000..ddbaf5328023a
--- /dev/null
+++ b/prdoc/pr_7786.prdoc
@@ -0,0 +1,10 @@
+title: 'pallet revive: rpc build script should not panic'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Fix a build error in the pallet revive RPC build script that can occur when using `cargo remote`
+    or `cargo vendor`.
+
+crates:
+- name: pallet-revive-eth-rpc
+  bump: patch
diff --git a/prdoc/pr_7787.prdoc b/prdoc/pr_7787.prdoc
new file mode 100644
index 0000000000000..bc19eb79886b3
--- /dev/null
+++ b/prdoc/pr_7787.prdoc
@@ -0,0 +1,11 @@
+title: Add asset-hub-next as a trusted teleporter
+doc:
+- audience: Runtime Dev
+  description: |-
+    Asset Hub Next has been deployed on Westend as parachain 1100, but it's not yet a trusted teleporter.
+    This minimal PR adds it in stable2412 so that it can be deployed right away without waiting for the rest of the release to be finalised and deployed.
+crates:
+- name: westend-runtime-constants
+  bump: patch
+- name: westend-runtime
+  bump: patch
diff --git a/prdoc/pr_7790.prdoc b/prdoc/pr_7790.prdoc
new file mode 100644
index 0000000000000..09a8ff52c8d13
--- /dev/null
+++ b/prdoc/pr_7790.prdoc
@@ -0,0 +1,9 @@
+title: 'pallet-scheduler: Put back postponed tasks into the agenda'
+doc:
+- audience: Runtime Dev
+  description: "Right now `pallet-scheduler` is not putting back postponed tasks into\
+    \ the agenda when the early weight check is failing. This pull request ensures\
+    \ that these tasks are put back into the agenda and are not just \"lost\".\r\n"
+crates:
+- name: pallet-scheduler
+  bump: patch
diff --git a/prdoc/pr_7794.prdoc b/prdoc/pr_7794.prdoc
new file mode 100644
index 0000000000000..e9b8b57a61e94
--- /dev/null
+++ b/prdoc/pr_7794.prdoc
@@ -0,0 +1,16 @@
+title: '[glutton-westend] remove `CheckNonce` from `TXExtension` and add sudo key
+  to genesis config'
+doc:
+- audience: Runtime Dev
+  description: |-
+    I discovered in https://github.com/paritytech/polkadot-sdk/pull/7459 that the overhead benchmark is not working for glutton-westend, as the client can't send `system.remark` extrinsics. This was due to 2 issues:
+
+    1. Alice was not set as sudo. Hence, the `CheckOnlySudoAccount` deemed the extrinsic as invalid.
+    2. The `CheckNonce` TxExtension also marked the extrinsic as invalid, as the account doesn't exist (because glutton has no balances pallet).
+
+    This PR fixes issue 1 for now. I wanted to simply remove the `CheckNonce` in the TxExtension to fix 2., but it turns out that this is not possible, as the tx-pool needs the nonce tag to identify the transaction. https://github.com/paritytech/polkadot-sdk/pull/6884 will fix sending extrinsics on glutton.
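For context, a rough sketch of the extension tuple discussed above (the member list is assumed, illustrative only); `CheckNonce` has to stay because the transaction pool relies on its nonce tag to identify transactions:

// Hypothetical glutton-westend TxExtension shape.
pub type TxExtension = (
	pallet_sudo::CheckOnlySudoAccount<Runtime>,
	frame_system::CheckNonce<Runtime>,
	// ... further system checks ...
);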
+crates:
+- name: glutton-westend-runtime
+  bump: minor
+- name: polkadot-parachain-bin
+  bump: minor
diff --git a/prdoc/pr_7801.prdoc b/prdoc/pr_7801.prdoc
new file mode 100644
index 0000000000000..3d3dde3b02f44
--- /dev/null
+++ b/prdoc/pr_7801.prdoc
@@ -0,0 +1,25 @@
+title: add poke_deposit extrinsic to pallet-proxy
+doc:
+- audience: Runtime Dev
+  description: This PR adds a new extrinsic `poke_deposit` to `pallet-proxy`. This extrinsic will be used to re-adjust the deposits made in the pallet to create a proxy or to create an announcement.
+crates:
+- name: asset-hub-rococo-runtime
+  bump: major
+- name: asset-hub-westend-runtime
+  bump: major
+- name: collectives-westend-runtime
+  bump: major
+- name: coretime-rococo-runtime
+  bump: major
+- name: coretime-westend-runtime
+  bump: major
+- name: people-rococo-runtime
+  bump: major
+- name: people-westend-runtime
+  bump: major
+- name: rococo-runtime
+  bump: major
+- name: westend-runtime
+  bump: major
+- name: pallet-proxy
+  bump: major
diff --git a/prdoc/pr_7802.prdoc b/prdoc/pr_7802.prdoc
new file mode 100644
index 0000000000000..71c1256e75c74
--- /dev/null
+++ b/prdoc/pr_7802.prdoc
@@ -0,0 +1,11 @@
+title: '[AHM] child bounties and recovery: make more stuff public'
+doc:
+- audience: Runtime Dev
+  description: |
+    Make some items in the child-bounties and recovery pallet public to reduce code-duplication for
+    the Asset Hub migration.
+crates:
+- name: pallet-child-bounties
+  bump: minor
+- name: pallet-recovery
+  bump: minor
diff --git a/prdoc/pr_7809.prdoc b/prdoc/pr_7809.prdoc
new file mode 100644
index 0000000000000..ab7a4c84c4d71
--- /dev/null
+++ b/prdoc/pr_7809.prdoc
@@ -0,0 +1,19 @@
+title: "[XCM] Add generic location to account converter that also works with external ecosystems for bridge hubs"
+
+doc:
+- audience: Runtime Dev
+  description: |
+    Adds a new `ExternalConsensusLocationsConverterFor` struct to handle external global
+    consensus locations and their child locations for Bridge Hubs.
+    This struct extends the functionality of existing converters (`GlobalConsensusParachainConvertsFor`
+    and `EthereumLocationsConverterFor`) while maintaining backward compatibility.
+
+crates:
+  - name: asset-hub-rococo-runtime
+    bump: minor
+  - name: asset-hub-westend-runtime
+    bump: minor
+  - name: bridge-hub-rococo-runtime
+    bump: minor
+  - name: bridge-hub-westend-runtime
+    bump: minor
diff --git a/prdoc/pr_7812.prdoc b/prdoc/pr_7812.prdoc
new file mode 100644
index 0000000000000..97098651734ab
--- /dev/null
+++ b/prdoc/pr_7812.prdoc
@@ -0,0 +1,13 @@
+title: '`apply_authorized_upgrade`: Remove authorization if the version check fails'
+doc:
+- audience: Runtime User
+  description: |-
+    This PR ensures that we remove the authorization for a runtime upgrade if the version check failed.
+    If that check is failing, it means that the runtime upgrade is invalid and the check will never succeed.
+
+    Besides that, the PR does some cleanups.
+crates:
+- name: cumulus-pallet-parachain-system
+  bump: major
+- name: frame-system
+  bump: major
diff --git a/prdoc/pr_7813.prdoc b/prdoc/pr_7813.prdoc
new file mode 100644
index 0000000000000..827bcdf3541f8
--- /dev/null
+++ b/prdoc/pr_7813.prdoc
@@ -0,0 +1,10 @@
+title: Improve metadata for `SkipCheckIfFeeless`
+doc:
+- audience: Runtime Dev
+  description: |-
+    If the inner transaction extension used inside `SkipCheckIfFeeless` is a tuple of multiple extensions, the metadata is not correct; it is now fixed.
+
+    E.g. if the transaction extension is `SkipCheckIfFeeless::` then the metadata was wrong.
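The case the fix covers, as a hedged sketch (type names assumed): the metadata was misreported whenever the wrapped inner extension was itself a tuple of multiple extensions, e.g.:

// Hypothetical runtime snippet: `SkipCheckIfFeeless` wrapping a tuple of two
// inner extensions; the nested tuple is what the metadata used to misreport.
type SkippedWhenFeeless = pallet_skip_feeless_payment::SkipCheckIfFeeless<
	Runtime,
	(
		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
		frame_system::CheckWeight<Runtime>,
	),
>;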
+crates:
+- name: pallet-skip-feeless-payment
+  bump: patch
diff --git a/prdoc/pr_7818.prdoc b/prdoc/pr_7818.prdoc
new file mode 100644
index 0000000000000..fbf7a845010ae
--- /dev/null
+++ b/prdoc/pr_7818.prdoc
@@ -0,0 +1,7 @@
+title: '[pallet-revive] eth-rpc-tester quick fixes'
+doc:
+- audience: Runtime Dev
+  description: Small tweaks to the eth-rpc-tester bin
+crates:
+- name: pallet-revive-eth-rpc
+  bump: patch
diff --git a/prdoc/pr_7820.prdoc b/prdoc/pr_7820.prdoc
new file mode 100644
index 0000000000000..627b1ad34e44e
--- /dev/null
+++ b/prdoc/pr_7820.prdoc
@@ -0,0 +1,8 @@
+title: 'Make pallet-transaction-payment-benchmark work with ed 0'
+doc:
+- audience: Runtime Dev
+  description: |
+    Make it possible to run the transaction-payment benchmark with an existential deposit of 0
+crates:
+- name: pallet-transaction-payment
+  bump: minor
\ No newline at end of file
diff --git a/substrate/client/network/src/protocol/notifications/tests/conformance.rs b/substrate/client/network/src/protocol/notifications/tests/conformance.rs
index 421177997f998..0bf39973d16c7 100644
--- a/substrate/client/network/src/protocol/notifications/tests/conformance.rs
+++ b/substrate/client/network/src/protocol/notifications/tests/conformance.rs
@@ -675,7 +675,7 @@ async fn litep2p_disconnects_libp2p_substream() {
 	let mut open_times = 0;
 
 	// Disarm first timer interval.
-	let mut timer = tokio::time::interval(std::time::Duration::from_secs(u64::MAX));
+	let mut timer = tokio::time::interval(std::time::Duration::from_secs(u64::MAX / 4));
 	timer.tick().await;
 
 	loop {
diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml
index 26bbf58f1522d..355b1a16d4355 100644
--- a/substrate/client/transaction-pool/Cargo.toml
+++ b/substrate/client/transaction-pool/Cargo.toml
@@ -43,6 +43,7 @@ tokio-stream = { workspace = true }
 tracing = { workspace = true, default-features = true }
 
 [dev-dependencies]
+anyhow = { workspace = true }
 array-bytes = { workspace = true, default-features = true }
 assert_matches = { workspace = true }
 criterion = { workspace = true, default-features = true }
@@ -51,6 +52,12 @@ sp-consensus = { workspace = true, default-features = true }
 substrate-test-runtime = { workspace = true }
 substrate-test-runtime-client = { workspace = true }
 substrate-test-runtime-transaction-pool = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["rt-multi-thread"] }
+tracing-subscriber = { workspace = true }
+txtesttool = { version = "0.4.0", package = "substrate-txtesttool" }
+zombienet-configuration = { workspace = true }
+zombienet-sdk = { workspace = true }
 
 [[bench]]
name = "basics"
diff --git a/substrate/client/transaction-pool/src/common/tracing_log_xt.rs b/substrate/client/transaction-pool/src/common/tracing_log_xt.rs
index 4d1c5d09cc7ac..9be9cddd978fd 100644
--- a/substrate/client/transaction-pool/src/common/tracing_log_xt.rs
+++ b/substrate/client/transaction-pool/src/common/tracing_log_xt.rs
@@ -21,34 +21,34 @@
 /// Logs every transaction from given `tx_collection` with given level.
 macro_rules! log_xt {
log_xt { (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { - for tx in $tx_collection { + for tx_hash in $tx_collection { tracing::event!( + target: $target, $level, - target = $target, - tx_hash = format!("{:?}", tx), + ?tx_hash, $text_with_format, ); } }; (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr, $($arg:expr),*) => { - for tx in $tx_collection { + for tx_hash in $tx_collection { tracing::event!( + target: $target, $level, - target = $target, - tx_hash = format!("{:?}", tx), + ?tx_hash, $text_with_format, $($arg),* ); } }; (data: tuple, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { - for tx in $tx_collection { + for (tx_hash, arg) in $tx_collection { tracing::event!( + target: $target, $level, - target = $target, - tx_hash = format!("{:?}", tx.0), + ?tx_hash, $text_with_format, - tx.1 + arg ); } }; diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index 91237910adc1f..580464abc6161 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -113,8 +113,8 @@ where RemoveView(BlockHash), /// Removes referencing views for given extrinsic hashes. /// - /// Intended to ba called on finalization. - RemoveFinalizedTxs(Vec>), + /// Intended to ba called when transactions were finalized or their finality timed out. + RemoveTransactions(Vec>), } impl Debug for Command @@ -125,7 +125,7 @@ where match self { Command::AddView(..) => write!(f, "AddView"), Command::RemoveView(..) => write!(f, "RemoveView"), - Command::RemoveFinalizedTxs(..) => write!(f, "RemoveFinalizedTxs"), + Command::RemoveTransactions(..) => write!(f, "RemoveTransactions"), } } } @@ -229,7 +229,7 @@ where } }); }, - Command::RemoveFinalizedTxs(xts) => { + Command::RemoveTransactions(xts) => { log_xt_trace!( target: LOG_TARGET, xts.clone(), @@ -422,16 +422,16 @@ where }); } - /// Removes status info for finalized transactions. - pub fn remove_finalized_txs( + /// Removes status info for transactions. 
+ pub fn remove_transactions( &self, xts: impl IntoIterator> + Clone, ) { let _ = self .controller - .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) + .unbounded_send(Command::RemoveTransactions(xts.into_iter().collect())) .map_err(|e| { - trace!(target: LOG_TARGET, "dropped_watcher: remove_finalized_txs send message failed: {e}"); + trace!(target: LOG_TARGET, "dropped_watcher: remove_transactions send message failed: {e}"); }); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index c21e0b8df6ff5..337da1dd61d4f 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -38,7 +38,7 @@ use crate::{ graph::{ self, base_pool::{TimedTransactionSource, Transaction}, - ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + BlockHash, ExtrinsicFor, ExtrinsicHash, IsValidator, Options, }, ReadyIteratorFor, LOG_TARGET, }; @@ -62,9 +62,10 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, transaction_validity::{TransactionValidityError, ValidTransaction}, + Saturating, }; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, pin::Pin, sync::Arc, time::Instant, @@ -72,6 +73,11 @@ use std::{ use tokio::select; use tracing::{debug, info, trace, warn}; +/// The maximum block height difference before considering a view or transaction as timed-out +/// due to a finality stall. When the difference exceeds this threshold, elements are treated +/// as stale and are subject to cleanup. +const FINALITY_TIMEOUT_THRESHOLD: usize = 128; + /// Fork aware transaction pool task, that needs to be polled. pub type ForkAwareTxPoolTask = Pin + Send>>; @@ -161,6 +167,22 @@ where /// Is node the validator. is_validator: IsValidator, + + /// Finality timeout threshold. + /// + /// Sets the maximum permissible block height difference between the latest block + /// and the oldest transactions or views in the pool. Beyond this difference, + /// transactions/views are considered timed out and eligible for cleanup. + finality_timeout_threshold: usize, + + /// Transactions included in blocks since the most recently finalized block (including this + /// block). + /// + /// Holds a mapping of block hash and number to their corresponding transaction hashes. + /// + /// Intended to be used in the finality stall cleanups and also as a cache for all in-block + /// transactions. 
+	included_transactions: Mutex, Vec>>>,
 }
 
 impl ForkAwareTxPool
@@ -175,6 +197,7 @@ where
 		pool_api: Arc,
 		best_block_hash: Block::Hash,
 		finalized_hash: Block::Hash,
+		finality_timeout_threshold: Option,
 	) -> (Self, ForkAwareTxPoolTask) {
 		Self::new_test_with_limits(
 			pool_api,
@@ -183,6 +206,7 @@
 			Options::default().ready,
 			Options::default().future,
 			usize::MAX,
+			finality_timeout_threshold,
 		)
 	}
 
@@ -195,6 +219,7 @@
 		ready_limits: crate::PoolLimit,
 		future_limits: crate::PoolLimit,
 		mempool_max_transactions_count: usize,
+		finality_timeout_threshold: Option,
 	) -> (Self, ForkAwareTxPoolTask) {
 		let (listener, listener_task) = MultiViewListener::new_with_worker(Default::default());
 		let listener = Arc::new(listener);
@@ -250,6 +275,9 @@ where
 				is_validator: false.into(),
 				metrics: Default::default(),
 				events_metrics_collector: EventsMetricsCollector::default(),
+				finality_timeout_threshold: finality_timeout_threshold
+					.unwrap_or(FINALITY_TIMEOUT_THRESHOLD),
+				included_transactions: Default::default(),
 			},
 			combined_tasks,
 		)
@@ -381,6 +409,8 @@ where
 			metrics,
 			events_metrics_collector,
 			is_validator,
+			finality_timeout_threshold: FINALITY_TIMEOUT_THRESHOLD,
+			included_transactions: Default::default(),
 		}
 	}
 
@@ -1061,6 +1091,78 @@ where
 			View::start_background_revalidation(view, self.revalidation_queue.clone()).await;
 		}
+
+		self.finality_stall_cleanup(hash_and_number);
+	}
+
+	/// Cleans up transactions and views outdated by potential finality stalls.
+	///
+	/// This function removes transactions from the pool that were included in blocks but not
+	/// finalized within a pre-defined block height threshold. Transactions not meeting finality
+	/// within this threshold are notified with a `FinalityTimeout` event. The threshold is based
+	/// on the current block number, `at`.
+	///
+	/// Additionally, this method triggers the view store to handle and remove stale views caused by
+	/// the finality stall.
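+	///
+	/// Illustrative arithmetic (an added example, assuming the default threshold of 128):
+	/// with the current block at #1000, a block included at #871 has a height difference
+	/// of 129 > 128, so its transactions are notified with `FinalityTimeout`, while a
+	/// block at #872 (difference exactly 128) is still within the threshold.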
+	fn finality_stall_cleanup(&self, at: &HashAndNumber) {
+		let (oldest_block_number, finality_timedout_blocks) = {
+			let mut included_transactions = self.included_transactions.lock();
+
+			let Some(oldest_block_number) =
+				included_transactions.first_key_value().map(|(k, _)| k.number)
+			else {
+				return
+			};
+
+			if at.number.saturating_sub(oldest_block_number).into() <=
+				self.finality_timeout_threshold.into()
+			{
+				return
+			}
+
+			let mut finality_timedout_blocks =
+				indexmap::IndexMap::, Vec>>::default();
+
+			included_transactions.retain(
+				|HashAndNumber { number: view_number, hash: view_hash }, tx_hashes| {
+					let diff = at.number.saturating_sub(*view_number);
+					if diff.into() > self.finality_timeout_threshold.into() {
+						finality_timedout_blocks.insert(*view_hash, std::mem::take(tx_hashes));
+						false
+					} else {
+						true
+					}
+				},
+			);
+
+			(oldest_block_number, finality_timedout_blocks)
+		};
+
+		if !finality_timedout_blocks.is_empty() {
+			self.ready_poll.lock().remove_cancelled();
+			self.view_store.listener.remove_stale_controllers();
+		}
+
+		let finality_timedout_blocks_len = finality_timedout_blocks.len();
+
+		for (block_hash, tx_hashes) in finality_timedout_blocks {
+			self.view_store.listener.transactions_finality_timeout(&tx_hashes, block_hash);
+
+			self.mempool.remove_transactions(&tx_hashes);
+			self.import_notification_sink.clean_notified_items(&tx_hashes);
+			self.view_store.dropped_stream_controller.remove_transactions(tx_hashes.clone());
+		}
+
+		self.view_store.finality_stall_view_cleanup(at, self.finality_timeout_threshold);
+
+		debug!(
+			target: LOG_TARGET,
+			?at,
+			included_transactions_len = ?self.included_transactions.lock().len(),
+			finality_timedout_blocks_len,
+			?oldest_block_number,
+			"finality_stall_cleanup"
+		);
 	}
 
 	/// Builds a new view.
@@ -1158,48 +1260,75 @@ where
 		Some(view)
 	}
 
-	/// Returns the list of xts included in all block ancestors, including the block itself.
+	/// Retrieves transaction hashes from the `included_transactions` cache or, if not present,
+	/// fetches them from the blockchain API using the block's hash `at`.
 	///
-	/// Example: for the following chain `F<-B1<-B2<-B3` xts from `F,B1,B2,B3` will be returned.
-	async fn extrinsics_included_since_finalized(&self, at: Block::Hash) -> HashSet> {
+	/// Returns a `Vec` of transaction hashes.
+	async fn fetch_block_transactions(&self, at: &HashAndNumber) -> Vec> {
+		if let Some(txs) = self.included_transactions.lock().get(at) {
+			return txs.clone()
+		};
+
+		trace!(
+			target: LOG_TARGET,
+			?at,
+			"fetch_block_transactions from api"
+		);
+
+		self.api
+			.block_body(at.hash)
+			.await
+			.unwrap_or_else(|error| {
+				warn!(
+					target: LOG_TARGET,
+					%error,
+					"fetch_block_transactions: error request"
+				);
+				None
+			})
+			.unwrap_or_default()
+			.into_iter()
+			.map(|t| self.hash_of(&t))
+			.collect::>()
+	}
+
+	/// Returns the list of xts included in all of the block's ancestors, up to the recently
+	/// finalized block (or up to the finality timeout threshold), including the block itself.
+	///
+	/// Example: for the following chain `F<-B1<-B2<-B3` xts from `B1,B2,B3` will be returned.
+	async fn txs_included_since_finalized(
+		&self,
+		at: &HashAndNumber,
+	) -> HashSet> {
 		let start = Instant::now();
 		let recent_finalized_block = self.enactment_state.lock().recent_finalized_block();
 
-		let Ok(tree_route) = self.api.tree_route(recent_finalized_block, at) else {
+		let Ok(tree_route) = self.api.tree_route(recent_finalized_block, at.hash) else {
 			return Default::default()
 		};
 
-		let api = self.api.clone();
-		let mut all_extrinsics = HashSet::new();
+		let mut all_txs = HashSet::new();
 
-		for h in tree_route.enacted().iter().rev() {
-			api.block_body(h.hash)
-				.await
-				.unwrap_or_else(|error| {
-					warn!(
-						target: LOG_TARGET,
-						%error,
-						"Compute ready light transactions: error request"
-					);
-					None
-				})
-				.unwrap_or_default()
-				.into_iter()
-				.map(|t| self.hash_of(&t))
-				.for_each(|tx_hash| {
-					all_extrinsics.insert(tx_hash);
-				});
+		for block in tree_route.enacted().iter() {
+			// note: There is no point in fetching transactions from blocks older than the
+			// threshold. All transactions included in those blocks were already removed from the
+			// pool with a FinalityTimeout event.
+			if at.number.saturating_sub(block.number).into() <=
+				self.finality_timeout_threshold.into()
+			{
+				all_txs.extend(self.fetch_block_transactions(block).await);
+			}
 		}
 
 		debug!(
 			target: LOG_TARGET,
 			?at,
 			?recent_finalized_block,
-			extrinsics_count = all_extrinsics.len(),
+			extrinsics_count = all_txs.len(),
 			duration = ?start.elapsed(),
-			"fatp::extrinsics_included_since_finalized"
+			"fatp::txs_included_since_finalized"
 		);
-		all_extrinsics
+		all_txs
 	}
 
 	/// Updates the given view with the transactions from the internal mempol.
@@ -1209,8 +1338,7 @@ where
 	/// view.
 	///
 	/// If there are no views, and mempool transaction is reported as invalid for the given view,
-	/// the transaction is reported as invalid and removed from the mempool. This does not apply to
-	/// stale and temporarily banned transactions.
+	/// the transaction is notified as invalid and removed from the mempool.
 	async fn update_view_with_mempool(&self, view: &View) {
 		debug!(
 			target: LOG_TARGET,
@@ -1219,7 +1347,7 @@ where
 			active_views_count = self.active_views_count(),
 			"update_view_with_mempool"
 		);
-		let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await;
+		let included_xts = self.txs_included_since_finalized(&view.at).await;
 
 		let (hashes, xts_filtered): (Vec<_>, Vec<_>) = self
 			.mempool
@@ -1289,16 +1417,15 @@ where
 		// transactions with those hashes from the retracted blocks.
let mut pruned_log = HashSet::>::new(); - future::join_all( - tree_route - .enacted() - .iter() - .map(|h| crate::prune_known_txs_for_block(h, &*api, &view.pool)), - ) + future::join_all(tree_route.enacted().iter().map(|hn| { + let api = api.clone(); + async move { (hn, crate::prune_known_txs_for_block(hn, &*api, &view.pool).await) } + })) .await .into_iter() - .for_each(|enacted_log| { - pruned_log.extend(enacted_log); + .for_each(|(key, enacted_log)| { + pruned_log.extend(enacted_log.clone()); + self.included_transactions.lock().insert(key.clone(), enacted_log); }); self.metrics.report(|metrics| { @@ -1395,6 +1522,9 @@ where .report(|metrics| metrics.finalized_txs.inc_by(finalized_xts.len() as _)); if let Ok(Some(finalized_number)) = finalized_number { + self.included_transactions + .lock() + .retain(|cached_block, _| finalized_number < cached_block.number); self.revalidation_queue .revalidate_mempool( self.mempool.clone(), @@ -1406,14 +1536,16 @@ where trace!( target: LOG_TARGET, ?finalized_number, - "purge_transactions_later skipped, cannot find block number" + "handle_finalized: revalidation/cleanup skipped: could not resolve finalized block number" ); } self.ready_poll.lock().remove_cancelled(); - trace!( + + debug!( target: LOG_TARGET, active_views_count = self.active_views_count(), + included_transactions_len = ?self.included_transactions.lock().len(), "handle_finalized after" ); } @@ -1581,9 +1713,6 @@ where // } }, Ok(EnactmentAction::HandleEnactment(tree_route)) => { - if matches!(event, ChainEvent::Finalized { .. }) { - self.view_store.handle_pre_finalized(event.hash()).await; - }; self.handle_new_block(&tree_route).await; }, }; @@ -1607,7 +1736,8 @@ where info!( target: LOG_TARGET, txs = ?self.mempool_len(), - active_views_count = self.active_views_count(), + a = self.active_views_count(), + i = self.inactive_views_count(), views = ?self.views_stats(), ?event, ?duration, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 5216f494ffa55..396bc8a6a9282 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -84,22 +84,27 @@ enum TransactionStatusUpdate { /// Marks a transaction as invalidated. /// /// If all pre-conditions are met, an external invalid event will be sent out. - TransactionInvalidated(ExtrinsicHash), + Invalidated(ExtrinsicHash), /// Notifies that a transaction was finalized in a specific block hash and transaction index. /// /// Send out an external finalized event. - TransactionFinalized(ExtrinsicHash, BlockHash, TxIndex), + Finalized(ExtrinsicHash, BlockHash, TxIndex), /// Notifies that a transaction was broadcasted with a list of peer addresses. /// /// Sends out an external broadcasted event. - TransactionBroadcasted(ExtrinsicHash, Vec), + Broadcasted(ExtrinsicHash, Vec), /// Notifies that a transaction was dropped from the pool. /// /// If all preconditions are met, an external dropped event will be sent out. - TransactionDropped(ExtrinsicHash, DroppedReason>), + Dropped(ExtrinsicHash, DroppedReason>), + + /// Notifies that a finality watcher timed out. + /// + /// An external finality timed out event will be sent out. 
+ FinalityTimeout(ExtrinsicHash, BlockHash), } impl TransactionStatusUpdate @@ -108,10 +113,11 @@ where { fn hash(&self) -> ExtrinsicHash { match self { - Self::TransactionInvalidated(hash) | - Self::TransactionFinalized(hash, _, _) | - Self::TransactionBroadcasted(hash, _) | - Self::TransactionDropped(hash, _) => *hash, + Self::Invalidated(hash) | + Self::Finalized(hash, _, _) | + Self::Broadcasted(hash, _) | + Self::Dropped(hash, _) => *hash, + Self::FinalityTimeout(hash, _) => *hash, } } } @@ -123,17 +129,19 @@ where { fn into(self) -> TransactionStatus, BlockHash> { match self { - TransactionStatusUpdate::TransactionInvalidated(_) => TransactionStatus::Invalid, - TransactionStatusUpdate::TransactionFinalized(_, hash, index) => + TransactionStatusUpdate::Invalidated(_) => TransactionStatus::Invalid, + TransactionStatusUpdate::Finalized(_, hash, index) => TransactionStatus::Finalized((*hash, *index)), - TransactionStatusUpdate::TransactionBroadcasted(_, peers) => + TransactionStatusUpdate::Broadcasted(_, peers) => TransactionStatus::Broadcast(peers.clone()), - TransactionStatusUpdate::TransactionDropped(_, DroppedReason::Usurped(by)) => + TransactionStatusUpdate::Dropped(_, DroppedReason::Usurped(by)) => TransactionStatus::Usurped(*by), - TransactionStatusUpdate::TransactionDropped(_, DroppedReason::LimitsEnforced) => + TransactionStatusUpdate::Dropped(_, DroppedReason::LimitsEnforced) => TransactionStatus::Dropped, - TransactionStatusUpdate::TransactionDropped(_, DroppedReason::Invalid) => + TransactionStatusUpdate::Dropped(_, DroppedReason::Invalid) => TransactionStatus::Invalid, + TransactionStatusUpdate::FinalityTimeout(_, block_hash) => + TransactionStatus::FinalityTimeout(*block_hash), } } } @@ -144,17 +152,20 @@ where { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::TransactionInvalidated(h) => { - write!(f, "TransactionInvalidated({h})") + Self::Invalidated(h) => { + write!(f, "Invalidated({h})") + }, + Self::Finalized(h, b, i) => { + write!(f, "Finalized({h},{b},{i})") }, - Self::TransactionFinalized(h, b, i) => { - write!(f, "FinalizeTransaction({h},{b},{i})") + Self::Broadcasted(h, _) => { + write!(f, "Broadcasted({h})") }, - Self::TransactionBroadcasted(h, _) => { - write!(f, "TransactionBroadcasted({h})") + Self::Dropped(h, r) => { + write!(f, "Dropped({h},{r:?})") }, - Self::TransactionDropped(h, r) => { - write!(f, "TransactionDropped({h},{r:?})") + Self::FinalityTimeout(h, b) => { + write!(f, "FinalityTimeout({h},{b:?})") }, } } @@ -174,45 +185,54 @@ where } } } + impl ControllerCommand where ChainApi: graph::ChainApi, { /// Creates new instance of a command requesting [`TransactionStatus::Invalid`] transaction /// status. - fn new_transaction_invalidated(tx_hash: ExtrinsicHash) -> Self { - ControllerCommand::TransactionStatusRequest( - TransactionStatusUpdate::TransactionInvalidated(tx_hash), - ) + fn new_invalidated(tx_hash: ExtrinsicHash) -> Self { + ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::Invalidated(tx_hash)) } /// Creates new instance of a command requesting [`TransactionStatus::Broadcast`] transaction /// status. 
- fn new_transaction_broadcasted(tx_hash: ExtrinsicHash, peers: Vec) -> Self { - ControllerCommand::TransactionStatusRequest( - TransactionStatusUpdate::TransactionBroadcasted(tx_hash, peers), - ) + fn new_broadcasted(tx_hash: ExtrinsicHash, peers: Vec) -> Self { + ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::Broadcasted( + tx_hash, peers, + )) } /// Creates new instance of a command requesting [`TransactionStatus::Finalized`] transaction /// status. - fn new_transaction_finalized( + fn new_finalized( tx_hash: ExtrinsicHash, block_hash: BlockHash, index: TxIndex, ) -> Self { - ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::TransactionFinalized( + ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::Finalized( tx_hash, block_hash, index, )) } /// Creates new instance of a command requesting [`TransactionStatus::Dropped`] transaction /// status. - fn new_transaction_dropped( + fn new_dropped( tx_hash: ExtrinsicHash, reason: DroppedReason>, ) -> Self { - ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::TransactionDropped( + ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::Dropped( tx_hash, reason, )) } + /// Creates new instance of a command requesting [`TransactionStatus::FinalityTimeout`] + /// transaction status. + fn new_finality_timeout( + tx_hash: ExtrinsicHash, + block_hash: BlockHash, + ) -> Self { + ControllerCommand::TransactionStatusRequest(TransactionStatusUpdate::FinalityTimeout( + tx_hash, block_hash, + )) + } } /// This struct allows to create and control listener for multiple transactions. @@ -366,11 +386,11 @@ where Some(status) } }, - TransactionStatus::FinalityTimeout(_) => Some(status), TransactionStatus::Finalized(_) => { self.terminate = true; Some(status) }, + TransactionStatus::FinalityTimeout(_) | TransactionStatus::Retracted(_) | TransactionStatus::Broadcast(_) | TransactionStatus::Usurped(_) | @@ -667,9 +687,8 @@ where pub(crate) fn transactions_invalidated(&self, invalid_hashes: &[ExtrinsicHash]) { log_xt_trace!(target: LOG_TARGET, invalid_hashes, "transactions_invalidated"); for tx_hash in invalid_hashes { - if let Err(error) = self - .controller - .unbounded_send(ControllerCommand::new_transaction_invalidated(*tx_hash)) + if let Err(error) = + self.controller.unbounded_send(ControllerCommand::new_invalidated(*tx_hash)) { trace!( target: LOG_TARGET, @@ -692,7 +711,7 @@ where for (tx_hash, peers) in propagated { if let Err(error) = self .controller - .unbounded_send(ControllerCommand::new_transaction_broadcasted(tx_hash, peers)) + .unbounded_send(ControllerCommand::new_broadcasted(tx_hash, peers)) { trace!( target: LOG_TARGET, @@ -711,9 +730,8 @@ where pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let DroppedTransaction { tx_hash, reason } = dropped; trace!(target: LOG_TARGET, ?tx_hash, ?reason, "transaction_dropped"); - if let Err(error) = self - .controller - .unbounded_send(ControllerCommand::new_transaction_dropped(tx_hash, reason)) + if let Err(error) = + self.controller.unbounded_send(ControllerCommand::new_dropped(tx_hash, reason)) { trace!( target: LOG_TARGET, @@ -736,7 +754,7 @@ where trace!(target: LOG_TARGET, ?tx_hash, "transaction_finalized"); if let Err(error) = self .controller - .unbounded_send(ControllerCommand::new_transaction_finalized(tx_hash, block, idx)) + .unbounded_send(ControllerCommand::new_finalized(tx_hash, block, idx)) { trace!( target: LOG_TARGET, @@ -747,6 +765,30 @@ where }; } + /// Send `FinalityTimeout` 
event for given transactions at given block. + /// + /// This will trigger `FinalityTimeout` event to the external watcher. + pub(crate) fn transactions_finality_timeout( + &self, + tx_hashes: &[ExtrinsicHash], + block: BlockHash, + ) { + for tx_hash in tx_hashes { + trace!(target: LOG_TARGET, ?tx_hash, "transaction_finality_timeout"); + if let Err(error) = self + .controller + .unbounded_send(ControllerCommand::new_finality_timeout(*tx_hash, block)) + { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "transaction_finality_timeout: send message failed" + ); + }; + } + } + /// Removes stale controllers. pub(crate) fn remove_stale_controllers(&self) { self.external_controllers.write().retain(|_, c| !c.is_closed()); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 348108e24dcfc..d496b24fb4e73 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -333,7 +333,8 @@ where target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "view::submit_many at:{}", - self.at.hash); + self.at.hash + ); self.pool.submit_at(&self.at, xts).await } else { self.pool.submit_at(&self.at, xts).await diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index a1585741839fa..f61e300f36462 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -27,17 +27,18 @@ use crate::{ graph::{ self, base_pool::{TimedTransactionSource, Transaction}, - BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome, + BaseSubmitOutcome, BlockHash, ExtrinsicFor, ExtrinsicHash, TransactionFor, + ValidatedPoolSubmitOutcome, }, ReadyIteratorFor, LOG_TARGET, }; use itertools::Itertools; use parking_lot::RwLock; use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TxInvalidityReportMap}; -use sp_blockchain::TreeRoute; +use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_runtime::{ generic::BlockId, - traits::Block as BlockT, + traits::{Block as BlockT, Saturating}, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ @@ -537,49 +538,6 @@ where None } - /// The pre-finalization event handle for the view store. - /// - /// This function removes the references to the views that will be removed during finalization - /// from the dropped stream controller. This will allow for correct dispatching of `Dropped` - /// events. 
- pub(crate) async fn handle_pre_finalized(&self, finalized_hash: Block::Hash) { - let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); - let mut removed_views = vec![]; - - { - let active_views = self.active_views.read(); - let inactive_views = self.inactive_views.read(); - - active_views - .iter() - .filter(|(hash, v)| !match finalized_number { - Err(_) | Ok(None) => **hash == finalized_hash, - Ok(Some(n)) if v.at.number == n => **hash == finalized_hash, - Ok(Some(n)) => v.at.number > n, - }) - .map(|(_, v)| removed_views.push(v.at.hash)) - .for_each(drop); - - inactive_views - .iter() - .filter(|(_, v)| !match finalized_number { - Err(_) | Ok(None) => false, - Ok(Some(n)) => v.at.number >= n, - }) - .map(|(_, v)| removed_views.push(v.at.hash)) - .for_each(drop); - } - - trace!( - target: LOG_TARGET, - ?removed_views, - "handle_pre_finalized" - ); - removed_views.iter().for_each(|view| { - self.dropped_stream_controller.remove_view(*view); - }); - } - /// The finalization event handle for the view store. /// /// Views that have associated block number less than finalized block number are removed from @@ -643,7 +601,7 @@ where ); self.listener.remove_stale_controllers(); - self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + self.dropped_stream_controller.remove_transactions(finalized_xts.clone()); self.listener.remove_view(finalized_hash); for view in dropped_views { @@ -914,4 +872,40 @@ where removed } + + /// Clears stale views when blockchain finality stalls. + /// + /// This function removes outdated active and inactive views based on the block height + /// difference compared to the current block's height. Views are considered stale and + /// purged from the `ViewStore` if their height difference from the current block `at` + /// exceeds the specified `threshold`. + /// + /// If any views are removed, corresponding cleanup operations are performed on multi-view + /// stream controllers to ensure views are also removed there. + pub(crate) fn finality_stall_view_cleanup(&self, at: &HashAndNumber, threshold: usize) { + let mut dropped_views = vec![]; + { + let mut active_views = self.active_views.write(); + let mut inactive_views = self.inactive_views.write(); + let mut f = |hash: &BlockHash, v: &View| -> bool { + let diff = at.number.saturating_sub(v.at.number); + if diff.into() > threshold.into() { + dropped_views.push(*hash); + false + } else { + true + } + }; + + active_views.retain(|h, v| f(h, v)); + inactive_views.retain(|h, v| f(h, v)); + } + + if !dropped_views.is_empty() { + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } + } + } } diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index 745b57d0c85bf..6c91e85571ea0 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -583,6 +583,8 @@ impl RevalidationStatus { } /// Prune the known txs for the given block. +/// +/// Returns the hashes of all transactions included in given block. 
pub async fn prune_known_txs_for_block< Block: BlockT, Api: graph::ChainApi, diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index a4a932dd85361..5b738ef4045e6 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -664,7 +664,6 @@ fn fatp_fork_no_xts_ready_switch_to_future() { // wait 10 blocks for revalidation and 1 extra for applying revalidation results let mut prev_header = forks[1][2].clone(); - log::debug!("====> {:?}", prev_header); for _ in 3..=12 { let header = api.push_block_with_parent(prev_header.hash(), vec![], true); let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 20178fdc7c4e3..b6c84693cf2ae 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -70,6 +70,7 @@ pub struct TestPoolBuilder { ready_limits: sc_transaction_pool::PoolLimit, future_limits: sc_transaction_pool::PoolLimit, mempool_max_transactions_count: usize, + finality_timeout_threshold: Option, } impl Default for TestPoolBuilder { @@ -80,6 +81,7 @@ impl Default for TestPoolBuilder { ready_limits: PoolLimit { count: 8192, total_bytes: 20 * 1024 * 1024 }, future_limits: PoolLimit { count: 512, total_bytes: 1 * 1024 * 1024 }, mempool_max_transactions_count: usize::MAX, + finality_timeout_threshold: None, } } } @@ -124,6 +126,11 @@ impl TestPoolBuilder { self } + pub fn with_finality_timeout_threshold(mut self, threshold: usize) -> Self { + self.finality_timeout_threshold = Some(threshold); + self + } + pub fn build( self, ) -> (ForkAwareTxPool, Arc, futures::executor::ThreadPool) { @@ -140,7 +147,12 @@ impl TestPoolBuilder { .expect("there is block 0. qed"); let (pool, txpool_task) = if self.use_default_limits { - ForkAwareTxPool::new_test(api.clone(), genesis_hash, genesis_hash) + ForkAwareTxPool::new_test( + api.clone(), + genesis_hash, + genesis_hash, + self.finality_timeout_threshold, + ) } else { ForkAwareTxPool::new_test_with_limits( api.clone(), @@ -149,6 +161,7 @@ impl TestPoolBuilder { self.ready_limits, self.future_limits, self.mempool_max_transactions_count, + self.finality_timeout_threshold, ) }; diff --git a/substrate/client/transaction-pool/tests/fatp_finality_timeout.rs b/substrate/client/transaction-pool/tests/fatp_finality_timeout.rs new file mode 100644 index 0000000000000..49dccf00d4691 --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_finality_timeout.rs @@ -0,0 +1,273 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Tests for finality timeout handling for fork-aware transaction pool. + +pub mod fatp_common; + +use std::cmp::min; + +use fatp_common::{ + finalized_block_event, invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE, +}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_finality_timeout_works() { + sp_tracing::try_init_simple(); + + const FINALITY_TIMEOUT_THRESHOLD: usize = 10; + + let (pool, api, _) = TestPoolBuilder::new() + .with_finality_timeout_threshold(FINALITY_TIMEOUT_THRESHOLD) + .build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1, xt2, xt3]); + + let header02a = api.push_block_with_parent( + header01.hash(), + vec![xt0.clone(), xt1.clone(), xt2.clone(), xt3.clone()], + true, + ); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02a.hash()))); + assert_pool_status!(header02a.hash(), &pool, 0, 0); + + let header02b = api.push_block_with_parent(header01.hash(), vec![xt0, xt1, xt2, xt3], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()))); + assert_pool_status!(header02b.hash(), &pool, 0, 0); + + let mut prev_header = header02b.clone(); + for n in 3..66 { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = new_best_block_event(&pool, Some(prev_header.hash()), header.hash()); + block_on(pool.maintain(event)); + + prev_header = header; + if n < 3 + FINALITY_TIMEOUT_THRESHOLD { + assert_eq!(pool.active_views_count(), 2); + } else { + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), FINALITY_TIMEOUT_THRESHOLD); + } + } + + for (i, watcher) in + vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher].into_iter().enumerate() + { + assert_watcher_stream!( + watcher, + [ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), i)), + TransactionStatus::InBlock((header02b.hash(), i)), + TransactionStatus::FinalityTimeout(min(header02a.hash(), header02b.hash())) + ] + ); + } +} + +#[test] +fn fatp_finalized_still_works_after_finality_stall() { + sp_tracing::try_init_simple(); + + const FINALITY_TIMEOUT_THRESHOLD: usize = 10; + + let (pool, api, _) = TestPoolBuilder::new() + .with_finality_timeout_threshold(FINALITY_TIMEOUT_THRESHOLD) + .build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + 
api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1, xt2, xt3]); + + let header02a = api.push_block_with_parent( + header01.hash(), + vec![xt0.clone(), xt1.clone(), xt2.clone()], + true, + ); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02a.hash()))); + assert_pool_status!(header02a.hash(), &pool, 1, 0); + + let header02b = api.push_block_with_parent(header01.hash(), vec![xt0, xt1, xt2], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()))); + assert_pool_status!(header02b.hash(), &pool, 1, 0); + + let header03b = api.push_block_with_parent(header02b.hash(), vec![xt3], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02b.hash()), header03b.hash()))); + assert_pool_status!(header03b.hash(), &pool, 0, 0); + + let mut prev_header = header03b.clone(); + for block_n in 4..=3 + FINALITY_TIMEOUT_THRESHOLD { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = new_best_block_event(&pool, Some(prev_header.hash()), header.hash()); + block_on(pool.maintain(event)); + + prev_header = header; + if block_n == 3 + FINALITY_TIMEOUT_THRESHOLD { + //finality timeout triggered + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), FINALITY_TIMEOUT_THRESHOLD); + } else { + assert_eq!(pool.active_views_count(), 2); + } + } + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header03b.hash()))); + + for (i, watcher) in vec![xt0_watcher, xt1_watcher, xt2_watcher].into_iter().enumerate() { + assert_watcher_stream!( + watcher, + [ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), i)), + TransactionStatus::InBlock((header02b.hash(), i)), + TransactionStatus::FinalityTimeout(min(header02a.hash(), header02b.hash())) + ] + ); + } + + assert_watcher_stream!( + xt3_watcher, + [ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03b.hash(), 0)), + TransactionStatus::Finalized((header03b.hash(), 0)) + ] + ); +} + +#[test] +fn fatp_finality_timeout_works_for_txs_included_before_finalized() { + sp_tracing::try_init_simple(); + + const FINALITY_TIMEOUT_THRESHOLD: usize = 10; + + let (pool, api, _) = TestPoolBuilder::new() + .with_finality_timeout_threshold(FINALITY_TIMEOUT_THRESHOLD) + .build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, 
api.genesis_hash(), header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + let xt3 = uxt(Dave, 500); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 4, 0); + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1, xt2, xt3]); + + let header02a = api.push_block_with_parent( + header01.hash(), + vec![xt0.clone(), xt1.clone(), xt2.clone()], + true, + ); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02a.hash()))); + assert_pool_status!(header02a.hash(), &pool, 1, 0); + + let header02b = api.push_block_with_parent(header01.hash(), vec![xt0, xt1, xt2], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()))); + assert_pool_status!(header02b.hash(), &pool, 1, 0); + + let header03b = api.push_block_with_parent(header02b.hash(), vec![xt3], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02b.hash()), header03b.hash()))); + assert_pool_status!(header03b.hash(), &pool, 0, 0); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02b.hash()))); + + let mut prev_header = header03b.clone(); + for block_n in 4..=4 + FINALITY_TIMEOUT_THRESHOLD { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = new_best_block_event(&pool, Some(prev_header.hash()), header.hash()); + block_on(pool.maintain(event)); + + prev_header = header; + assert_eq!(pool.active_views_count(), 1); + if block_n == 4 + FINALITY_TIMEOUT_THRESHOLD { + //finality timeout triggered + assert_eq!(pool.inactive_views_count(), FINALITY_TIMEOUT_THRESHOLD); + } + } + + for (i, watcher) in vec![xt0_watcher, xt1_watcher, xt2_watcher].into_iter().enumerate() { + assert_watcher_stream!( + watcher, + [ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), i)), + TransactionStatus::InBlock((header02b.hash(), i)), + TransactionStatus::Finalized((header02b.hash(), i)) + ] + ); + } + + assert_watcher_stream!( + xt3_watcher, + [ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03b.hash(), 0)), + TransactionStatus::FinalityTimeout(header03b.hash()) + ] + ); +} diff --git a/substrate/client/transaction-pool/tests/integration.rs b/substrate/client/transaction-pool/tests/integration.rs new file mode 100644 index 0000000000000..200266848d8a2 --- /dev/null +++ b/substrate/client/transaction-pool/tests/integration.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+// Testsuite of fatp integration tests.
+pub mod zombienet;
+
+use crate::zombienet::{
+	default_zn_scenario_builder,
+	relaychain_rococo_local_network_spec::{
+		parachain_asset_hub_network_spec::HIGH_POOL_LIMIT_FATP as PARACHAIN_HIGH_POOL_LIMIT_FATP,
+		HIGH_POOL_LIMIT_FATP as RELAYCHAIN_HIGH_POOL_LIMIT_FATP,
+	},
+	NetworkSpawner,
+};
+use txtesttool::execution_log::ExecutionLog;
+use zombienet::DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS;
+
+// Test which sends future and ready txs from many accounts
+// to an unlimited pool of a parachain collator based on the asset-hub-rococo runtime.
+#[tokio::test(flavor = "multi_thread")]
+#[ignore]
+async fn send_future_and_ready_from_many_accounts_to_parachain() {
+	let net = NetworkSpawner::from_toml_with_env_logger(PARACHAIN_HIGH_POOL_LIMIT_FATP)
+		.await
+		.unwrap();
+
+	// Wait for the parachain collator to start block production.
+	net.wait_for_block_production("charlie").await.unwrap();
+
+	// Create future & ready txs executors.
+	let ws = net.node_rpc_uri("charlie").unwrap();
+	let future_scenario_executor = default_zn_scenario_builder(&net)
+		.with_rpc_uri(ws.clone())
+		.with_start_id(0)
+		.with_last_id(99)
+		.with_nonce_from(Some(100))
+		.with_txs_count(100)
+		.with_executor_id("future-txs-executor".to_string())
+		.with_timeout_in_secs(DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS)
+		.build()
+		.await;
+	let ready_scenario_executor = default_zn_scenario_builder(&net)
+		.with_rpc_uri(ws)
+		.with_start_id(0)
+		.with_last_id(99)
+		.with_nonce_from(Some(0))
+		.with_txs_count(100)
+		.with_executor_id("ready-txs-executor".to_string())
+		.with_timeout_in_secs(DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS)
+		.build()
+		.await;
+
+	// Execute transactions and fetch the execution logs.
+	let (future_logs, ready_logs) = futures::future::join(
+		future_scenario_executor.execute(),
+		ready_scenario_executor.execute(),
+	)
+	.await;
+
+	let finalized_future =
+		future_logs.values().filter_map(|default_log| default_log.finalized()).count();
+	let finalized_ready =
+		ready_logs.values().filter_map(|default_log| default_log.finalized()).count();
+
+	assert_eq!(finalized_future, 10_000);
+	assert_eq!(finalized_ready, 10_000);
+}
+
+// Test which sends future and ready txs from many accounts
+// to an unlimited pool of a relaychain node based on `rococo-local` runtime.
+#[tokio::test(flavor = "multi_thread")]
+#[ignore]
+async fn send_future_and_ready_from_many_accounts_to_relaychain() {
+	let net = NetworkSpawner::from_toml_with_env_logger(RELAYCHAIN_HIGH_POOL_LIMIT_FATP)
+		.await
+		.unwrap();
+
+	// Wait for the relaychain validator to start block production & have its genesis block
+	// finalized.
+	net.wait_for_block_production("alice").await.unwrap();
+
+	// Create future & ready txs executors.
+	let ws = net.node_rpc_uri("alice").unwrap();
+	let future_scenario_executor = default_zn_scenario_builder(&net)
+		.with_rpc_uri(ws.clone())
+		.with_start_id(0)
+		.with_last_id(99)
+		.with_nonce_from(Some(100))
+		.with_txs_count(100)
+		.with_executor_id("future-txs-executor".to_string())
+		.with_timeout_in_secs(DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS)
+		.build()
+		.await;
+	let ready_scenario_executor = default_zn_scenario_builder(&net)
+		.with_rpc_uri(ws)
+		.with_start_id(0)
+		.with_last_id(99)
+		.with_nonce_from(Some(0))
+		.with_txs_count(100)
+		.with_executor_id("ready-txs-executor".to_string())
+		.with_timeout_in_secs(DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS)
+		.build()
+		.await;
+
+	// Execute transactions and fetch the execution logs.
+	let (future_logs, ready_logs) = futures::future::join(
+		future_scenario_executor.execute(),
+		ready_scenario_executor.execute(),
+	)
+	.await;
+
+	let finalized_future =
+		future_logs.values().filter_map(|default_log| default_log.finalized()).count();
+	let finalized_ready =
+		ready_logs.values().filter_map(|default_log| default_log.finalized()).count();
+
+	assert_eq!(finalized_future, 10_000);
+	assert_eq!(finalized_ready, 10_000);
+}
diff --git a/substrate/client/transaction-pool/tests/zombienet/mod.rs b/substrate/client/transaction-pool/tests/zombienet/mod.rs
new file mode 100644
index 0000000000000..206c69b0c657b
--- /dev/null
+++ b/substrate/client/transaction-pool/tests/zombienet/mod.rs
@@ -0,0 +1,167 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+//! The zombienet spawner for transaction pool integration tests. Holds shared logic used
+//! across the transaction pool integration tests.
+
+use anyhow::anyhow;
+use tracing_subscriber::EnvFilter;
+use txtesttool::scenario::{ChainType, ScenarioBuilder};
+use zombienet_sdk::{
+	subxt::SubstrateConfig, LocalFileSystem, Network, NetworkConfig, NetworkConfigExt,
+};
+
+/// Gathers TOML file paths for the zombienet network specs of relaychains and of parachains
+/// (that use rococo-local based relaychains), used for fork-aware transaction pool testing.
+pub mod relaychain_rococo_local_network_spec {
+	pub const HIGH_POOL_LIMIT_FATP: &'static str =
+		"tests/zombienet/network-specs/rococo-local-high-pool-limit-fatp.toml";
+
+	/// Network specs used for fork-aware tx pool testing of parachains.
+	pub mod parachain_asset_hub_network_spec {
+		pub const LOW_POOL_LIMIT_FATP: &'static str =
+			"tests/zombienet/network-specs/asset-hub-low-pool-limit-fatp.toml";
+		pub const HIGH_POOL_LIMIT_FATP: &'static str =
+			"tests/zombienet/network-specs/asset-hub-high-pool-limit-fatp.toml";
+	}
+}
+
+/// Default time that we expect to need for a full run of the current tests that send future and
+/// ready txs to parachain or relaychain networks.
+pub const DEFAULT_SEND_FUTURE_AND_READY_TXS_TESTS_TIMEOUT_IN_SECS: u64 = 1500;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+	#[error("Network initialization failure: {0}")]
+	NetworkInit(anyhow::Error),
+	#[error("Node couldn't be found as part of the network: {0}")]
+	NodeNotFound(anyhow::Error),
+	#[error("Failed to get node online client")]
+	FailedToGetOnlineClient,
+	#[error("Failed to get node blocks stream")]
+	FailedToGetBlocksStream,
+}
+
+/// Result of work related to network spawning.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Provides logic to spawn a network based on a Zombienet toml file.
+pub struct NetworkSpawner {
+	network: Network<LocalFileSystem>,
+}
+
+impl NetworkSpawner {
+	/// Initialize the network spawner based on a Zombienet toml file.
+	pub async fn from_toml_with_env_logger(toml_path: &'static str) -> Result<NetworkSpawner> {
+		// Initialize the subscriber with a default log level of INFO if RUST_LOG is not set.
+		let env_filter =
+			EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
+		// Set up the subscriber with the formatter and the environment filter.
+		tracing_subscriber::fmt()
+			.with_env_filter(env_filter) // Use the env filter
+			.init();
+
+		let net_config = NetworkConfig::load_from_toml(toml_path).map_err(Error::NetworkInit)?;
+		Ok(NetworkSpawner {
+			network: net_config
+				.spawn_native()
+				.await
+				.map_err(|err| Error::NetworkInit(anyhow!(err.to_string())))?,
+		})
+	}
+
+	/// Returns the spawned network.
+	pub fn network(&self) -> &Network<LocalFileSystem> {
+		&self.network
+	}
+
+	/// Waits for block production/import to kick off on the given node.
+	pub async fn wait_for_block_production(&self, node_name: &str) -> Result<()> {
+		let node = self
+			.network
+			.get_node(node_name)
+			.map_err(|_| Error::NodeNotFound(anyhow!("{node_name}")))?;
+		let client = node
+			.wait_client::<SubstrateConfig>()
+			.await
+			.map_err(|_| Error::FailedToGetOnlineClient)?;
+		let mut stream = client
+			.blocks()
+			.subscribe_best()
+			.await
+			.map_err(|_| Error::FailedToGetBlocksStream)?;
+		// It should take at most two iterations to return with the best block, if any.
+		for _ in 0..=1 {
+			let Some(block) = stream.next().await else {
+				continue;
+			};
+
+			if let Some(block) = block.ok().filter(|block| block.number() == 1) {
+				tracing::info!("[{node_name}] found first best block: {:#?}", block.hash());
+				break;
+			}
+
+			tracing::info!("[{node_name}] waiting for first best block");
+		}
+		Ok(())
+	}
+
+	/// Get the network filesystem base dir path.
+	pub fn base_dir_path(&self) -> Option<&str> {
+		self.network.base_dir()
+	}
+
+	/// Get the RPC URI of a given node.
+	pub fn node_rpc_uri(&self, node_name: &str) -> Result<String> {
+		self.network
+			.get_node(node_name)
+			.and_then(|node| Ok(node.ws_uri().to_string()))
+			.map_err(|_| Error::NodeNotFound(anyhow!("{node_name}")))
+	}
+}
+
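Since the spawner is self-contained, it can also be driven outside the two tests in `fatp.rs`, e.g. from a throwaway test while debugging a network spec. A minimal sketch using only the API defined in this module (the test name is hypothetical); note that setting `RUST_LOG` overrides the default `info` filter installed by `from_toml_with_env_logger`:

```rust
// Sketch: spawn the relaychain spec, wait for block production on "alice",
// and log its RPC URI. Assumes tokio is available as a dev-dependency, as in
// the tests above.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn spawn_network_scratchpad() -> Result<()> {
	let net = NetworkSpawner::from_toml_with_env_logger(
		relaychain_rococo_local_network_spec::HIGH_POOL_LIMIT_FATP,
	)
	.await?;
	net.wait_for_block_production("alice").await?;
	tracing::info!("alice exposes RPC at {}", net.node_rpc_uri("alice")?);
	Ok(())
}
```

+/// Shared params usually set the same way for most of the scenarios.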
+pub struct ScenarioBuilderSharedParams { + watched_txs: bool, + does_block_monitoring: bool, + send_threshold: usize, + chain_type: ChainType, +} + +impl Default for ScenarioBuilderSharedParams { + fn default() -> Self { + Self { + watched_txs: true, + does_block_monitoring: false, + send_threshold: 20000, + chain_type: ChainType::Sub, + } + } +} + +/// Creates a [`txtesttool::scenario::ScenarioBuilder`] with a set of default parameters defined +/// with [`ScenarioBuilderSharedParams::default`]. +pub fn default_zn_scenario_builder(net_spawner: &NetworkSpawner) -> ScenarioBuilder { + let shared_params = ScenarioBuilderSharedParams::default(); + ScenarioBuilder::new() + .with_watched_txs(shared_params.watched_txs) + .with_send_threshold(shared_params.send_threshold) + .with_block_monitoring(shared_params.does_block_monitoring) + .with_chain_type(shared_params.chain_type) + .with_base_dir_path(net_spawner.base_dir_path().unwrap().to_string()) +} diff --git a/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-high-pool-limit-fatp.toml b/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-high-pool-limit-fatp.toml new file mode 100644 index 0000000000000..8ca7a134e18ae --- /dev/null +++ b/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-high-pool-limit-fatp.toml @@ -0,0 +1,66 @@ +[settings] +timeout = 1500 + +[relaychain] +default_image = "parity/polkadot:latest" +default_command = "polkadot" +chain = "rococo-local" + +[[relaychain.nodes]] +name = "alice" +rpc_port = 9944 +validator = true + +[[relaychain.nodes]] +name = "bob" +rpc_port = 9945 +validator = true + +[[parachains]] +id = 2000 +chain = "asset-hub-rococo-local" +default_command = "polkadot-parachain" +default_image = "parity/polkadot-parachain:latest" +cumulus_based = true +default_args = [ + "--force-authoring", + "--pool-kbytes 2048000", + "--pool-limit 500000", + "--pool-type=fork-aware", + "--rpc-max-connections 15000", + "--rpc-max-response-size 150", + "--rpc-max-subscriptions-per-connection=128000", + "--state-pruning=1024", + "-laura::cumulus=info", + "-lbasic-authorship=info", + "-lpeerset=info", + "-lsub-libp2p=info", + "-lsync=info", + "-ltxpool=debug", +] +[parachains.genesis.runtimeGenesis.patch.balances] +devAccounts = [ + 100, + 1000000000000000000, + "//Sender//{}", +] + +[[parachains.collators]] +name = "charlie" +validator = false +rpc_port = 9933 + +[[parachains.collators]] +name = "dave" +validator = true +rpc_port = 9934 + +[[parachains.collators]] +name = "eve" +validator = true +rpc_port = 9935 + +[[parachains.collators]] +name = "ferdie" +validator = true +rpc_port = 9936 diff --git a/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-low-pool-limit-fatp.toml b/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-low-pool-limit-fatp.toml new file mode 100644 index 0000000000000..2a4a276eb1913 --- /dev/null +++ b/substrate/client/transaction-pool/tests/zombienet/network-specs/asset-hub-low-pool-limit-fatp.toml @@ -0,0 +1,66 @@ +[settings] +timeout = 1500 + +[relaychain] +default_image = "parity/polkadot:latest" +default_command = "polkadot" +chain = "rococo-local" + +[[relaychain.nodes]] +name = "alice" +rpc_port = 9944 +validator = true + +[[relaychain.nodes]] +name = "bob" +validator = true + +[[parachains]] +id = 2000 +cumulus_based = true +chain = "asset-hub-rococo-local" +default_image = "parity/polkadot-parachain:latest" +default_command = "polkadot-parachain" +default_args = [ + 
"--force-authoring", + "--pool-kbytes 2048000", + "--pool-limit 300", + "--pool-type=fork-aware", + "--rpc-max-connections 15000", + "--rpc-max-response-size 150", + "--rpc-max-subscriptions-per-connection=128000", + "--state-pruning=1024", + "-laura::cumulus=info", + "-lbasic-authorship=info", + "-lpeerset=info", + "-lsub-libp2p=info", + "-lsync=info", + "-ltxpool=debug", +] +[parachains.genesis.runtimeGenesis.patch.balances] +devAccounts = [ + 100, + 1000000000000000000, + "//Sender//{}", +] + +# run charlie as parachain collator +[[parachains.collators]] +name = "charlie" +validator = false +rpc_port = 9933 + +[[parachains.collators]] +name = "dave" +validator = true +rpc_port = 9934 + +[[parachains.collators]] +name = "eve" +validator = true +rpc_port = 9935 + +[[parachains.collators]] +name = "ferdie" +validator = true +rpc_port = 9936 diff --git a/substrate/client/transaction-pool/tests/zombienet/network-specs/rococo-local-high-pool-limit-fatp.toml b/substrate/client/transaction-pool/tests/zombienet/network-specs/rococo-local-high-pool-limit-fatp.toml new file mode 100644 index 0000000000000..a67e0a4f444e5 --- /dev/null +++ b/substrate/client/transaction-pool/tests/zombienet/network-specs/rococo-local-high-pool-limit-fatp.toml @@ -0,0 +1,34 @@ +[settings] +timeout = 1500 + +[relaychain] +default_image = "parity/polkadot:latest" +default_command = "polkadot" +chain = "rococo-local" +default_args = [ + "--pool-kbytes 2048000", + "--pool-limit 500000", + "--pool-type=fork-aware", + "--rpc-max-connections 15000", + "--rpc-max-response-size 150", + "--rpc-max-subscriptions-per-connection=128000", + "--state-pruning=1024", + "-lsync=info", + "-ltxpool=debug", +] +[relaychain.genesis.runtimeGenesis.patch.balances] +devAccounts = [ + 100, + 1000000000000000000, + "//Sender//{}", +] + +[[relaychain.nodes]] +name = "alice" +rpc_port = 9944 +validator = true + +[[relaychain.nodes]] +name = "bob" +rpc_port = 9945 +validator = true diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 9fca26510989a..b20ef72fda4b1 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -88,26 +88,26 @@ pub use weights::WeightInfo; pub use pallet::*; -type BalanceOf = pallet_treasury::BalanceOf; -type BountiesError = pallet_bounties::Error; -type BountyIndex = pallet_bounties::BountyIndex; -type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -type BlockNumberFor = +pub type BalanceOf = pallet_treasury::BalanceOf; +pub type BountiesError = pallet_bounties::Error; +pub type BountyIndex = pallet_bounties::BountyIndex; +pub type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +pub type BlockNumberFor = <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ChildBounty { /// The parent of this child-bounty. - parent_bounty: BountyIndex, + pub parent_bounty: BountyIndex, /// The (total) amount that should be paid if this child-bounty is rewarded. - value: Balance, + pub value: Balance, /// The child bounty curator fee. - fee: Balance, + pub fee: Balance, /// The deposit of child-bounty curator. - curator_deposit: Balance, + pub curator_deposit: Balance, /// The status of this child-bounty. - status: ChildBountyStatus, + pub status: ChildBountyStatus, } /// The status of a child-bounty. 
diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml
index ec1a5d93bcbaa..f1cf7f31fdc80 100644
--- a/substrate/frame/nis/Cargo.toml
+++ b/substrate/frame/nis/Cargo.toml
@@ -17,13 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["runtime"] }
 scale-info = { features = ["derive"], workspace = true }
-sp-arithmetic = { workspace = true }
-sp-core = { workspace = true }
-sp-runtime = { workspace = true }
 
 [dev-dependencies]
 pallet-balances = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 
@@ -33,26 +28,14 @@ default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
-	"pallet-balances/std",
+	"frame/std",
 	"scale-info/std",
-	"sp-arithmetic/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
+	"frame/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
+	"frame/try-runtime",
 	"pallet-balances/try-runtime",
-	"sp-runtime/try-runtime",
]
diff --git a/substrate/frame/nis/src/benchmarking.rs b/substrate/frame/nis/src/benchmarking.rs
index 2c7ad651f9903..26024e1a6250a 100644
--- a/substrate/frame/nis/src/benchmarking.rs
+++ b/substrate/frame/nis/src/benchmarking.rs
@@ -19,19 +19,9 @@
 
 #![cfg(feature = "runtime-benchmarks")]
 
-use super::*;
-use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError};
-use frame_support::traits::{
-	fungible::Inspect as FunInspect, nonfungible::Inspect, EnsureOrigin, Get,
-};
-use frame_system::RawOrigin;
-use sp_arithmetic::Perquintill;
-use sp_runtime::{
-	traits::{Bounded, One, Zero},
-	DispatchError, PerThing,
-};
-
-use crate::Pallet as Nis;
+use frame::benchmarking::prelude::*;
+
+use crate::*;
 
 const SEED: u32 = 0;
 
@@ -49,62 +39,88 @@ fn fill_queues<T: Config>() -> Result<(), DispatchError> {
 	T::Currency::set_balance(&caller, T::MinBid::get() * BalanceOf::<T>::from(queues + bids));
 
 	for _ in 0..bids {
-		Nis::<T>::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?;
+		Pallet::<T>::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?;
 	}
 	for d in 1..queues {
-		Nis::<T>::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1 + d)?;
+		Pallet::<T>::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1 + d)?;
 	}
 	Ok(())
 }
 
-benchmarks!
{ - place_bid { - let l in 0..(T::MaxQueueLen::get() - 1); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn place_bid(l: Linear<0, { T::MaxQueueLen::get() - 1 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let ed = T::Currency::minimum_balance(); let bid = T::MinBid::get(); T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(l + 1) + bid); - for i in 0..l { - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + for _ in 0..l { + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } - }: _(RawOrigin::Signed(caller.clone()), T::MinBid::get() * BalanceOf::::from(2u32), 1) - verify { - assert_eq!(QueueTotals::::get()[0], (l + 1, T::MinBid::get() * BalanceOf::::from(l + 2))); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), T::MinBid::get() * BalanceOf::::from(2_u32), 1); + + assert_eq!( + QueueTotals::::get()[0], + (l + 1, T::MinBid::get() * BalanceOf::::from(l + 2)) + ); + + Ok(()) } - place_bid_max { + #[benchmark] + fn place_bid_max() -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let origin = RawOrigin::Signed(caller.clone()); let ed = T::Currency::minimum_balance(); let bid = T::MinBid::get(); let ql = T::MaxQueueLen::get(); T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(ql + 1) + bid); - for i in 0..T::MaxQueueLen::get() { - Nis::::place_bid(origin.clone().into(), T::MinBid::get(), 1)?; + for _ in 0..T::MaxQueueLen::get() { + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } - }: place_bid(origin, T::MinBid::get() * BalanceOf::::from(2u32), 1) - verify { - assert_eq!(QueueTotals::::get()[0], ( - T::MaxQueueLen::get(), - T::MinBid::get() * BalanceOf::::from(T::MaxQueueLen::get() + 1), - )); + + #[extrinsic_call] + place_bid(origin, T::MinBid::get() * BalanceOf::::from(2_u32), 1); + + assert_eq!( + QueueTotals::::get()[0], + ( + T::MaxQueueLen::get(), + T::MinBid::get() * BalanceOf::::from(T::MaxQueueLen::get() + 1), + ) + ); + + Ok(()) } - retract_bid { - let l in 1..T::MaxQueueLen::get(); + #[benchmark] + fn retract_bid(l: Linear<1, { T::MaxQueueLen::get() }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let ed = T::Currency::minimum_balance(); let bid = T::MinBid::get(); T::Currency::set_balance(&caller, (ed + bid) * BalanceOf::::from(l + 1) + bid); - for i in 0..l { - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; + for _ in 0..l { + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinBid::get(), 1)?; } - }: _(RawOrigin::Signed(caller.clone()), T::MinBid::get(), 1) - verify { - assert_eq!(QueueTotals::::get()[0], (l - 1, T::MinBid::get() * BalanceOf::::from(l - 1))); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), T::MinBid::get(), 1); + + assert_eq!( + QueueTotals::::get()[0], + (l - 1, T::MinBid::get() * BalanceOf::::from(l - 1)) + ); + + Ok(()) } - fund_deficit { + #[benchmark] + fn fund_deficit() -> Result<(), BenchmarkError> { T::BenchmarkSetup::create_counterpart_asset(); let origin = T::FundOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -112,49 +128,65 @@ benchmarks! 
{ let bid = T::MinBid::get().max(One::one()); let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&caller, ed + bid); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::process_queues(Perquintill::one(), 1, 1, &mut WeightCounter::unlimited()); - Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; - let original = T::Currency::balance(&Nis::::account_id()); - T::Currency::set_balance(&Nis::::account_id(), BalanceOf::::min_value()); - }: _(origin) - verify { + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::process_queues(Perquintill::one(), 1, 1, &mut WeightCounter::unlimited()); + Pallet::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; + let original = T::Currency::balance(&Pallet::::account_id()); + T::Currency::set_balance(&Pallet::::account_id(), BalanceOf::::min_value()); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin); + // Must fund at least 99.999% of the required amount. - let missing = Perquintill::from_rational( - T::Currency::balance(&Nis::::account_id()), original).left_from_one(); + let missing = + Perquintill::from_rational(T::Currency::balance(&Pallet::::account_id()), original) + .left_from_one(); assert!(missing <= Perquintill::one() / 100_000); + + Ok(()) } - communify { + #[benchmark] + fn communify() -> Result<(), BenchmarkError> { T::BenchmarkSetup::create_counterpart_asset(); let caller: T::AccountId = whitelisted_caller(); let bid = T::MinBid::get().max(One::one()) * 100u32.into(); let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&caller, ed + bid + bid); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); - }: _(RawOrigin::Signed(caller.clone()), 0) - verify { - assert_eq!(Nis::::owner(&0), None); + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 0); + + assert_eq!(Pallet::::owner(&0), None); + + Ok(()) } - privatize { + #[benchmark] + fn privatize() -> Result<(), BenchmarkError> { T::BenchmarkSetup::create_counterpart_asset(); let caller: T::AccountId = whitelisted_caller(); let bid = T::MinBid::get().max(One::one()); let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&caller, ed + bid + bid); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); - Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; - }: _(RawOrigin::Signed(caller.clone()), 0) - verify { - assert_eq!(Nis::::owner(&0), Some(caller)); + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); + Pallet::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 0); + + assert_eq!(Pallet::::owner(&0), Some(caller)); + + Ok(()) } - thaw_private { + #[benchmark] + fn thaw_private() -> Result<(), BenchmarkError> { 
T::BenchmarkSetup::create_counterpart_asset(); let whale: T::AccountId = account("whale", 0, SEED); let caller: T::AccountId = whitelisted_caller(); @@ -162,17 +194,27 @@ benchmarks! { let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&caller, ed + bid + bid); // Ensure we don't get throttled. - T::Currency::set_balance(&whale, T::ThawThrottle::get().0.saturating_reciprocal_mul_ceil(T::Currency::balance(&caller))); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); + T::Currency::set_balance( + &whale, + T::ThawThrottle::get() + .0 + .saturating_reciprocal_mul_ceil(T::Currency::balance(&caller)), + ); + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); frame_system::Pallet::::set_block_number(Receipts::::get(0).unwrap().expiry); - }: _(RawOrigin::Signed(caller.clone()), 0, None) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 0, None); + assert!(Receipts::::get(0).is_none()); + + Ok(()) } - thaw_communal { + #[benchmark] + fn thaw_communal() -> Result<(), BenchmarkError> { T::BenchmarkSetup::create_counterpart_asset(); let whale: T::AccountId = account("whale", 0, SEED); let caller: T::AccountId = whitelisted_caller(); @@ -180,69 +222,93 @@ benchmarks! { let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&caller, ed + bid + bid); // Ensure we don't get throttled. - T::Currency::set_balance(&whale, T::ThawThrottle::get().0.saturating_reciprocal_mul_ceil(T::Currency::balance(&caller))); - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; - Nis::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); + T::Currency::set_balance( + &whale, + T::ThawThrottle::get() + .0 + .saturating_reciprocal_mul_ceil(T::Currency::balance(&caller)), + ); + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::place_bid(RawOrigin::Signed(caller.clone()).into(), bid, 1)?; + Pallet::::process_queues(Perquintill::one(), 1, 2, &mut WeightCounter::unlimited()); frame_system::Pallet::::set_block_number(Receipts::::get(0).unwrap().expiry); - Nis::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; - }: _(RawOrigin::Signed(caller.clone()), 0) - verify { + Pallet::::communify(RawOrigin::Signed(caller.clone()).into(), 0)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 0); + assert!(Receipts::::get(0).is_none()); + + Ok(()) } - process_queues { + #[benchmark] + fn process_queues() -> Result<(), BenchmarkError> { fill_queues::()?; - }: { - Nis::::process_queues( - Perquintill::one(), - Zero::zero(), - u32::max_value(), - &mut WeightCounter::unlimited(), - ) + + #[block] + { + Pallet::::process_queues( + Perquintill::one(), + Zero::zero(), + u32::max_value(), + &mut WeightCounter::unlimited(), + ); + } + + Ok(()) } - process_queue { - let our_account = Nis::::account_id(); - let issuance = Nis::::issuance(); + #[benchmark] + fn process_queue() { + let our_account = Pallet::::account_id(); + let issuance = Pallet::::issuance(); let mut summary = Summary::::get(); - }: { - Nis::::process_queue( - 1u32, - 1u32.into(), - &our_account, - &issuance, - 0, - &mut 
Bounded::max_value(), - &mut (T::MaxQueueLen::get(), Bounded::max_value()), - &mut summary, - &mut WeightCounter::unlimited(), - ) + + #[block] + { + Pallet::::process_queue( + 1_u32, + 1_u32.into(), + &our_account, + &issuance, + 0, + &mut Bounded::max_value(), + &mut (T::MaxQueueLen::get(), Bounded::max_value()), + &mut summary, + &mut WeightCounter::unlimited(), + ); + } } - process_bid { + #[benchmark] + fn process_bid() { let who = account::("bidder", 0, SEED); let min_bid = T::MinBid::get().max(One::one()); let ed = T::Currency::minimum_balance(); T::Currency::set_balance(&who, ed + min_bid); - let bid = Bid { - amount: T::MinBid::get(), - who, - }; - let our_account = Nis::::account_id(); - let issuance = Nis::::issuance(); + let bid = Bid { amount: T::MinBid::get(), who }; + let our_account = Pallet::::account_id(); + let issuance = Pallet::::issuance(); let mut summary = Summary::::get(); - }: { - Nis::::process_bid( - bid, - 2u32.into(), - &our_account, - &issuance, - &mut Bounded::max_value(), - &mut Bounded::max_value(), - &mut summary, - ) + + #[block] + { + Pallet::::process_bid( + bid, + 2_u32.into(), + &our_account, + &issuance, + &mut Bounded::max_value(), + &mut Bounded::max_value(), + &mut summary, + ); + } } - impl_benchmark_test_suite!(Nis, crate::mock::new_test_ext_empty(), crate::mock::Test); + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext_empty(), + mock::Test + } } diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index 87e2276e768d0..6ce15fc560c63 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -78,24 +78,28 @@ extern crate alloc; -use frame_support::traits::{ - fungible::{self, Inspect as FunInspect, Mutate as FunMutate}, - tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence}, -}; -pub use pallet::*; -use sp_arithmetic::{traits::Unsigned, RationalArg}; -use sp_core::TypedGet; -use sp_runtime::{ - traits::{Convert, ConvertBack}, - DispatchError, Perquintill, -}; +pub mod weights; mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub mod weights; + +pub use pallet::*; +pub use weights::WeightInfo; + +use alloc::{vec, vec::Vec}; +use frame::prelude::*; +use fungible::{ + Balanced as FunBalanced, Inspect as FunInspect, Mutate as FunMutate, + MutateHold as FunMutateHold, +}; +use nonfungible::{Inspect as NftInspect, Transfer as NftTransfer}; +use tokens::{Balance, Restriction::*}; +use Fortitude::*; +use Precision::*; +use Preservation::*; pub struct WithMaximumOf(core::marker::PhantomData); impl Convert for WithMaximumOf @@ -169,33 +173,9 @@ impl BenchmarkSetup for () { fn create_counterpart_asset() {} } -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { - use super::{FunInspect, FunMutate}; - pub use crate::weights::WeightInfo; - use alloc::{vec, vec::Vec}; - use frame_support::{ - pallet_prelude::*, - traits::{ - fungible::{self, hold::Mutate as FunHoldMutate, Balanced as FunBalanced}, - nonfungible::{Inspect as NftInspect, Transfer as NftTransfer}, - tokens::{ - Balance, - Fortitude::Polite, - Precision::{BestEffort, Exact}, - Preservation::Expendable, - Restriction::{Free, OnHold}, - }, - Defensive, DefensiveSaturating, OnUnbalanced, - }, - PalletId, - }; - use frame_system::pallet_prelude::*; - use sp_arithmetic::{PerThing, Perquintill}; - use sp_runtime::{ - traits::{AccountIdConversion, Bounded, Convert, ConvertBack, Saturating, Zero}, - Rounding, TokenError, - }; + use super::*; type BalanceOf = <::Currency as 
FunInspect<::AccountId>>::Balance; @@ -224,7 +204,7 @@ pub mod pallet { type Currency: FunInspect + FunMutate + FunBalanced - + FunHoldMutate; + + FunMutateHold; /// Overarching hold reason. type RuntimeHoldReason: From; diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs index 82b9f55b919be..0e71e43f56bd7 100644 --- a/substrate/frame/nis/src/mock.rs +++ b/substrate/frame/nis/src/mock.rs @@ -17,32 +17,38 @@ //! Test environment for NIS pallet. -use crate::{self as pallet_nis, Perquintill, WithMaximumOf}; - -use frame_support::{ - derive_impl, ord_parameter_types, parameter_types, - traits::{fungible::Inspect, ConstU32, ConstU64, StorageMapShim}, - weights::Weight, - PalletId, -}; -use pallet_balances::{Instance1, Instance2}; -use sp_core::ConstU128; -use sp_runtime::BuildStorage; +use frame::{runtime::prelude::*, testing_prelude::*, traits::StorageMapShim}; -type Block = frame_system::mocking::MockBlock; +use crate::{self as pallet_nis, *}; pub type Balance = u64; +type Block = frame_system::mocking::MockBlock; + // Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances::, - NisBalances: pallet_balances::, - Nis: pallet_nis, - } -); +#[frame_construct_runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeError, + RuntimeEvent, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeOrigin, + RuntimeTask + )] + pub struct Test; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + #[runtime::pallet_index(1)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(2)] + pub type NisBalances = pallet_balances; + #[runtime::pallet_index(3)] + pub type Nis = pallet_nis; +} #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { @@ -50,7 +56,7 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl pallet_balances::Config for Test { +impl pallet_balances::Config for Test { type Balance = Balance; type DustRemoval = (); type RuntimeEvent = RuntimeEvent; @@ -67,13 +73,13 @@ impl pallet_balances::Config for Test { type DoneSlashHandler = (); } -impl pallet_balances::Config for Test { +impl pallet_balances::Config for Test { type Balance = u128; type DustRemoval = (); type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = StorageMapShim< - pallet_balances::Account, + pallet_balances::Account, u64, pallet_balances::AccountData, >; @@ -106,7 +112,7 @@ impl pallet_nis::Config for Test { type RuntimeEvent = RuntimeEvent; type PalletId = NisPalletId; type Currency = Balances; - type CurrencyBalance = >::Balance; + type CurrencyBalance = >::Balance; type FundOrigin = frame_system::EnsureSigned; type Deficit = (); type IgnoredIssuance = IgnoredIssuance; @@ -131,7 +137,7 @@ impl pallet_nis::Config for Test { // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { + pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)], ..Default::default() } diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs index 10c39a0d48edb..945532364e00a 100644 --- a/substrate/frame/nis/src/tests.rs +++ b/substrate/frame/nis/src/tests.rs @@ -17,21 +17,13 @@ //! Tests for NIS pallet. 
-use super::*; -use crate::{mock::*, Error}; -use frame_support::{ - assert_noop, assert_ok, - traits::{ - fungible::{hold::Inspect as InspectHold, Inspect as FunInspect, Mutate as FunMutate}, - nonfungible::{Inspect, Transfer}, - tokens::{Fortitude::Force, Precision::Exact, Preservation::Expendable}, - }, -}; -use sp_arithmetic::Perquintill; -use sp_runtime::{ - Saturating, - TokenError::{self, FundsUnavailable}, +use frame::testing_prelude::*; + +use crate::{ + mock::{Balance, *}, + *, }; +use fungible::InspectHold; fn pot() -> Balance { Balances::free_balance(&Nis::account_id()) @@ -78,7 +70,7 @@ fn place_bid_works() { new_test_ext().execute_with(|| { System::run_to_block::(1); assert_noop!(Nis::place_bid(signed(1), 1, 2), Error::::AmountTooSmall); - assert_noop!(Nis::place_bid(signed(1), 101, 2), FundsUnavailable); + assert_noop!(Nis::place_bid(signed(1), 101, 2), TokenError::FundsUnavailable); assert_noop!(Nis::place_bid(signed(1), 10, 4), Error::::DurationTooBig); assert_ok!(Nis::place_bid(signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 10); diff --git a/substrate/frame/nis/src/weights.rs b/substrate/frame/nis/src/weights.rs index 2842d6fbc5b01..97f126c540fa4 100644 --- a/substrate/frame/nis/src/weights.rs +++ b/substrate/frame/nis/src/weights.rs @@ -35,9 +35,9 @@ //! Autogenerated weights for `pallet_nis` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `bd5e4dfa0790`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: @@ -54,7 +54,7 @@ // --steps=50 // --repeat=20 // --heap-pages=4096 -// --template=substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-umbrella-weight-template.hbs // --no-storage-info // --no-min-squares // --no-median-slopes @@ -67,8 +67,7 @@ #![allow(missing_docs)] #![allow(dead_code)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; +use frame::weights_prelude::*; /// Weight functions needed for `pallet_nis`. pub trait WeightInfo { @@ -99,10 +98,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6115 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 45_697_000 picoseconds. - Weight::from_parts(46_540_958, 51487) - // Standard Error: 1_097 - .saturating_add(Weight::from_parts(99_061, 0).saturating_mul(l.into())) + // Minimum execution time: 43_703_000 picoseconds. + Weight::from_parts(42_231_449, 51487) + // Standard Error: 1_303 + .saturating_add(Weight::from_parts(92_036, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -116,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `54117` // Estimated: `51487` - // Minimum execution time: 151_410_000 picoseconds. - Weight::from_parts(164_191_000, 51487) + // Minimum execution time: 146_230_000 picoseconds. 
+ Weight::from_parts(154_165_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -132,10 +131,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6115 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 46_132_000 picoseconds. - Weight::from_parts(39_240_527, 51487) - // Standard Error: 1_206 - .saturating_add(Weight::from_parts(82_315, 0).saturating_mul(l.into())) + // Minimum execution time: 43_614_000 picoseconds. + Weight::from_parts(37_201_688, 51487) + // Standard Error: 1_241 + .saturating_add(Weight::from_parts(79_738, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -147,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3593` - // Minimum execution time: 31_132_000 picoseconds. - Weight::from_parts(32_025_000, 3593) + // Minimum execution time: 29_920_000 picoseconds. + Weight::from_parts(30_761_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -168,8 +167,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `533` // Estimated: `3892` - // Minimum execution time: 74_327_000 picoseconds. - Weight::from_parts(76_096_000, 3892) + // Minimum execution time: 71_257_000 picoseconds. + Weight::from_parts(72_559_000, 3892) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -189,8 +188,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699` // Estimated: `3892` - // Minimum execution time: 97_108_000 picoseconds. - Weight::from_parts(98_562_000, 3892) + // Minimum execution time: 92_578_000 picoseconds. + Weight::from_parts(94_495_000, 3892) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -206,8 +205,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3892` - // Minimum execution time: 51_390_000 picoseconds. - Weight::from_parts(52_693_000, 3892) + // Minimum execution time: 48_478_000 picoseconds. + Weight::from_parts(49_690_000, 3892) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -225,8 +224,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `642` // Estimated: `3675` - // Minimum execution time: 94_872_000 picoseconds. - Weight::from_parts(96_477_000, 3675) + // Minimum execution time: 89_681_000 picoseconds. + Weight::from_parts(92_693_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -240,8 +239,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6563` // Estimated: `7487` - // Minimum execution time: 20_317_000 picoseconds. - Weight::from_parts(21_176_000, 7487) + // Minimum execution time: 19_670_000 picoseconds. + Weight::from_parts(22_353_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -251,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `51487` - // Minimum execution time: 2_861_000 picoseconds. - Weight::from_parts(2_979_000, 51487) + // Minimum execution time: 2_755_000 picoseconds. 
+ Weight::from_parts(2_944_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,8 +261,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_061_000 picoseconds. - Weight::from_parts(4_288_000, 0) + // Minimum execution time: 3_919_000 picoseconds. + Weight::from_parts(4_114_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -281,10 +280,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6115 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 45_697_000 picoseconds. - Weight::from_parts(46_540_958, 51487) - // Standard Error: 1_097 - .saturating_add(Weight::from_parts(99_061, 0).saturating_mul(l.into())) + // Minimum execution time: 43_703_000 picoseconds. + Weight::from_parts(42_231_449, 51487) + // Standard Error: 1_303 + .saturating_add(Weight::from_parts(92_036, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -298,8 +297,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `54117` // Estimated: `51487` - // Minimum execution time: 151_410_000 picoseconds. - Weight::from_parts(164_191_000, 51487) + // Minimum execution time: 146_230_000 picoseconds. + Weight::from_parts(154_165_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -314,10 +313,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6115 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 46_132_000 picoseconds. - Weight::from_parts(39_240_527, 51487) - // Standard Error: 1_206 - .saturating_add(Weight::from_parts(82_315, 0).saturating_mul(l.into())) + // Minimum execution time: 43_614_000 picoseconds. + Weight::from_parts(37_201_688, 51487) + // Standard Error: 1_241 + .saturating_add(Weight::from_parts(79_738, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -329,8 +328,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3593` - // Minimum execution time: 31_132_000 picoseconds. - Weight::from_parts(32_025_000, 3593) + // Minimum execution time: 29_920_000 picoseconds. + Weight::from_parts(30_761_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -350,8 +349,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `533` // Estimated: `3892` - // Minimum execution time: 74_327_000 picoseconds. - Weight::from_parts(76_096_000, 3892) + // Minimum execution time: 71_257_000 picoseconds. + Weight::from_parts(72_559_000, 3892) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -371,8 +370,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699` // Estimated: `3892` - // Minimum execution time: 97_108_000 picoseconds. - Weight::from_parts(98_562_000, 3892) + // Minimum execution time: 92_578_000 picoseconds. + Weight::from_parts(94_495_000, 3892) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -388,8 +387,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3892` - // Minimum execution time: 51_390_000 picoseconds. 
- Weight::from_parts(52_693_000, 3892) + // Minimum execution time: 48_478_000 picoseconds. + Weight::from_parts(49_690_000, 3892) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -407,8 +406,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `642` // Estimated: `3675` - // Minimum execution time: 94_872_000 picoseconds. - Weight::from_parts(96_477_000, 3675) + // Minimum execution time: 89_681_000 picoseconds. + Weight::from_parts(92_693_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -422,8 +421,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6563` // Estimated: `7487` - // Minimum execution time: 20_317_000 picoseconds. - Weight::from_parts(21_176_000, 7487) + // Minimum execution time: 19_670_000 picoseconds. + Weight::from_parts(22_353_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -433,8 +432,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `51487` - // Minimum execution time: 2_861_000 picoseconds. - Weight::from_parts(2_979_000, 51487) + // Minimum execution time: 2_755_000 picoseconds. + Weight::from_parts(2_944_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -444,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_061_000 picoseconds. - Weight::from_parts(4_288_000, 0) + // Minimum execution time: 3_919_000 picoseconds. + Weight::from_parts(4_114_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index b72f53af8e722..aa3621c7c9595 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -32,6 +32,10 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } +fn assert_has_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_has_event(generic_event.into()); +} + fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(whitelisted_caller); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); @@ -341,5 +345,117 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + // Create accounts using the same pattern as other benchmarks + let account_1: T::AccountId = account("account", 1, SEED); + let account_2: T::AccountId = account("account", 2, SEED); + let account_3: T::AccountId = account("account", 3, SEED); + + // Fund accounts + T::Currency::make_free_balance_be(&account_1, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_2, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_3, BalanceOf::::max_value() / 100u8.into()); + + // Add proxy relationships + Proxy::::add_proxy( + RawOrigin::Signed(account_1.clone()).into(), + T::Lookup::unlookup(account_2.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + Proxy::::add_proxy( + RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_3.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + let (proxies, 
initial_proxy_deposit) = Proxies::::get(&account_2); + assert!(!initial_proxy_deposit.is_zero()); + assert_eq!(initial_proxy_deposit, T::Currency::reserved_balance(&account_2)); + + // Create announcement + Proxy::::announce( + RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_1.clone()), + T::CallHasher::hash_of(&("add_announcement", 1)), + )?; + let (announcements, initial_announcement_deposit) = Announcements::::get(&account_2); + assert!(!initial_announcement_deposit.is_zero()); + assert_eq!( + initial_announcement_deposit.saturating_add(initial_proxy_deposit), + T::Currency::reserved_balance(&account_2) + ); + + // Artificially inflate deposits and reserve the extra amount + let extra_proxy_deposit = initial_proxy_deposit; // Double the deposit + let extra_announcement_deposit = initial_announcement_deposit; // Double the deposit + let total = extra_proxy_deposit.saturating_add(extra_announcement_deposit); + + T::Currency::reserve(&account_2, total)?; + + let initial_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(initial_reserved, total.saturating_add(total)); // Double + + // Update storage with increased deposits + Proxies::::insert( + &account_2, + (proxies, initial_proxy_deposit.saturating_add(extra_proxy_deposit)), + ); + Announcements::::insert( + &account_2, + ( + announcements, + initial_announcement_deposit.saturating_add(extra_announcement_deposit), + ), + ); + + // Verify artificial state + let (_, inflated_proxy_deposit) = Proxies::::get(&account_2); + let (_, inflated_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!( + inflated_proxy_deposit, + initial_proxy_deposit.saturating_add(extra_proxy_deposit) + ); + assert_eq!( + inflated_announcement_deposit, + initial_announcement_deposit.saturating_add(extra_announcement_deposit) + ); + + #[extrinsic_call] + _(RawOrigin::Signed(account_2.clone())); + + // Verify results + let (_, final_proxy_deposit) = Proxies::::get(&account_2); + let (_, final_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!(final_proxy_deposit, initial_proxy_deposit); + assert_eq!(final_announcement_deposit, initial_announcement_deposit); + + let final_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(final_reserved, initial_reserved.saturating_sub(total)); + + // Verify events + assert_has_event::( + Event::DepositPoked { + who: account_2.clone(), + kind: DepositKind::Proxies, + old_deposit: inflated_proxy_deposit, + new_deposit: final_proxy_deposit, + } + .into(), + ); + assert_last_event::( + Event::DepositPoked { + who: account_2, + kind: DepositKind::Announcements, + old_deposit: inflated_announcement_deposit, + new_deposit: final_announcement_deposit, + } + .into(), + ); + + Ok(()) + } + impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 594d1721cd41b..e583980760231 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -88,6 +88,26 @@ pub struct Announcement { height: BlockNumber, } +/// The type of deposit +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, + DecodeWithMemTracking, +)] +pub enum DepositKind { + /// Proxy registration deposit + Proxies, + /// Announcement deposit + Announcements, +} + #[frame::pallet] pub mod pallet { use super::*; @@ -529,6 +549,105 @@ pub mod pallet { Ok(()) } + + /// Poke / Adjust deposits made for 
proxies and announcements based on current values. + /// This can be used by accounts to possibly lower their locked amount. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The transaction fee is waived if the deposit amount has changed. + /// + /// Emits `DepositPoked` if successful. + #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + let mut deposit_updated = false; + + // Check and update proxy deposits + Proxies::::try_mutate_exists(&who, |maybe_proxies| -> DispatchResult { + let (proxies, old_deposit) = maybe_proxies.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::ProxyDepositBase::get(), + T::ProxyDepositFactor::get(), + proxies.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_proxies = Some((proxies, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit, + }); + }, + Some(_) => { + *maybe_proxies = Some((proxies, old_deposit)); + }, + None => { + *maybe_proxies = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + }, + } + Ok(()) + })?; + + // Check and update announcement deposits + Announcements::::try_mutate_exists(&who, |maybe_announcements| -> DispatchResult { + let (announcements, old_deposit) = maybe_announcements.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + announcements.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_announcements = Some((announcements, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit, + }); + }, + Some(_) => { + *maybe_announcements = Some((announcements, old_deposit)); + }, + None => { + *maybe_announcements = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + }, + } + Ok(()) + })?; + + Ok(if deposit_updated { Pays::No.into() } else { Pays::Yes.into() }) + } } #[pallet::event] @@ -560,6 +679,13 @@ pub mod pallet { proxy_type: T::ProxyType, delay: BlockNumberFor, }, + /// A deposit stored for proxies or announcements was poked / updated. 
+ DepositPoked { + who: T::AccountId, + kind: DepositKind, + old_deposit: BalanceOf, + new_deposit: BalanceOf, + }, } #[pallet::error] @@ -779,9 +905,16 @@ impl Pallet { let new_deposit = if len == 0 { BalanceOf::::zero() } else { base + factor * (len as u32).into() }; if new_deposit > old_deposit { - T::Currency::reserve(who, new_deposit - old_deposit)?; + T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; } else if new_deposit < old_deposit { - T::Currency::unreserve(who, old_deposit - new_deposit); + let excess = old_deposit.saturating_sub(new_deposit); + let remaining_unreserved = T::Currency::unreserve(who, excess); + if !remaining_unreserved.is_zero() { + defensive!( + "Failed to unreserve full amount. (Requested, Actual)", + (excess, excess.saturating_sub(remaining_unreserved)) + ); + } } Ok(if len == 0 { None } else { Some(new_deposit) }) } diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index b52dc5ce0e398..2e2d729c1f3bc 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -107,25 +107,33 @@ impl Contains for BaseFilter { } } } + +parameter_types! { + pub static ProxyDepositBase: u64 = 1; + pub static ProxyDepositFactor: u64 = 1; + pub static AnnouncementDepositBase: u64 = 1; + pub static AnnouncementDepositFactor: u64 = 1; +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; type Currency = Balances; type ProxyType = ProxyType; - type ProxyDepositBase = ConstU64<1>; - type ProxyDepositFactor = ConstU64<1>; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = ConstU32<4>; type WeightInfo = (); type CallHasher = BlakeTwo256; type MaxPending = ConstU32<2>; - type AnnouncementDepositBase = ConstU64<1>; - type AnnouncementDepositFactor = ConstU64<1>; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::{Call as BalancesCall, Event as BalancesEvent}; +use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; @@ -586,3 +594,232 @@ fn pure_works() { ); }); } + +#[test] +fn poke_deposit_works_for_proxy_deposits() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Change the proxy deposit base to trigger deposit update + ProxyDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + assert_eq!(Balances::reserved_balance(1), 3); // New Base(2) + Factor(1) * 1 + System::assert_last_event( + ProxyEvent::DepositPoked { + who: 1, + kind: DepositKind::Proxies, + old_deposit: 2, + new_deposit: 3, + } + .into(), + ); + assert!(System::events() + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Proxy(Event::DepositPoked { .. 
})))); + }); +} + +#[test] +fn poke_deposit_works_for_announcement_deposits() { + new_test_ext().execute_with(|| { + // Setup proxy and make announcement + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Change announcement deposit base to trigger update + AnnouncementDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + let new_deposit = initial_deposit.saturating_add(1); // Base increased by 1 + assert_eq!(Balances::reserved_balance(3), new_deposit); + System::assert_last_event( + ProxyEvent::DepositPoked { + who: 3, + kind: DepositKind::Announcements, + old_deposit: initial_deposit, + new_deposit, + } + .into(), + ); + assert!(System::events() + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Proxy(Event::DepositPoked { .. })))); + }); +} + +#[test] +fn poke_deposit_charges_fee_when_deposit_unchanged() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(1), 2); // No change + + // No event emitted + assert!(!System::events() + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Proxy(Event::DepositPoked { .. })))); + + // Add an announcement and check initial deposit + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(3), initial_deposit); // No change + + // No event emitted + assert!(!System::events() + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Proxy(Event::DepositPoked { .. 
+
+#[test]
+fn poke_deposit_handles_insufficient_balance() {
+	new_test_ext().execute_with(|| {
+		// Setup with account that has minimal balance
+		assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(5), 3, ProxyType::Any, 0));
+		let initial_deposit = Balances::reserved_balance(5);
+
+		// Change deposit base to require more than available balance
+		ProxyDepositBase::set(10);
+
+		// Poking should fail due to insufficient balance
+		assert_noop!(
+			Proxy::poke_deposit(RuntimeOrigin::signed(5)),
+			BalancesError::<Test>::InsufficientBalance,
+		);
+
+		// Original deposit should remain unchanged
+		assert_eq!(Balances::reserved_balance(5), initial_deposit);
+	});
+}
+
+#[test]
+fn poke_deposit_updates_both_proxy_and_announcement_deposits() {
+	new_test_ext().execute_with(|| {
+		// Setup both proxy and announcement for the same account
+		assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0));
+		assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1
+		assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1));
+		assert_eq!(Balances::reserved_balance(2), 2); // Base(1) + Factor(1) * 1
+		assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, [1; 32].into()));
+		let announcements = Announcements::<Test>::get(2);
+		assert_eq!(
+			announcements.0,
+			vec![Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }]
+		);
+		assert_eq!(announcements.1, 2); // Base(1) + Factor(1) * 1
+
+		// Record initial deposits
+		let initial_proxy_deposit = Proxies::<Test>::get(2).1;
+		let initial_announcement_deposit = Announcements::<Test>::get(2).1;
+
+		// Total reserved = deposit for proxy + deposit for announcement
+		assert_eq!(
+			Balances::reserved_balance(2),
+			initial_proxy_deposit.saturating_add(initial_announcement_deposit)
+		);
+
+		// Change both deposit requirements
+		ProxyDepositBase::set(2);
+		AnnouncementDepositBase::set(2);
+
+		// Poke deposits - should update both deposits and emit two events
+		let result = Proxy::poke_deposit(RuntimeOrigin::signed(2));
+		assert_ok!(result.as_ref());
+		assert_eq!(result.unwrap().pays_fee, Pays::No);
+
+		// Check both deposits were updated
+		let (_, new_proxy_deposit) = Proxies::<Test>::get(2);
+		let (_, new_announcement_deposit) = Announcements::<Test>::get(2);
+		assert_eq!(new_proxy_deposit, 3); // Base(2) + Factor(1) * 1
+		assert_eq!(new_announcement_deposit, 3); // Base(2) + Factor(1) * 1
+		assert_eq!(
+			Balances::reserved_balance(2),
+			new_proxy_deposit.saturating_add(new_announcement_deposit)
+		);
+
+		// Verify both events were emitted in the correct order
+		let events = System::events();
+		let relevant_events: Vec<_> = events
+			.iter()
+			.filter(|record| {
+				matches!(record.event, RuntimeEvent::Proxy(ProxyEvent::DepositPoked { .. }))
+			})
+			.collect();
+
+		assert_eq!(relevant_events.len(), 2);
+
+		// First event should be for Proxies
+		assert_eq!(
+			relevant_events[0].event,
+			ProxyEvent::DepositPoked {
+				who: 2,
+				kind: DepositKind::Proxies,
+				old_deposit: initial_proxy_deposit,
+				new_deposit: new_proxy_deposit,
+			}
+			.into()
+		);
+
+		// Second event should be for Announcements
+		assert_eq!(
+			relevant_events[1].event,
+			ProxyEvent::DepositPoked {
+				who: 2,
+				kind: DepositKind::Announcements,
+				old_deposit: initial_announcement_deposit,
+				new_deposit: new_announcement_deposit,
+			}
+			.into()
+		);
+
+		// Poking again should charge fee as nothing changes
+		let result = Proxy::poke_deposit(RuntimeOrigin::signed(2));
+		assert_ok!(result.as_ref());
+		assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+
+		// Verify deposits remained the same
+		assert_eq!(Proxies::<Test>::get(2).1, new_proxy_deposit);
+		assert_eq!(Announcements::<Test>::get(2).1, new_announcement_deposit);
+		assert_eq!(
+			Balances::reserved_balance(2),
+			new_proxy_deposit.saturating_add(new_announcement_deposit)
+		);
+	});
+}
+
+#[test]
+fn poke_deposit_fails_for_unsigned_origin() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(Proxy::poke_deposit(RuntimeOrigin::none()), DispatchError::BadOrigin);
+	});
+}
diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs
index c9a39a400a88f..504b083f37e89 100644
--- a/substrate/frame/proxy/src/weights.rs
+++ b/substrate/frame/proxy/src/weights.rs
@@ -35,9 +35,9 @@
 //! Autogenerated weights for `pallet_proxy`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024`
 
 // Executed Command:
@@ -58,7 +58,6 @@
 // --no-storage-info
 // --no-min-squares
 // --no-median-slopes
-// --genesis-builder-policy=none
 // --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -81,6 +80,7 @@ pub trait WeightInfo {
 	fn remove_proxies(p: u32, ) -> Weight;
 	fn create_pure(p: u32, ) -> Weight;
 	fn kill_pure(p: u32, ) -> Weight;
+	fn poke_deposit() -> Weight;
 }
 
 /// Weights for `pallet_proxy` using the Substrate node and recommended hardware.
@@ -95,12 +95,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `p` is `[1, 31]`.
 	fn proxy(p: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `65 + p * (37 ±0)`
+		//  Measured:  `339 + p * (37 ±0)`
 		//  Estimated: `4706`
-		// Minimum execution time: 14_388_000 picoseconds.
-		Weight::from_parts(14_978_053, 4706)
-			// Standard Error: 982
-			.saturating_add(Weight::from_parts(26_955, 0).saturating_mul(p.into()))
+		// Minimum execution time: 23_353_000 picoseconds.
+ Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -117,14 +117,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `358 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 36_883_000 picoseconds. - Weight::from_parts(36_934_197, 5698) - // Standard Error: 2_551 - .saturating_add(Weight::from_parts(180_913, 0).saturating_mul(a.into())) - // Standard Error: 2_635 - .saturating_add(Weight::from_parts(38_689, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. + Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -136,14 +136,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_910_000 picoseconds. - Weight::from_parts(24_053_942, 5698) - // Standard Error: 1_044 - .saturating_add(Weight::from_parts(147_368, 0).saturating_mul(a.into())) - // Standard Error: 1_078 - .saturating_add(Weight::from_parts(4_805, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. + Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -155,14 +155,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_951_000 picoseconds. - Weight::from_parts(24_164_509, 5698) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(149_236, 0).saturating_mul(a.into())) - // Standard Error: 1_242 - .saturating_add(Weight::from_parts(898, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -176,14 +176,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `290 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 30_098_000 picoseconds. 
- Weight::from_parts(31_057_828, 5698) - // Standard Error: 2_790 - .saturating_add(Weight::from_parts(171_651, 0).saturating_mul(a.into())) - // Standard Error: 2_883 - .saturating_add(Weight::from_parts(38_563, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -192,12 +192,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_657_000 picoseconds. - Weight::from_parts(21_576_519, 4706) - // Standard Error: 1_096 - .saturating_add(Weight::from_parts(40_842, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,12 +206,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_470_000 picoseconds. - Weight::from_parts(21_337_014, 4706) - // Standard Error: 1_496 - .saturating_add(Weight::from_parts(39_232, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. + Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,12 +220,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_116_000 picoseconds. - Weight::from_parts(18_895_722, 4706) - // Standard Error: 809 - .saturating_add(Weight::from_parts(23_829, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. + Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -234,12 +234,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 21_990_000 picoseconds. - Weight::from_parts(22_637_682, 4706) - // Standard Error: 1_147 - .saturating_add(Weight::from_parts(21_637, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. 
+ Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -248,15 +248,30 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `102 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 19_860_000 picoseconds. - Weight::from_parts(20_734_482, 4706) - // Standard Error: 916 - .saturating_add(Weight::from_parts(21_379, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. + Weight::from_parts(26_780_627, 4706) + // Standard Error: 1_581 + .saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `519` + // Estimated: `5698` + // Minimum execution time: 46_733_000 picoseconds. + Weight::from_parts(47_972_000, 5698) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } } // For backwards compatibility and tests. @@ -270,12 +285,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `339 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 14_388_000 picoseconds. - Weight::from_parts(14_978_053, 4706) - // Standard Error: 982 - .saturating_add(Weight::from_parts(26_955, 0).saturating_mul(p.into())) + // Minimum execution time: 23_353_000 picoseconds. + Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -292,14 +307,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `358 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 36_883_000 picoseconds. - Weight::from_parts(36_934_197, 5698) - // Standard Error: 2_551 - .saturating_add(Weight::from_parts(180_913, 0).saturating_mul(a.into())) - // Standard Error: 2_635 - .saturating_add(Weight::from_parts(38_689, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. 
+ Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -311,14 +326,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_910_000 picoseconds. - Weight::from_parts(24_053_942, 5698) - // Standard Error: 1_044 - .saturating_add(Weight::from_parts(147_368, 0).saturating_mul(a.into())) - // Standard Error: 1_078 - .saturating_add(Weight::from_parts(4_805, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. + Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -330,14 +345,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_951_000 picoseconds. - Weight::from_parts(24_164_509, 5698) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(149_236, 0).saturating_mul(a.into())) - // Standard Error: 1_242 - .saturating_add(Weight::from_parts(898, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -351,14 +366,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `290 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 30_098_000 picoseconds. - Weight::from_parts(31_057_828, 5698) - // Standard Error: 2_790 - .saturating_add(Weight::from_parts(171_651, 0).saturating_mul(a.into())) - // Standard Error: 2_883 - .saturating_add(Weight::from_parts(38_563, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -367,12 +382,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_657_000 picoseconds. 
- Weight::from_parts(21_576_519, 4706) - // Standard Error: 1_096 - .saturating_add(Weight::from_parts(40_842, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -381,12 +396,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_470_000 picoseconds. - Weight::from_parts(21_337_014, 4706) - // Standard Error: 1_496 - .saturating_add(Weight::from_parts(39_232, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. + Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -395,12 +410,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `65 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_116_000 picoseconds. - Weight::from_parts(18_895_722, 4706) - // Standard Error: 809 - .saturating_add(Weight::from_parts(23_829, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. + Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -409,12 +424,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 21_990_000 picoseconds. - Weight::from_parts(22_637_682, 4706) - // Standard Error: 1_147 - .saturating_add(Weight::from_parts(21_637, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. + Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -423,13 +438,28 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `102 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 19_860_000 picoseconds. - Weight::from_parts(20_734_482, 4706) - // Standard Error: 916 - .saturating_add(Weight::from_parts(21_379, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. 
+		Weight::from_parts(26_780_627, 4706)
+			// Standard Error: 1_581
+			.saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
+	/// Storage: `Proxy::Proxies` (r:1 w:1)
+	/// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Proxy::Announcements` (r:1 w:1)
+	/// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
+	fn poke_deposit() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `519`
+		//  Estimated: `5698`
+		// Minimum execution time: 46_733_000 picoseconds.
+		Weight::from_parts(47_972_000, 5698)
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
 }
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index 8159bbefa76b1..80c897dfef700 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -181,23 +181,24 @@ mod mock;
 mod tests;
 pub mod weights;
 
-type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
-type BalanceOf<T> =
+pub type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
+pub type BalanceOf<T> =
 	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type BlockNumberFromProviderOf<T> =
+pub type BlockNumberFromProviderOf<T> =
 	<<T as Config>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
-type FriendsOf<T> = BoundedVec<<T as frame_system::Config>::AccountId, <T as Config>::MaxFriends>;
+pub type FriendsOf<T> =
+	BoundedVec<<T as frame_system::Config>::AccountId, <T as Config>::MaxFriends>;
 
 /// An active recovery process.
 #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)]
 pub struct ActiveRecovery<BlockNumber, Balance, Friends> {
 	/// The block number when the recovery process started.
-	created: BlockNumber,
+	pub created: BlockNumber,
 	/// The amount held in reserve of the `depositor`,
 	/// to be returned once this recovery process is closed.
-	deposit: Balance,
+	pub deposit: Balance,
 	/// The friends which have vouched so far. Always sorted.
-	friends: Friends,
+	pub friends: Friends,
 }
 
 /// Configuration for recovering an account.
@@ -205,14 +206,14 @@ pub struct ActiveRecovery<BlockNumber, Balance, Friends> {
 pub struct RecoveryConfig<BlockNumber, Balance, Friends> {
 	/// The minimum number of blocks since the start of the recovery process before the account
 	/// can be recovered.
-	delay_period: BlockNumber,
+	pub delay_period: BlockNumber,
 	/// The amount held in reserve of the `depositor`,
 	/// to be returned once this configuration is removed.
-	deposit: Balance,
+	pub deposit: Balance,
 	/// The list of friends which can help recover an account. Always sorted.
-	friends: Friends,
+	pub friends: Friends,
 	/// The number of approving friends needed to recover an account.
-	threshold: u16,
+	pub threshold: u16,
 }
 
 #[frame_support::pallet]
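Making these fields `pub` lets external tooling reason about a recovery without re-implementing the pallet. A hedged sketch of what that enables, with the generics collapsed to concrete stand-in types (field names match the pallet; everything else is illustrative):

```rust
pub struct RecoveryConfig {
    pub delay_period: u64,
    pub deposit: u64,
    pub friends: Vec<u64>,
    pub threshold: u16,
}

pub struct ActiveRecovery {
    pub created: u64,
    pub deposit: u64,
    pub friends: Vec<u64>,
}

/// Can this recovery be closed yet? True once the configured delay has
/// passed and enough friends have vouched.
fn is_claimable(config: &RecoveryConfig, active: &ActiveRecovery, now: u64) -> bool {
    let delay_passed = now >= active.created + config.delay_period;
    let enough_vouches = active.friends.len() >= config.threshold as usize;
    delay_passed && enough_vouches
}

fn main() {
    let config =
        RecoveryConfig { delay_period: 10, deposit: 5, friends: vec![2, 3, 4], threshold: 2 };
    let active = ActiveRecovery { created: 100, deposit: 5, friends: vec![2, 3] };
    assert!(is_claimable(&config, &active, 111));
    assert!(!is_claimable(&config, &active, 105)); // delay not yet elapsed
}
```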
diff --git a/substrate/frame/revive/rpc/build.rs b/substrate/frame/revive/rpc/build.rs
index d2ea601211a00..e3ae263fdbb92 100644
--- a/substrate/frame/revive/rpc/build.rs
+++ b/substrate/frame/revive/rpc/build.rs
@@ -33,12 +33,17 @@ fn main() {
 		.join("");
 
 	let target = std::env::var("TARGET").unwrap_or_else(|_| "unknown".to_string());
-	let repo = git2::Repository::open("../../../..").expect("should be a repository");
-	let head = repo.head().expect("should have head");
-	let commit = head.peel_to_commit().expect("should have commit");
-	let branch = head.shorthand().unwrap_or("unknown").to_string();
-	let id = &commit.id().to_string()[..7];
-	println!("cargo:rustc-env=GIT_REVISION={branch}-{id}");
+	let (branch, id) = if let Ok(repo) = git2::Repository::open("../../../..") {
+		let head = repo.head().expect("should have head");
+		let commit = head.peel_to_commit().expect("should have commit");
+		let branch = head.shorthand().unwrap_or("unknown").to_string();
+		let id = &commit.id().to_string()[..7];
+		(branch, id.to_string())
+	} else {
+		("unknown".to_string(), "unknown".to_string())
+	};
+
 	println!("cargo:rustc-env=RUSTC_VERSION={rustc_version}");
 	println!("cargo:rustc-env=TARGET={target}");
+	println!("cargo:rustc-env=GIT_REVISION={branch}-{id}");
 }
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 0e053a0dbb4d3..017a3491e35eb 100644
Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ
diff --git a/substrate/frame/revive/rpc/src/eth-rpc-tester.rs b/substrate/frame/revive/rpc/src/eth-rpc-tester.rs
index 460812602fe20..9f3c44a3289f0 100644
--- a/substrate/frame/revive/rpc/src/eth-rpc-tester.rs
+++ b/substrate/frame/revive/rpc/src/eth-rpc-tester.rs
@@ -35,8 +35,10 @@ pub struct CliCommand {
 	pub rpc_url: String,
 
 	/// The parity docker image e.g eth-rpc:master-fb2e414f
-	#[clap(long, default_value = "eth-rpc:master-fb2e414f")]
-	docker_image: String,
+	/// When not specified, no eth-rpc docker image is started
+	/// and the test runs against the provided `rpc_url` directly.
+	#[clap(long)]
+	docker_image: Option<String>,
 
 	/// The docker binary
 	/// Either docker or podman
@@ -48,9 +50,10 @@ pub struct CliCommand {
 async fn main() -> anyhow::Result<()> {
 	let CliCommand { docker_bin, rpc_url, docker_image, .. } = CliCommand::parse();
 
-	if std::env::var("SKIP_DOCKER").is_ok() {
+	let Some(docker_image) = docker_image else {
+		println!("Docker image not specified, using: {rpc_url:?}");
 		return test_eth_rpc(&rpc_url).await;
-	}
+	};
 
 	let mut docker_process = start_docker(&docker_bin, &docker_image)?;
 	let stderr = docker_process.stderr.take().unwrap();
@@ -129,6 +132,7 @@ async fn test_eth_rpc(rpc_url: &str) -> anyhow::Result<()> {
 	println!("Account:");
 	println!("- address: {:?}", account.address());
+	println!("- substrate address: {}", account.substrate_account());
 
 	let client = Arc::new(HttpClientBuilder::default().build(rpc_url)?);
 	let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?;
diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs
index c8c633a4e982c..e38fc15d149f0 100644
--- a/substrate/frame/revive/rpc/src/example.rs
+++ b/substrate/frame/revive/rpc/src/example.rs
@@ -123,7 +123,7 @@ impl TransactionBuilder {
 			.call(
 				GenericTransaction {
 					from: Some(from),
-					input: Some(input.clone()),
+					input: input.into(),
 					value: Some(value),
 					to,
 					..Default::default()
@@ -151,7 +151,7 @@ impl TransactionBuilder {
 			.estimate_gas(
 				GenericTransaction {
 					from: Some(from),
-					input: Some(input.clone()),
+					input: input.clone().into(),
 					value: Some(value),
 					gas_price: Some(gas_price),
 					to,
diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs
index b4b2c6ffcf17e..b4f2495100740 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types.rs
@@ -120,6 +120,28 @@ fn m3_2048(bloom: &mut [u8; 256], bytes: &[u8]) {
 	}
 }
 
+#[test]
+fn can_deserialize_input_or_data_field_from_generic_transaction() {
+	let cases = [
+		("with input", r#"{"input": "0x01"}"#),
+		("with data", r#"{"data": "0x01"}"#),
+		("with both", r#"{"data": "0x01", "input": "0x01"}"#),
+	];
+
+	for (name, json) in cases {
+		let tx = serde_json::from_str::<GenericTransaction>(json).unwrap();
+		assert_eq!(tx.input.to_vec(), vec![1u8], "{}", name);
+	}
+
+	let err = serde_json::from_str::<GenericTransaction>(r#"{"data": "0x02", "input": "0x01"}"#)
+		.unwrap_err();
+	assert!(err.to_string().starts_with(
+		"Both \"data\" and \"input\" are set and not equal. Please use \"input\" to pass transaction call data"
+	));
+}
+
 #[test]
 fn logs_bloom_works() {
 	let receipt: ReceiptInfo = serde_json::from_str(
@@ -175,7 +197,7 @@ impl GenericTransaction {
 				from,
 				r#type: Some(tx.r#type.as_byte()),
 				chain_id: tx.chain_id,
-				input: Some(tx.input),
+				input: tx.input.into(),
 				nonce: Some(tx.nonce),
 				value: Some(tx.value),
 				to: tx.to,
@@ -187,7 +209,7 @@ impl GenericTransaction {
 				from,
 				r#type: Some(tx.r#type.as_byte()),
 				chain_id: Some(tx.chain_id),
-				input: Some(tx.input),
+				input: tx.input.into(),
 				nonce: Some(tx.nonce),
 				value: Some(tx.value),
 				to: Some(tx.to),
@@ -208,7 +230,7 @@ impl GenericTransaction {
 				from,
 				r#type: Some(tx.r#type.as_byte()),
 				chain_id: Some(tx.chain_id),
-				input: Some(tx.input),
+				input: tx.input.into(),
 				nonce: Some(tx.nonce),
 				value: Some(tx.value),
 				to: tx.to,
@@ -227,7 +249,7 @@ impl GenericTransaction {
 				from,
 				r#type: Some(tx.r#type.as_byte()),
 				chain_id: Some(tx.chain_id),
-				input: Some(tx.input),
+				input: tx.input.into(),
 				nonce: Some(tx.nonce),
 				value: Some(tx.value),
 				to: tx.to,
@@ -245,7 +267,7 @@ impl GenericTransaction {
 			TYPE_LEGACY => Ok(TransactionLegacyUnsigned {
 				r#type: TypeLegacy {},
 				chain_id: self.chain_id,
-				input: self.input.unwrap_or_default(),
+				input: self.input.to_bytes(),
 				nonce: self.nonce.unwrap_or_default(),
 				value: self.value.unwrap_or_default(),
 				to: self.to,
@@ -256,7 +278,7 @@ impl GenericTransaction {
 			TYPE_EIP1559 => Ok(Transaction1559Unsigned {
 				r#type: TypeEip1559 {},
 				chain_id: self.chain_id.unwrap_or_default(),
-				input: self.input.unwrap_or_default(),
+				input: self.input.to_bytes(),
 				nonce: self.nonce.unwrap_or_default(),
 				value: self.value.unwrap_or_default(),
 				to: self.to,
@@ -270,7 +292,7 @@ impl GenericTransaction {
 			TYPE_EIP2930 => Ok(Transaction2930Unsigned {
 				r#type: TypeEip2930 {},
 				chain_id: self.chain_id.unwrap_or_default(),
-				input: self.input.unwrap_or_default(),
+				input: self.input.to_bytes(),
 				nonce: self.nonce.unwrap_or_default(),
 				value: self.value.unwrap_or_default(),
 				to: self.to,
@@ -282,7 +304,7 @@ impl GenericTransaction {
 			TYPE_EIP4844 => Ok(Transaction4844Unsigned {
 				r#type: TypeEip4844 {},
 				chain_id: self.chain_id.unwrap_or_default(),
-				input: self.input.unwrap_or_default(),
+				input: self.input.to_bytes(),
 				nonce: self.nonce.unwrap_or_default(),
 				value: self.value.unwrap_or_default(),
 				to: self.to.unwrap_or_default(),
diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
index 8fd4c1072a931..1487e75b391ce 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
@@ -23,7 +23,55 @@ use codec::{Decode, Encode};
 use derive_more::{From, TryInto};
 pub use ethereum_types::*;
 use scale_info::TypeInfo;
-use serde::{Deserialize, Serialize};
+use serde::{Deserialize, Deserializer, Serialize};
+
+/// Input of a `GenericTransaction`
+#[derive(
+	Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq,
+)]
+pub struct InputOrData {
+	#[serde(skip_serializing_if = "Option::is_none")]
+	input: Option<Bytes>,
+	#[serde(skip_serializing_if = "Option::is_none")]
+	data: Option<Bytes>,
+}
+
+impl From<Bytes> for InputOrData {
+	fn from(value: Bytes) -> Self {
+		InputOrData { input: Some(value), data: None }
+	}
+}
+
+impl From<Vec<u8>> for InputOrData {
+	fn from(value: Vec<u8>) -> Self {
+		InputOrData { input: Some(Bytes(value)), data: None }
+	}
+}
+
+impl InputOrData {
+	/// Get the input as `Bytes`.
+	pub fn to_bytes(self) -> Bytes {
+		match self {
+			InputOrData { input: Some(input), data: _ } => input,
+			InputOrData { input: None, data: Some(data) } => data,
+			_ => Default::default(),
+		}
+	}
+
+	/// Get the input as `Vec<u8>`.
+	pub fn to_vec(self) -> Vec<u8> {
+		self.to_bytes().0
+	}
+}
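The precedence encoded by `to_bytes` is worth spelling out: `input` always wins, `data` is only a fallback. A self-contained illustration with simplified stand-ins for `Bytes`/`InputOrData` (not the crate's actual types, which also carry serde/codec derives):

```rust
#[derive(Debug, Default, Clone, PartialEq)]
struct Bytes(Vec<u8>);

struct InputOrData {
    input: Option<Bytes>,
    data: Option<Bytes>,
}

impl InputOrData {
    fn to_bytes(self) -> Bytes {
        match self {
            // `input` takes precedence whenever it is present ...
            InputOrData { input: Some(input), data: _ } => input,
            // ... `data` is only consulted as a fallback ...
            InputOrData { input: None, data: Some(data) } => data,
            // ... and an empty payload is the default.
            _ => Default::default(),
        }
    }
}

fn main() {
    let both = InputOrData { input: Some(Bytes(vec![1])), data: Some(Bytes(vec![2])) };
    assert_eq!(both.to_bytes(), Bytes(vec![1])); // input wins
    let only_data = InputOrData { input: None, data: Some(Bytes(vec![2])) };
    assert_eq!(only_data.to_bytes(), Bytes(vec![2])); // fallback to data
}
```

Note that the conflicting case (`input != data`) never reaches `to_bytes` in practice, because the deserializer below rejects it first.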
+
+fn deserialize_input_or_data<'d, D: Deserializer<'d>>(d: D) -> Result<InputOrData, D::Error> {
+	let value = InputOrData::deserialize(d)?;
+	match &value {
+		InputOrData { input: Some(input), data: Some(data) } if input != data =>
+			Err(serde::de::Error::custom("Both \"data\" and \"input\" are set and not equal. Please use \"input\" to pass transaction call data")),
+		_ => Ok(value),
+	}
+}
 
 /// Block object
 #[derive(
@@ -207,8 +255,8 @@ pub struct GenericTransaction {
 	#[serde(rename = "gasPrice", skip_serializing_if = "Option::is_none")]
 	pub gas_price: Option<U256>,
 	/// input data
-	#[serde(alias = "data", skip_serializing_if = "Option::is_none")]
-	pub input: Option<Bytes>,
+	#[serde(flatten, deserialize_with = "deserialize_input_or_data")]
+	pub input: InputOrData,
 	/// max fee per blob gas
 	/// The maximum total fee per gas the sender is willing to pay for blob gas in wei
 	#[serde(rename = "maxFeePerBlobGas", skip_serializing_if = "Option::is_none")]
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index 505b645f18747..2eaced0df0a83 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -328,7 +328,7 @@ pub trait EthExtra {
 				InvalidTransaction::Call
 			})?;
 
-		let data = input.unwrap_or_default().0;
+		let data = input.to_vec();
 
 		let (gas_limit, storage_deposit_limit) =
 			<Self::Config as Config>::EthGasEncoder::decode(gas).ok_or_else(|| {
@@ -508,7 +508,7 @@ mod test {
 		/// Create a new builder with an instantiate call.
 		fn instantiate_with(code: Vec<u8>, data: Vec<u8>) -> Self {
 			let mut builder = Self::new();
-			builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect()));
+			builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect()).into();
 			builder
 		}
@@ -580,7 +580,7 @@ mod test {
 					crate::Call::call::<Runtime> {
 						dest: tx.to.unwrap(),
 						value: tx.value.unwrap_or_default().as_u64(),
-						data: tx.input.unwrap_or_default().0,
+						data: tx.input.to_vec(),
 						gas_limit,
 						storage_deposit_limit
 					}
@@ -649,7 +649,7 @@ mod test {
 
 		// Fail because the tx input fail to get the blob length
 		assert_eq!(
-			builder.mutate_estimate_and_check(Box::new(|tx| tx.input = Some(Bytes(vec![1, 2, 3])))),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.input = vec![1, 2, 3].into())),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
 		);
 	}
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index bb8d4600152e6..d88d4f206fa47 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -357,7 +357,7 @@ pub trait Ext: sealing::Sealed {
 	/// Returns the value transferred along with this call.
 	fn value_transferred(&self) -> U256;
 
-	/// Returns the timestamp of the current block
+	/// Returns the timestamp of the current block in seconds.
 	fn now(&self) -> U256;
 
 	/// Returns the minimum balance that is required for creating an account.
@@ -1796,7 +1796,7 @@ where
 	}
 
 	fn now(&self) -> U256 {
-		self.timestamp.into()
+		(self.timestamp / 1000u32.into()).into()
 	}
 
 	fn minimum_balance(&self) -> U256 {
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index c7af353de8cdd..b13ff6c032637 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -1159,7 +1159,7 @@ where
 			Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())),
 		};
 
-		let input = tx.input.clone().unwrap_or_default().0;
+		let input = tx.input.clone().to_vec();
 
 		let extract_error = |err| {
 			if err == Error::<T>::TransferFailed.into() ||
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index 29f1b73e82c64..90cb687b9de6f 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -4282,7 +4282,7 @@ fn skip_transfer_works() {
 		Pallet::<Test>::bare_eth_transact(
 			GenericTransaction {
 				from: Some(BOB_ADDR),
-				input: Some(code.clone().into()),
+				input: code.clone().into(),
 				gas: Some(1u32.into()),
 				..Default::default()
 			},
@@ -4298,7 +4298,7 @@ fn skip_transfer_works() {
 		assert_ok!(Pallet::<Test>::bare_eth_transact(
 			GenericTransaction {
 				from: Some(ALICE_ADDR),
-				input: Some(code.clone().into()),
+				input: code.clone().into(),
 				..Default::default()
 			},
 			Weight::MAX,
@@ -4333,7 +4333,7 @@ fn skip_transfer_works() {
 			GenericTransaction {
 				from: Some(BOB_ADDR),
 				to: Some(caller_addr),
-				input: Some((0u32, &addr).encode().into()),
+				input: (0u32, &addr).encode().into(),
 				gas: Some(1u32.into()),
 				..Default::default()
 			},
@@ -4354,7 +4354,7 @@ fn skip_transfer_works() {
 			GenericTransaction {
 				from: Some(BOB_ADDR),
 				to: Some(caller_addr),
-				input: Some((0u32, &addr).encode().into()),
+				input: (0u32, &addr).encode().into(),
 				..Default::default()
 			},
 			Weight::MAX,
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index 3b0432d93005f..676af60ec8044 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -325,7 +325,7 @@ pub trait HostFn: private::Sealed {
 		salt: Option<&[u8; 32]>,
 	) -> Result;
 
-	/// Load the latest block timestamp into the supplied buffer
+	/// Load the latest block timestamp in seconds into the supplied buffer
 	///
 	/// # Parameters
 	///
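The `/ 1000` in `now()` exists because `pallet_timestamp` stores block time in milliseconds, while Ethereum contracts expect `block.timestamp` in Unix seconds. A minimal sketch of the conversion with plain integers (the real code goes through `U256`):

```rust
/// Convert a block timestamp in milliseconds (as stored by `pallet_timestamp`)
/// into the Unix-seconds value an EVM contract observes via `block.timestamp`.
fn evm_timestamp_secs(timestamp_ms: u64) -> u64 {
    // Integer division truncates toward zero, matching the on-chain math.
    timestamp_ms / 1000
}

fn main() {
    assert_eq!(evm_timestamp_secs(1_741_000_123_456), 1_741_000_123);
}
```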
diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs
index 2ad94ec04df47..65099dfd690cf 100644
--- a/substrate/frame/scheduler/src/lib.rs
+++ b/substrate/frame/scheduler/src/lib.rs
@@ -323,6 +323,7 @@ pub mod pallet {
 		type BlockNumberProvider: BlockNumberProvider;
 	}
 
+	/// Block number at which the agenda began incomplete execution.
 	#[pallet::storage]
 	pub type IncompleteSince<T: Config> = StorageValue<_, BlockNumberFor<T>>;
 
@@ -386,6 +387,8 @@ pub mod pallet {
 		RetryFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
 		/// The given task can never be executed since it is overweight.
 		PermanentlyOverweight { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
+		/// Agenda is incomplete from `when`.
+		AgendaIncomplete { when: BlockNumberFor<T> },
 	}
 
 	#[pallet::error]
@@ -1202,6 +1205,7 @@ impl<T: Config> Pallet<T> {
 			}
 			incomplete_since = incomplete_since.min(when);
 			if incomplete_since <= now {
+				Self::deposit_event(Event::AgendaIncomplete { when: incomplete_since });
 				IncompleteSince::<T>::put(incomplete_since);
 			}
 		}
@@ -1235,10 +1239,7 @@ impl<T: Config> Pallet<T> {
 		let mut dropped = 0;
 
 		for (agenda_index, _) in ordered.into_iter().take(max as usize) {
-			let task = match agenda[agenda_index as usize].take() {
-				None => continue,
-				Some(t) => t,
-			};
+			let Some(task) = agenda[agenda_index as usize].take() else { continue };
 			let base_weight = T::WeightInfo::service_task(
 				task.call.lookup_len().map(|x| x as usize),
 				task.maybe_id.is_some(),
@@ -1246,6 +1247,7 @@ impl<T: Config> Pallet<T> {
 			);
 			if !weight.can_consume(base_weight) {
 				postponed += 1;
+				agenda[agenda_index as usize] = Some(task);
 				break
 			}
 			let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task);
diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs
index a9aea97542acd..28b5ee6d6083c 100644
--- a/substrate/frame/scheduler/src/mock.rs
+++ b/substrate/frame/scheduler/src/mock.rs
@@ -212,7 +212,7 @@ impl WeightInfo for TestWeightInfo {
 	}
 }
 parameter_types! {
-	pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) *
+	pub storage MaximumSchedulerWeight: Weight = Perbill::from_percent(80) *
 		BlockWeights::get().max_block;
 }
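Adding `storage` to the `parameter_types!` entry is what allows the new test further below to change the scheduler's weight budget mid-test: such parameters expose `get`/`set` accessors instead of a compile-time constant. A hand-rolled sketch of that effect (simplified; not the actual macro expansion, which backs the value with runtime storage):

```rust
use std::cell::RefCell;

thread_local! {
    // Stand-in for the mutable backing store the macro generates.
    static MAX_WEIGHT: RefCell<u64> = RefCell::new(80);
}

struct MaximumSchedulerWeight;

impl MaximumSchedulerWeight {
    fn get() -> u64 {
        MAX_WEIGHT.with(|w| *w.borrow())
    }
    fn set(new: &u64) {
        MAX_WEIGHT.with(|w| *w.borrow_mut() = *new);
    }
}

fn main() {
    let old = MaximumSchedulerWeight::get();
    MaximumSchedulerWeight::set(&5); // shrink the budget to force postponement
    assert_eq!(MaximumSchedulerWeight::get(), 5);
    MaximumSchedulerWeight::set(&old); // restore so later blocks can execute
}
```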
diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs
index d0a3acc05ac7e..1b7739e855ea7 100644
--- a/substrate/frame/scheduler/src/tests.rs
+++ b/substrate/frame/scheduler/src/tests.rs
@@ -1264,8 +1264,8 @@ fn cancel_named_periodic_scheduling_works() {
 
 #[test]
 fn scheduler_respects_weight_limits() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 });
 		assert_ok!(Scheduler::do_schedule(
 			DispatchTime::At(4),
@@ -1292,8 +1292,8 @@ fn scheduler_respects_weight_limits() {
 
 #[test]
 fn retry_respects_weight_limits() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 		// schedule 42
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 });
 		assert_ok!(Scheduler::do_schedule(
@@ -1344,8 +1344,8 @@ fn retry_respects_weight_limits() {
 
 #[test]
 fn try_schedule_retry_respects_weight_limits() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 		let service_agendas_weight = <Test as Config>::WeightInfo::service_agendas_base();
 		let service_agenda_weight = <Test as Config>::WeightInfo::service_agenda_base(
 			<Test as Config>::MaxScheduledPerBlock::get(),
@@ -1404,8 +1404,8 @@ fn try_schedule_retry_respects_weight_limits() {
 
 /// Permanently overweight calls are not deleted but also not executed.
 #[test]
 fn scheduler_does_not_delete_permanently_overweight_call() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight });
 		assert_ok!(Scheduler::do_schedule(
 			DispatchTime::At(4),
@@ -1430,10 +1430,10 @@ fn scheduler_does_not_delete_permanently_overweight_call() {
 
 #[test]
 fn scheduler_handles_periodic_failure() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
-	let max_per_block = <Test as Config>::MaxScheduledPerBlock::get();
-
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
+		let max_per_block = <Test as Config>::MaxScheduledPerBlock::get();
+
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 });
 		let bound = Preimage::bound(call).unwrap();
 
@@ -1472,9 +1472,9 @@ fn scheduler_handles_periodic_failure() {
 
 #[test]
 fn scheduler_handles_periodic_unavailable_preimage() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
-
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
+
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 });
 		let hash = <Test as frame_system::Config>::Hashing::hash_of(&call);
 		let len = call.using_encoded(|x| x.len()) as u32;
@@ -1518,8 +1518,8 @@ fn scheduler_handles_periodic_unavailable_preimage() {
 
 #[test]
 fn scheduler_respects_priority_ordering() {
-	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 });
 		assert_ok!(Scheduler::do_schedule(
 			DispatchTime::At(4),
@@ -3039,3 +3039,40 @@ fn unavailable_call_is_detected() {
 		assert!(!Preimage::is_requested(&hash));
 	});
 }
+
+#[test]
+fn postponed_task_is_still_available() {
+	new_test_ext().execute_with(|| {
+		let service_agendas_weight = <Test as Config>::WeightInfo::service_agendas_base();
+		let service_agenda_weight = <Test as Config>::WeightInfo::service_agenda_base(
+			<Test as Config>::MaxScheduledPerBlock::get(),
+		);
+
+		assert_ok!(Scheduler::schedule(
+			RuntimeOrigin::root(),
+			4,
+			None,
+			128,
+			Box::new(RuntimeCall::from(frame_system::Call::remark {
+				remark: vec![0u8; 3 * 1024 * 1024],
+			}))
+		));
+		System::run_to_block::<AllPalletsWithSystem>(3);
+		// Scheduled calls are in the agenda.
+		assert_eq!(Agenda::<Test>::get(4).len(), 1);
+
+		let old_weight = MaximumSchedulerWeight::get();
+		MaximumSchedulerWeight::set(&service_agenda_weight.saturating_add(service_agendas_weight));
+
+		System::run_to_block::<AllPalletsWithSystem>(4);
+
+		// The task should still be there.
+		assert_eq!(Agenda::<Test>::get(4).iter().filter(|a| a.is_some()).count(), 1);
+		System::assert_last_event(crate::Event::AgendaIncomplete { when: 4 }.into());
+
+		// Now it should get executed
+		MaximumSchedulerWeight::set(&old_weight);
+		System::run_to_block::<AllPalletsWithSystem>(5);
+		assert!(Agenda::<Test>::get(4).is_empty());
+	});
+}
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 4ea9f6728bb66..c912c2fe7d4fb 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -189,7 +189,7 @@ pub mod prelude {
 	/// `frame_system`'s parent crate, which is mandatory in all pallets build with this crate.
 	///
 	/// Conveniently, the keyword `frame_system` is in scope as one uses `use
-	/// polkadot_sdk_frame::prelude::*`
+	/// polkadot_sdk_frame::prelude::*`.
#[doc(inline)] pub use frame_system; @@ -199,13 +199,16 @@ pub mod prelude { #[doc(no_inline)] pub use frame_support::pallet_prelude::*; - /// Dispatch types from `frame-support`, other fundamental traits + /// Dispatch types from `frame-support`, other fundamental traits. #[doc(no_inline)] pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; - pub use frame_support::traits::{ - Contains, EitherOf, EstimateNextSessionRotation, Everything, IsSubType, MapSuccess, - NoOpPoll, OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler, - VariantCount, VariantCountOf, + pub use frame_support::{ + traits::{ + Contains, Defensive, DefensiveSaturating, EitherOf, EstimateNextSessionRotation, + Everything, IsSubType, MapSuccess, NoOpPoll, OnRuntimeUpgrade, OneSessionHandler, + RankedMembers, RankedMembersSwapHandler, VariantCount, VariantCountOf, + }, + PalletId, }; /// Pallet prelude of `frame-system`. @@ -220,7 +223,7 @@ pub mod prelude { #[doc(no_inline)] pub use super::derive::*; - /// All hashing related things + /// All hashing related things. pub use super::hashing::*; /// All account related things. @@ -229,11 +232,15 @@ pub mod prelude { /// All arithmetic types and traits used for safe math. pub use super::arithmetic::*; + /// All token related types and traits. + pub use super::token::*; + /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ - BlockNumberProvider, Bounded, Convert, DispatchInfoOf, Dispatchable, ReduceBy, - ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, TrailingZeroInput, + AccountIdConversion, BlockNumberProvider, Bounded, Convert, ConvertBack, DispatchInfoOf, + Dispatchable, ReduceBy, ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, + TrailingZeroInput, }; /// Bounded storage related types. @@ -537,6 +544,20 @@ pub mod arithmetic { pub use sp_arithmetic::{traits::*, *}; } +/// All token related types and traits. +pub mod token { + pub use frame_support::traits::{ + tokens, + tokens::{ + currency, fungible, fungibles, imbalance, nonfungible, nonfungible_v2, nonfungibles, + nonfungibles_v2, pay, AssetId, BalanceStatus, DepositConsequence, ExistenceRequirement, + Fortitude, Pay, Precision, Preservation, Provenance, WithdrawConsequence, + WithdrawReasons, + }, + OnUnbalanced, + }; +} + /// All derive macros used in frame. /// /// This is already part of the [`prelude`]. 
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index e0db9002ab301..8cddd4b72e7ac 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -712,7 +712,7 @@ pub mod pallet {
 		#[pallet::weight((T::SystemWeightInfo::set_code(), DispatchClass::Operational))]
 		pub fn set_code(origin: OriginFor<T>, code: Vec<u8>) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
-			Self::can_set_code(&code)?;
+			Self::can_set_code(&code, true).into_result()?;
 			T::OnSetCode::set_code(code)?;
 			// consume the rest of the block to prevent further transactions
 			Ok(Some(T::BlockWeights::get().max_block).into())
@@ -729,6 +729,7 @@ pub mod pallet {
 			code: Vec<u8>,
 		) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
+			Self::can_set_code(&code, false).into_result()?;
 			T::OnSetCode::set_code(code)?;
 			Ok(Some(T::BlockWeights::get().max_block).into())
 		}
@@ -863,8 +864,32 @@ pub mod pallet {
 			_: OriginFor<T>,
 			code: Vec<u8>,
 		) -> DispatchResultWithPostInfo {
-			let post = Self::do_apply_authorize_upgrade(code)?;
-			Ok(post)
+			let res = Self::validate_code_is_authorized(&code)?;
+			AuthorizedUpgrade::<T>::kill();
+
+			match Self::can_set_code(&code, res.check_version) {
+				CanSetCodeResult::Ok => {},
+				CanSetCodeResult::MultiBlockMigrationsOngoing =>
+					return Err(Error::<T>::MultiBlockMigrationsOngoing.into()),
+				CanSetCodeResult::InvalidVersion(error) => {
+					// The upgrade is invalid and there is no benefit in trying to apply this again.
+					Self::deposit_event(Event::RejectedInvalidAuthorizedUpgrade {
+						code_hash: res.code_hash,
+						error: error.into(),
+					});
+
+					// Not the fault of the caller of call.
+					return Ok(Pays::No.into())
+				},
+			};
+			T::OnSetCode::set_code(code)?;
+
+			Ok(PostDispatchInfo {
+				// consume the rest of the block to prevent further transactions
+				actual_weight: Some(T::BlockWeights::get().max_block),
+				// no fee for valid upgrade
+				pays_fee: Pays::No,
+			})
 		}
 	}
 
@@ -894,6 +919,8 @@ pub mod pallet {
 		TaskFailed { task: T::RuntimeTask, err: DispatchError },
 		/// An upgrade was authorized.
 		UpgradeAuthorized { code_hash: T::Hash, check_version: bool },
+		/// An invalid authorized upgrade was rejected while trying to apply it.
+		RejectedInvalidAuthorizedUpgrade { code_hash: T::Hash, error: DispatchError },
 	}
 
 	/// Error for the System pallet
@@ -1091,16 +1118,17 @@ pub mod pallet {
 		type Call = Call<T>;
 		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
 			if let Call::apply_authorized_upgrade { ref code } = call {
-				if let Ok(hash) = Self::validate_authorized_upgrade(&code[..]) {
+				if let Ok(res) = Self::validate_code_is_authorized(&code[..]) {
 					return Ok(ValidTransaction {
-						priority: 100,
+						priority: u64::max_value(),
 						requires: Vec::new(),
-						provides: vec![hash.as_ref().to_vec()],
+						provides: vec![res.code_hash.encode()],
 						longevity: TransactionLongevity::max_value(),
 						propagate: true,
 					})
 				}
 			}
+
 			#[cfg(feature = "experimental")]
 			if let Call::do_task { ref task } = call {
 				if task.is_valid() {
@@ -1113,6 +1141,7 @@ pub mod pallet {
 					})
 				}
 			}
+
 			Err(InvalidTransaction::Call.into())
 		}
 	}
@@ -1470,6 +1499,28 @@ pub enum DecRefStatus {
 	Exists,
 }
 
+/// Result of [`Pallet::can_set_code`].
+pub enum CanSetCodeResult<T: Config> {
+	/// Everything is fine.
+	Ok,
+	/// Multi-block migrations are on-going.
+	MultiBlockMigrationsOngoing,
+	/// The runtime version is invalid or could not be fetched.
+	InvalidVersion(Error<T>),
+}
+
+impl<T: Config> CanSetCodeResult<T> {
+	/// Convert `Self` into a result.
+	pub fn into_result(self) -> Result<(), DispatchError> {
+		match self {
+			Self::Ok => Ok(()),
+			Self::MultiBlockMigrationsOngoing =>
+				Err(Error::<T>::MultiBlockMigrationsOngoing.into()),
+			Self::InvalidVersion(err) => Err(err.into()),
+		}
+	}
+}
+
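The three-way enum lets `apply_authorized_upgrade` treat "retry later" and "permanently invalid" differently, which a bare `DispatchError` could not express. A condensed sketch of that consumption pattern with local stand-in types (the strings stand in for `Error<T>` variants):

```rust
enum CanSetCodeResult {
    Ok,
    MultiBlockMigrationsOngoing,
    InvalidVersion(&'static str),
}

fn apply_authorized(check: CanSetCodeResult) -> Result<&'static str, &'static str> {
    match check {
        // Fine: fall through and actually set the code; the caller pays no fee.
        CanSetCodeResult::Ok => Ok("code set"),
        // Transient: return an error. In FRAME a failed dispatch rolls back
        // storage, so the earlier `kill()` of the authorization is undone and
        // the upgrade can simply be resubmitted once migrations finish.
        CanSetCodeResult::MultiBlockMigrationsOngoing => Err("migrations ongoing"),
        // Permanent: emit an event and return success with `Pays::No`, so the
        // (possibly unsigned) submitter is not punished for a bad blob while
        // the now-useless authorization stays cleared.
        CanSetCodeResult::InvalidVersion(_) => Ok("rejected, authorization cleared"),
    }
}

fn main() {
    assert!(apply_authorized(CanSetCodeResult::Ok).is_ok());
    assert!(apply_authorized(CanSetCodeResult::MultiBlockMigrationsOngoing).is_err());
    assert!(apply_authorized(CanSetCodeResult::InvalidVersion("InvalidSpecName")).is_ok());
}
```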
 impl<T: Config> Pallet<T> {
 	/// Returns the `spec_version` of the last runtime upgrade.
 	///
@@ -2213,71 +2264,55 @@ impl<T: Config> Pallet<T> {
 
 	/// Determine whether or not it is possible to update the code.
 	///
-	/// Checks the given code if it is a valid runtime wasm blob by instantiating
-	/// it and extracting the runtime version of it. It checks that the runtime version
-	/// of the old and new runtime has the same spec name and that the spec version is increasing.
-	pub fn can_set_code(code: &[u8]) -> Result<(), sp_runtime::DispatchError> {
+	/// - `check_version`: Should the runtime version be checked?
+	pub fn can_set_code(code: &[u8], check_version: bool) -> CanSetCodeResult<T> {
 		if T::MultiBlockMigrator::ongoing() {
-			return Err(Error::<T>::MultiBlockMigrationsOngoing.into())
+			return CanSetCodeResult::MultiBlockMigrationsOngoing
 		}
 
-		let current_version = T::Version::get();
-		let new_version = sp_io::misc::runtime_version(code)
-			.and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok())
-			.ok_or(Error::<T>::FailedToExtractRuntimeVersion)?;
+		if check_version {
+			let current_version = T::Version::get();
+			let Some(new_version) = sp_io::misc::runtime_version(code)
+				.and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok())
+			else {
+				return CanSetCodeResult::InvalidVersion(Error::<T>::FailedToExtractRuntimeVersion)
+			};
 
-		cfg_if::cfg_if! {
-			if #[cfg(all(feature = "runtime-benchmarks", not(test)))] {
+			cfg_if::cfg_if! {
+				if #[cfg(all(feature = "runtime-benchmarks", not(test)))] {
 					// Let's ensure the compiler doesn't optimize our fetching of the runtime version away.
 					core::hint::black_box((new_version, current_version));
-					Ok(())
-			} else {
-				if new_version.spec_name != current_version.spec_name {
-					return Err(Error::<T>::InvalidSpecName.into())
-				}
+				} else {
+					if new_version.spec_name != current_version.spec_name {
+						return CanSetCodeResult::InvalidVersion(Error::<T>::InvalidSpecName)
+					}
 
-				if new_version.spec_version <= current_version.spec_version {
-					return Err(Error::<T>::SpecVersionNeedsToIncrease.into())
+					if new_version.spec_version <= current_version.spec_version {
+						return CanSetCodeResult::InvalidVersion(Error::<T>::SpecVersionNeedsToIncrease)
+					}
 				}
-
-				Ok(())
 			}
 		}
+
+		CanSetCodeResult::Ok
 	}
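In isolation, the `check_version` branch enforces two invariants: the `spec_name` must be unchanged and the `spec_version` must strictly increase. A standalone sketch with a trimmed-down version struct (the real check first decodes a full `RuntimeVersion` out of the candidate wasm):

```rust
struct Version<'a> {
    spec_name: &'a str,
    spec_version: u32,
}

fn check_upgrade(current: &Version<'_>, new: &Version<'_>) -> Result<(), &'static str> {
    // Refuse a blob built for a different chain.
    if new.spec_name != current.spec_name {
        return Err("InvalidSpecName");
    }
    // Refuse a downgrade or a re-upload of the current runtime.
    if new.spec_version <= current.spec_version {
        return Err("SpecVersionNeedsToIncrease");
    }
    Ok(())
}

fn main() {
    let current = Version { spec_name: "westend", spec_version: 1_017_000 };
    let ok = Version { spec_name: "westend", spec_version: 1_018_000 };
    assert!(check_upgrade(&current, &ok).is_ok());
    let stale = Version { spec_name: "westend", spec_version: 1_017_000 };
    assert!(check_upgrade(&current, &stale).is_err());
}
```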
 
-	/// To be called after any origin/privilege checks. Put the code upgrade authorization into
-	/// storage and emit an event. Infallible.
+	/// Authorize the given `code_hash` as upgrade.
 	pub fn do_authorize_upgrade(code_hash: T::Hash, check_version: bool) {
 		AuthorizedUpgrade::<T>::put(CodeUpgradeAuthorization { code_hash, check_version });
 		Self::deposit_event(Event::UpgradeAuthorized { code_hash, check_version });
 	}
 
-	/// Apply an authorized upgrade, performing any validation checks, and remove the authorization.
-	/// Whether or not the code is set directly depends on the `OnSetCode` configuration of the
-	/// runtime.
-	pub fn do_apply_authorize_upgrade(code: Vec<u8>) -> Result<PostDispatchInfo, DispatchError> {
-		Self::validate_authorized_upgrade(&code[..])?;
-		T::OnSetCode::set_code(code)?;
-		AuthorizedUpgrade::<T>::kill();
-		let post = PostDispatchInfo {
-			// consume the rest of the block to prevent further transactions
-			actual_weight: Some(T::BlockWeights::get().max_block),
-			// no fee for valid upgrade
-			pays_fee: Pays::No,
-		};
-		Ok(post)
-	}
-
-	/// Check that provided `code` can be upgraded to. Namely, check that its hash matches an
-	/// existing authorization and that it meets the specification requirements of `can_set_code`.
-	pub fn validate_authorized_upgrade(code: &[u8]) -> Result<T::Hash, DispatchError> {
+	/// Check that provided `code` is authorized as an upgrade.
+	///
+	/// Returns the [`CodeUpgradeAuthorization`].
+	fn validate_code_is_authorized(
+		code: &[u8],
+	) -> Result<CodeUpgradeAuthorization<T>, DispatchError> {
 		let authorization = AuthorizedUpgrade::<T>::get().ok_or(Error::<T>::NothingAuthorized)?;
 		let actual_hash = T::Hashing::hash(code);
 		ensure!(actual_hash == authorization.code_hash, Error::<T>::Unauthorized);
-		if authorization.check_version {
-			Self::can_set_code(code)?
-		}
-		Ok(actual_hash)
+		Ok(authorization)
 	}
 
 	/// Reclaim the weight for the extrinsic given info and post info.
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
index f842ef6cd7e6f..1e7505c83b377 100644
--- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
@@ -36,6 +36,8 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+extern crate alloc;
+
 use codec::{Decode, DecodeWithMemTracking, Encode};
 use frame_support::{
 	dispatch::{CheckIfFeeless, DispatchResult},
@@ -129,6 +131,10 @@ where
 	const IDENTIFIER: &'static str = S::IDENTIFIER;
 	type Implicit = S::Implicit;
 
+	fn metadata() -> alloc::vec::Vec<TransactionExtensionMetadata> {
+		S::metadata()
+	}
+
 	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
 		self.0.implicit()
 	}
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs
index b6ecbf9d57646..9f43f57bb6752 100644
--- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs
@@ -94,3 +94,18 @@ fn validate_prepare_works() {
 	assert_eq!(ValidateCount::get(), 2);
 	assert_eq!(PrepareCount::get(), 2);
 }
+
+#[test]
+fn metadata_for_wrap_multiple_tx_ext() {
+	let metadata = SkipCheckIfFeeless::<Runtime, (DummyExtension, DummyExtension)>::metadata();
+	let mut expected_metadata = vec![];
+	expected_metadata.extend(DummyExtension::metadata().into_iter());
+	expected_metadata.extend(DummyExtension::metadata().into_iter());
+
+	assert_eq!(metadata.len(), expected_metadata.len());
+	for i in 0..expected_metadata.len() {
+		assert_eq!(metadata[i].identifier, expected_metadata[i].identifier);
+		assert_eq!(metadata[i].ty, expected_metadata[i].ty);
+		assert_eq!(metadata[i].implicit, expected_metadata[i].implicit);
+	}
+}
diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs
index eba4c0964ce71..70ecfbf149e57 100644
--- a/substrate/frame/transaction-payment/src/benchmarking.rs
+++ b/substrate/frame/transaction-payment/src/benchmarking.rs
@@ -45,11 +45,18 @@ mod benchmarks {
 	#[benchmark]
 	fn charge_transaction_payment() {
 		let caller: T::AccountId = account("caller", 0, 0);
-		<<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::endow_account(
-			&caller,
-			<<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::minimum_balance() * 1000u32.into(),
-		);
-		let tip = <<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::minimum_balance();
+		let existential_deposit =
+			<<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::minimum_balance();
+
+		let (amount_to_endow, tip) = if existential_deposit.is_zero() {
+			let min_tip: <<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::Balance =
+				1_000_000_000u32.into();
+			(min_tip * 1000u32.into(), min_tip)
+		} else {
+			(existential_deposit * 1000u32.into(), existential_deposit)
+		};
+
+		<<T as Config>::OnChargeTransaction as payment::OnChargeTransaction<T>>::endow_account(&caller, amount_to_endow);
+
 		let ext: ChargeTransactionPayment<T> =
diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml
index 6c6cd1a3b6cfe..3b5cb982b0d19 100644
--- a/substrate/primitives/tracing/Cargo.toml
+++ b/substrate/primitives/tracing/Cargo.toml
@@ -22,6 +22,7 @@ targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
+regex = { workspace = true, optional = true }
 tracing = { workspace = true }
 tracing-core = { workspace = true }
 tracing-subscriber = { workspace = true, optional = true, features = [
@@ -40,4 +41,4 @@ std = [
 	"tracing/std",
 	"with-tracing",
 ]
-test-utils = ["std"]
+test-utils = ["regex", "regex/std", "std"]
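The optional `regex` dependency added above exists only for the `test-utils` feature: captured logs can contain ANSI colour codes emitted by the `fmt` layer, which would make plain-text assertions brittle. A standalone illustration of the stripping that `get_logs` performs below (assumes only the `regex` crate):

```rust
use regex::Regex;

/// Remove ANSI escape sequences (colours, erase-to-end-of-line) from captured output.
fn strip_ansi(raw: &str) -> String {
	let ansi_escape = Regex::new(r"\x1B\[[0-9;]*[mK]").unwrap();
	ansi_escape.replace_all(raw, "").to_string()
}

fn main() {
	// "\x1B[32m" turns the text green; "\x1B[0m" resets the style.
	let colored = "\x1B[32mINFO\x1B[0m Log entry";
	assert_eq!(strip_ansi(colored), "INFO Log entry");
}
```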
diff --git a/substrate/primitives/tracing/src/lib.rs b/substrate/primitives/tracing/src/lib.rs
index 944f5f7ff43b6..ed6feeebb692c 100644
--- a/substrate/primitives/tracing/src/lib.rs
+++ b/substrate/primitives/tracing/src/lib.rs
@@ -114,7 +119,12 @@ mod types;
 
 /// Try to init a simple tracing subscriber with log compatibility layer.
 ///
-/// Ignores any error. Useful for testing.
+/// Ignores any error. Useful for testing. Uses the default filter for logs.
+///
+/// Related functions:
+/// - [`init_for_tests()`]: Enables `TRACE` level.
+/// - [`test_log_capture::init_log_capture()`]: Captures logs for assertions and/or outputs logs.
+/// - [`capture_test_logs!()`]: A macro for capturing logs within test blocks.
 #[cfg(feature = "std")]
 pub fn try_init_simple() {
 	let _ = tracing_subscriber::fmt()
@@ -131,6 +136,11 @@ pub fn try_init_simple() {
 /// The logs are not shown by default, logs are only shown when the test fails
 /// or if [`nocapture`](https://doc.rust-lang.org/cargo/commands/cargo-test.html#display-options)
 /// is being used.
+///
+/// Related functions:
+/// - [`try_init_simple()`]: Uses the default filter.
+/// - [`test_log_capture::init_log_capture()`]: Captures logs for assertions and/or outputs logs.
+/// - [`capture_test_logs!()`]: A macro for capturing logs within test blocks.
 #[cfg(feature = "std")]
 pub fn init_for_tests() {
 	let _ = tracing_subscriber::fmt()
@@ -263,10 +273,7 @@ pub mod test_log_capture {
 		sync::{Arc, Mutex},
 	};
 	use tracing::level_filters::LevelFilter;
-	use tracing_subscriber::fmt::{
-		format::{DefaultFields, Format},
-		MakeWriter, Subscriber,
-	};
+	use tracing_subscriber::{fmt, fmt::MakeWriter, layer::SubscriberExt, Layer, Registry};
 
 	/// A reusable log capturing struct for unit tests.
 	/// Captures logs written during test execution for assertions.
@@ -327,7 +334,9 @@ pub mod test_log_capture {
 		///     assert_eq!(log_capture.get_logs().trim(), "Log entry");
 		/// ```
 		pub fn get_logs(&self) -> String {
-			String::from_utf8(self.buffer.lock().unwrap().clone()).unwrap()
+			let raw_logs = String::from_utf8(self.buffer.lock().unwrap().clone()).unwrap();
+			let ansi_escape = regex::Regex::new(r"\x1B\[[0-9;]*[mK]").unwrap(); // Regex to match ANSI codes
+			ansi_escape.replace_all(&raw_logs, "").to_string() // Remove ANSI codes
 		}
 
 		/// Returns a clone of the internal buffer for use in `MakeWriter`.
@@ -359,15 +368,18 @@ pub mod test_log_capture {
 		}
 	}
 
-	/// Initialises a log capture utility for testing.
+	/// Initialises a log capture utility for testing, with optional log printing.
 	///
 	/// This function sets up a `LogCapture` instance to capture logs during test execution.
 	/// It also configures a `tracing_subscriber` with the specified maximum log level
-	/// and a writer that directs logs to `LogCapture`.
+	/// and a writer that directs logs to `LogCapture`. If `print_logs` is enabled, logs
+	/// up to `max_level` are also printed to the test output.
 	///
 	/// # Arguments
 	///
-	/// * `max_level` - The maximum log level to capture, which can be converted into `LevelFilter`.
+	/// * `max_level` - The maximum log level to capture and print, which can be converted into
+	///   `LevelFilter`.
+	/// * `print_logs` - If `true`, logs up to `max_level` will also be printed to the test output.
 	///
 	/// # Returns
 	///
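The rework in the next hunk leans on a `tracing-subscriber` property worth spelling out: `Option<L>` itself implements `Layer`, with `None` acting as a no-op. That is what lets the print-to-test-output layer be attached conditionally while `init_log_capture` still returns a single opaque subscriber type. A reduced sketch of the pattern (names are illustrative):

```rust
use tracing_subscriber::{fmt, layer::SubscriberExt, Registry};

fn build_subscriber(print_logs: bool) -> impl tracing::Subscriber + Send + Sync {
	// `Option<Layer>` is itself a layer; `None` is a no-op, so both branches
	// produce the same concrete type and no boxing is required.
	let maybe_print = if print_logs { Some(fmt::layer().with_test_writer()) } else { None };
	Registry::default().with(maybe_print)
}
```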
@@ -383,61 +395,114 @@ pub mod test_log_capture {
 	///     tracing::{info, subscriber, Level},
 	/// };
 	///
-	/// let (log_capture, subscriber) = init_log_capture(Level::INFO);
+	/// let (log_capture, subscriber) = init_log_capture(Level::INFO, false);
 	/// subscriber::with_default(subscriber, || {
 	///     info!("This log will be captured");
 	///     assert!(log_capture.contains("This log will be captured"));
 	/// });
 	/// ```
+	///
+	/// # Usage Guide
+	///
+	/// - If you only need to **capture logs for assertions** without printing them, use
+	///   `init_log_capture(max_level, false)`.
+	/// - If you need both **capturing and printing logs**, use `init_log_capture(max_level, true)`.
+	/// - If you only need to **print logs** but not capture them, use
+	///   `sp_tracing::init_for_tests()`.
 	pub fn init_log_capture(
 		max_level: impl Into<LevelFilter>,
-	) -> (LogCapture, Subscriber) {
+		print_logs: bool,
+	) -> (LogCapture, impl tracing::Subscriber + Send + Sync) {
 		// Create a new log capture instance
 		let log_capture = LogCapture::new();
 
-		// Configure a tracing subscriber to use the log capture as the writer
-		let subscriber = tracing_subscriber::fmt()
-			.with_max_level(max_level) // Set the max log level
-			.with_writer(log_capture.writer()) // Use LogCapture as the writer
-			.finish();
+		// Convert the max log level into LevelFilter
+		let level_filter = max_level.into();
 
-		(log_capture, subscriber)
+		// Create a layer for capturing logs into LogCapture
+		let capture_layer = fmt::layer()
+			.with_writer(log_capture.writer()) // Use LogCapture as the writer
+			.with_filter(level_filter); // Set the max log level
+
+		// Base subscriber with log capturing
+		let subscriber = Registry::default().with(capture_layer);
+
+		// If `print_logs` is enabled, add a layer that prints logs to test output up to `max_level`
+		let test_layer = if print_logs {
+			Some(
+				fmt::layer()
+					.with_test_writer() // Direct logs to test output
+					.with_filter(level_filter), // Apply the same max log level filter
+			)
+		} else {
+			None
+		};
+
+		// Combine the log capture subscriber with the test layer (if applicable)
+		let combined_subscriber = subscriber.with(test_layer);
+
+		(log_capture, combined_subscriber)
 	}
 
 	/// Macro for capturing logs during test execution.
 	///
-	/// It sets up a log subscriber with an optional maximum log level and captures the output.
+	/// This macro sets up a log subscriber with a specified maximum log level
+	/// and an option to print logs to the test output while capturing them.
+	///
+	/// # Arguments
+	///
+	/// - `$max_level`: The maximum log level to capture.
+	/// - `$print_logs`: Whether to also print logs to the test output.
+	/// - `$test`: The block of code where logs are captured.
 	///
 	/// # Examples
+	///
 	/// ```
 	/// use sp_tracing::{
 	///     capture_test_logs,
 	///     tracing::{info, warn, Level},
 	/// };
 	///
-	/// let log_capture = capture_test_logs!(Level::WARN, {
+	/// // Capture logs at WARN level without printing them
+	/// let log_capture = capture_test_logs!(Level::WARN, false, {
 	///     info!("Captured info message");
 	///     warn!("Captured warning");
 	/// });
 	///
-	/// assert!(!log_capture.contains("Captured log message"));
+	/// assert!(!log_capture.contains("Captured info message"));
 	/// assert!(log_capture.contains("Captured warning"));
+	///
+	/// // Capture logs at TRACE level and also print them
+	/// let log_capture = capture_test_logs!(Level::TRACE, true, {
+	///     info!("This will be captured and printed");
+	/// });
+	///
+	/// assert!(log_capture.contains("This will be captured and printed"));
 	/// ```
+	///
+	/// # Related functions:
+	/// - [`init_log_capture()`]: Captures logs for assertions.
+	/// - `sp_tracing::init_for_tests()`: Outputs logs but does not capture them.
 	#[macro_export]
 	macro_rules! capture_test_logs {
-		// Case when max_level is provided
-		($max_level:expr, $test:block) => {{
+		// Case when max_level and print_logs are provided
+		($max_level:expr, $print_logs:expr, $test:block) => {{
 			let (log_capture, subscriber) =
-				sp_tracing::test_log_capture::init_log_capture($max_level);
+				sp_tracing::test_log_capture::init_log_capture($max_level, $print_logs);
 			sp_tracing::tracing::subscriber::with_default(subscriber, || $test);
 			log_capture
 		}};
 
-		// Case when max_level is omitted (defaults to DEBUG)
+		// Case when only max_level is provided (defaults to not printing logs)
+		($max_level:expr, $test:block) => {{
+			capture_test_logs!($max_level, false, $test)
+		}};
+
+		// Case when max_level is omitted (defaults to DEBUG, no printing)
 		($test:block) => {{
-			capture_test_logs!(sp_tracing::tracing::Level::DEBUG, $test)
+			capture_test_logs!(sp_tracing::tracing::Level::DEBUG, false, $test)
 		}};
 	}
 }
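For downstream tests the migration cost of the extra macro argument is low: the old two-argument and one-argument forms still work and default to not printing. A sketch of a test that opts in to mirrored output (the test body is illustrative):

```rust
use sp_tracing::{capture_test_logs, tracing::{warn, Level}};

#[test]
fn warnings_are_captured_and_printed() {
	// `true` mirrors logs to the test output; assertions still run against
	// the captured, ANSI-stripped text.
	let logs = capture_test_logs!(Level::WARN, true, {
		warn!("something went wrong");
	});
	assert!(logs.contains("something went wrong"));
}
```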