diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/release_checklist.md index f71bab419f..8bcb711591 100644 --- a/.github/ISSUE_TEMPLATE/release_checklist.md +++ b/.github/ISSUE_TEMPLATE/release_checklist.md @@ -26,5 +26,5 @@ Link the relevant documentation PRs for this release. - [ ] Update AvalancheGo dependency in scripts/versions.sh for e2e tests. - [ ] Add new entry in compatibility.json for RPCChainVM Compatibility - [ ] Update AvalancheGo compatibility in README -- [ ] Deploy to WAGMI +- [ ] Deploy to Echo/Dispatch - [ ] Confirm goreleaser job has successfully generated binaries by checking the releases page diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 71f2fc2135..2f850a9591 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -3,15 +3,17 @@ name: Bench on: workflow_dispatch: pull_request: + merge_group: + types: [checks_requested] jobs: bench: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "~1.22.8" + go-version-file: "go.mod" - run: go mod download shell: bash - run: ./scripts/build_bench_precompiles.sh diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 639963adec..9112587cdc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -17,6 +17,8 @@ on: pull_request: # The branches below must be a subset of the branches above branches: [ master ] + merge_group: + types: [checks_requested] schedule: - cron: '44 11 * * 4' diff --git a/.github/workflows/publish_docker.yml b/.github/workflows/publish_docker.yml index d1764edc66..407b373739 100644 --- a/.github/workflows/publish_docker.yml +++ b/.github/workflows/publish_docker.yml @@ -8,6 +8,11 @@ on: default: '' required: false type: string + avalanche_version: + description: 'The SHA or tag of avalanchego to use for the base image (must be compatible with the version in go.mod)' + default: '' + required: false + type: string push: tags: @@ -26,8 +31,9 @@ jobs: env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASS: ${{ secrets.DOCKER_PASS }} - DOCKER_REPO: "avaplatform/subnet-evm" + IMAGE_NAME: "avaplatform/subnet-evm" VM_ID: ${{ inputs.vm_id }} PUBLISH: 1 PLATFORMS: "linux/amd64,linux/arm64" + AVALANCHE_VERSION: ${{ inputs.avalanche_version }} run: scripts/build_docker_image.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5af8b527c2..86fc3de66f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ on: jobs: release: # needs: [lint_test, unit_test, e2e_test, simulator_test] - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Git checkout uses: actions/checkout@v4 @@ -24,7 +24,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "~1.22.8" + go-version-file: "./subnet-evm/go.mod" - name: Set up arm64 cross compiler run: | sudo apt-get -y update @@ -46,7 +46,7 @@ jobs: uses: goreleaser/goreleaser-action@v3 with: distribution: goreleaser - version: latest + version: v2.5.1 args: release --clean workdir: ./subnet-evm/ env: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6e573052d4..34c64fe4d3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -7,25 +7,24 @@ on: tags: - "*" pull_request: - -env: - min_go_version: "~1.22.8" + merge_group: + types: [checks_requested] jobs: 
lint_test: name: Lint - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - run: ./scripts/lint_allowed_eth_imports.sh shell: bash - uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.56 + version: v1.63 working-directory: . args: --timeout 10m skip-pkg-cache: true @@ -46,12 +45,12 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-latest, ubuntu-20.04, ubuntu-latest, windows-latest] + os: [macos-latest, ubuntu-22.04, ubuntu-latest, windows-latest] steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: Set timeout on Windows # Windows UT run slower and need a longer timeout shell: bash if: matrix.os == 'windows-latest' @@ -85,7 +84,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: Use Node.js uses: actions/setup-node@v4 with: @@ -126,7 +125,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: Use Node.js uses: actions/setup-node@v4 with: @@ -170,7 +169,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: Install AvalancheGo Release shell: bash run: BASEDIR=/tmp/e2e-test AVALANCHEGO_BUILD_PATH=/tmp/e2e-test/avalanchego ./scripts/install_avalanchego_release.sh @@ -195,6 +194,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Install qemu (required for cross-platform builds) + run: | + sudo apt update + sudo apt -y install qemu-system qemu-user-static - name: Check image build shell: bash run: bash -x scripts/tests.build_docker_image.sh @@ -205,7 +208,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: ${{ env.min_go_version }} + go-version-file: "go.mod" - name: Install AvalancheGo Release shell: bash run: BASEDIR=/tmp/e2e-test AVALANCHEGO_BUILD_PATH=/tmp/e2e-test/avalanchego ./scripts/install_avalanchego_release.sh diff --git a/.golangci.yml b/.golangci.yml index c6c8d5748a..ace8a22dad 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,11 +3,6 @@ run: timeout: 10m tests: true - # default is true. Enables skipping of directories: - # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - skip-dirs-use-default: true - # Include non-test files tagged as test-only. 
- # Context: https://github.com/ava-labs/avalanchego/pull/3173 linters: disable-all: true @@ -18,8 +13,19 @@ linters: - ineffassign - misspell - unconvert + - typecheck - unused + # - staticcheck + - bidichk + - durationcheck + - copyloopvar - whitespace + # - revive # only certain checks enabled + - durationcheck + - gocheckcompilerdirectives + - reassign + - mirror + - tenv linters-settings: gofmt: diff --git a/Dockerfile b/Dockerfile index 047144d21a..5afa01d5f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,20 +1,18 @@ -# syntax=docker/dockerfile:experimental - # ============= Setting up base Stage ================ # AVALANCHEGO_NODE_IMAGE needs to identify an existing node image and should include the tag -ARG AVALANCHEGO_NODE_IMAGE +# This value is not intended to be used but silences a warning +ARG AVALANCHEGO_NODE_IMAGE="invalid-image" # ============= Compilation Stage ================ -FROM golang:1.22.8-bullseye AS builder +FROM --platform=$BUILDPLATFORM golang:1.23.6-bullseye AS builder WORKDIR /build # Copy avalanche dependencies first (intermediate docker image caching) # Copy avalanchego directory if present (for manual CI case, which uses local dependency) COPY go.mod go.sum avalanchego* ./ - # Download avalanche dependencies using go mod -RUN go mod download && go mod tidy -compat=1.22 +RUN go mod download && go mod tidy # Copy the code into the container COPY . . @@ -22,15 +20,39 @@ COPY . . # Ensure pre-existing builds are not available for inclusion in the final image RUN [ -d ./build ] && rm -rf ./build/* || true + +ARG TARGETPLATFORM +ARG BUILDPLATFORM + +# Configure a cross-compiler if the target platform differs from the build platform. +# +# build_env.sh is used to capture the environmental changes required by the build step since RUN +# environment state is not otherwise persistent. +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ] && [ "$BUILDPLATFORM" != "linux/arm64" ]; then \ + apt-get update && apt-get install -y gcc-aarch64-linux-gnu && \ + echo "export CC=aarch64-linux-gnu-gcc" > ./build_env.sh \ + ; elif [ "$TARGETPLATFORM" = "linux/amd64" ] && [ "$BUILDPLATFORM" != "linux/amd64" ]; then \ + apt-get update && apt-get install -y gcc-x86-64-linux-gnu && \ + echo "export CC=x86_64-linux-gnu-gcc" > ./build_env.sh \ + ; else \ + echo "export CC=gcc" > ./build_env.sh \ + ; fi + # Pass in SUBNET_EVM_COMMIT as an arg to allow the build script to set this externally ARG SUBNET_EVM_COMMIT ARG CURRENT_BRANCH -RUN export SUBNET_EVM_COMMIT=$SUBNET_EVM_COMMIT && export CURRENT_BRANCH=$CURRENT_BRANCH && ./scripts/build.sh build/subnet-evm +RUN . 
./build_env.sh && \ + echo "{CC=$CC, TARGETPLATFORM=$TARGETPLATFORM, BUILDPLATFORM=$BUILDPLATFORM}" && \ + export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) && \ + export CURRENT_BRANCH=$CURRENT_BRANCH && \ + export SUBNET_EVM_COMMIT=$SUBNET_EVM_COMMIT && \ + ./scripts/build.sh build/subnet-evm # ============= Cleanup Stage ================ -FROM $AVALANCHEGO_NODE_IMAGE AS builtImage +FROM $AVALANCHEGO_NODE_IMAGE AS execution # Copy the evm binary into the correct location in the container ARG VM_ID=srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy -COPY --from=builder /build/build/subnet-evm /avalanchego/build/plugins/$VM_ID +ENV AVAGO_PLUGIN_DIR="/avalanchego/build/plugins" +COPY --from=builder /build/build/subnet-evm $AVAGO_PLUGIN_DIR/$VM_ID diff --git a/README.md b/README.md index 30bdfb31ab..6af11e89ce 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ # Subnet EVM -[![Build + Test + Release](https://github.com/ava-labs/subnet-evm/actions/workflows/lint-tests-release.yml/badge.svg)](https://github.com/ava-labs/subnet-evm/actions/workflows/lint-tests-release.yml) +[![CI](https://github.com/ava-labs/subnet-evm/actions/workflows/ci.yml/badge.svg)](https://github.com/ava-labs/subnet-evm/actions/workflows/ci.yml) [![CodeQL](https://github.com/ava-labs/subnet-evm/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/ava-labs/subnet-evm/actions/workflows/codeql-analysis.yml) -[Avalanche](https://docs.avax.network/overview/getting-started/avalanche-platform) is a network composed of multiple blockchains. +[Avalanche](https://docs.avax.network/avalanche-l1s) is a network composed of multiple blockchains. Each blockchain is an instance of a Virtual Machine (VM), much like an object in an object-oriented language is an instance of a class. That is, the VM defines the behavior of the blockchain. @@ -18,21 +18,9 @@ The Subnet EVM runs in a separate process from the main AvalancheGo process and ### AvalancheGo Compatibility ```text -[v0.6.0] AvalancheGo@v1.11.0-v1.11.1 (Protocol Version: 33) -[v0.6.1] AvalancheGo@v1.11.0-v1.11.1 (Protocol Version: 33) -[v0.6.2] AvalancheGo@v1.11.2 (Protocol Version: 34) -[v0.6.3] AvalancheGo@v1.11.3-v1.11.9 (Protocol Version: 35) -[v0.6.4] AvalancheGo@v1.11.3-v1.11.9 (Protocol Version: 35) -[v0.6.5] AvalancheGo@v1.11.3-v1.11.9 (Protocol Version: 35) -[v0.6.6] AvalancheGo@v1.11.3-v1.11.9 (Protocol Version: 35) -[v0.6.7] AvalancheGo@v1.11.3-v1.11.9 (Protocol Version: 35) -[v0.6.8] AvalancheGo@v1.11.10 (Protocol Version: 36) -[v0.6.9] AvalancheGo@v1.11.11-v1.11.12 (Protocol Version: 37) -[v0.6.10] AvalancheGo@v1.11.11-v1.11.12 (Protocol Version: 37) -[v0.6.11] AvalancheGo@v1.11.11-v1.11.12 (Protocol Version: 37) -[v0.6.12] AvalancheGo@v1.11.13/v1.12.0 (Protocol Version: 38) [v0.7.0] AvalancheGo@v1.12.0-v1.12.1 (Protocol Version: 38) -[v0.7.1] AvalancheGo@v1.12.0-v1.12.1 (Protocol Version: 38) +[v0.7.1] AvalancheGo@v1.12.2 (Protocol Version: 39) +[v0.7.2] AvalancheGo@v1.12.2/1.13.0-fuji (Protocol Version: 39) ``` ## API @@ -72,7 +60,7 @@ To support these changes, there have been a number of changes to the SubnetEVM b ### Clone Subnet-evm -First install Go 1.22.8 or later. Follow the instructions [here](https://go.dev/doc/install). You can verify by running `go version`. +First install Go 1.23.6 or later. Follow the instructions [here](https://go.dev/doc/install). You can verify by running `go version`. Set `$GOPATH` environment variable properly for Go to look for Go Workspaces. Please read [this](https://go.dev/doc/code) for details. 
You can verify by running `echo $GOPATH`.
@@ -96,5 +96,5 @@ To run a local network, it is recommended to use the [avalanche-cli](https://git
 
 There are two options when using the Avalanche-CLI:
 
-1. Use an official Subnet-EVM release: https://docs.avax.network/subnets/build-first-subnet
-2. Build and deploy a locally built (and optionally modified) version of Subnet-EVM: https://docs.avax.network/subnets/create-custom-subnet
+1. Use an official Subnet-EVM release:
+2. Build and deploy a locally built (and optionally modified) version of Subnet-EVM:
diff --git a/RELEASES.md b/RELEASES.md
index 29febf1347..533c81eedc 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -2,6 +2,47 @@
 
 ## Pending Release
 
+## [v0.7.2](https://github.com/ava-labs/subnet-evm/releases/tag/v0.7.2)
+
+This version is backwards compatible with [v0.7.0](https://github.com/ava-labs/subnet-evm/releases/tag/v0.7.0). It is optional, **but strongly encouraged as it fixes an important bug in uptime tracking.**
+
+### AvalancheGo Compatibility
+
+The plugin version is unchanged at 39 and is compatible with AvalancheGo version v1.12.2.
+
+### Updates
+
+* Fixed concurrency issue in validators/uptime manager
+* Bump golang version to v1.23.6
+* Bump golangci-lint to v1.63 and add linters
+
+## [v0.7.1](https://github.com/ava-labs/subnet-evm/releases/tag/v0.7.1)
+
+This release focuses on code quality improvements and post-Etna cleanups.
+
+### Compatibility
+
+The plugin version is **updated** to 39 and is compatible with AvalancheGo version v1.12.2.
+
+### Updates
+
+* Moved client type and structs to new `plugin/evm/client` package
+* Fixed statedb improper copy issue
+* Limited the maximum number of queryable reward percentiles to 100 in the `eth_feeHistory` API
+* Refactored `trie_prefetcher.go` to be structurally similar to upstream
+* Removed deprecated legacy gossip handler and metrics
+* Removed unnecessary locks in mempool
+
+## [v0.7.0](https://github.com/ava-labs/subnet-evm/releases/tag/v0.7.0)
+
+### Updates
+
+- Changed default write option from `Sync` to `NoSync` in PebbleDB
+
+### Fixes
+
+- Fixed database close on shutdown
+
 * Refactored trie_prefetcher.go to be structurally similar to upstream.
 
 ## [v0.7.0](https://github.com/ava-labs/subnet-evm/releases/tag/v0.7.0)
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 1e30d27f19..ee46105203 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -1210,7 +1210,6 @@ func TestUnpackRevert(t *testing.T) {
 		{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
 	}
 	for index, c := range cases {
-		index, c := index, c
 		t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
 			t.Parallel()
 			got, err := UnpackRevert(common.Hex2Bytes(c.input))
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index d7fe4c03ea..1d48f6b704 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -43,9 +43,6 @@ import (
 	"github.com/ava-labs/subnet-evm/accounts/abi"
 )
 
-// BindHook is a callback function that can be used to customize the binding.
-type BindHook func(lang Lang, pkg string, types []string, contracts map[string]*TmplContract, structs map[string]*TmplStruct) (data interface{}, templateSource string, err error)
-
 // Lang is a target programming language selector to generate bindings for.
type Lang int @@ -53,7 +50,7 @@ const ( LangGo Lang = iota ) -func IsKeyWord(arg string) bool { +func isKeyWord(arg string) bool { switch arg { case "break": case "case": @@ -101,10 +98,10 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string, bindHook BindHook) (string, error) { var ( // contracts is the map of each individual contract requested binding - contracts = make(map[string]*TmplContract) + contracts = make(map[string]*tmplContract) // structs is the map of all redeclared structs shared by passed contracts. - structs = make(map[string]*TmplStruct) + structs = make(map[string]*tmplStruct) // isLib is the map used to flag each encountered library as such isLib = make(map[string]struct{}) @@ -125,11 +122,11 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s // Extract the call and transact methods; events, struct definitions; and sort them alphabetically var ( - calls = make(map[string]*TmplMethod) - transacts = make(map[string]*TmplMethod) + calls = make(map[string]*tmplMethod) + transacts = make(map[string]*tmplMethod) events = make(map[string]*tmplEvent) - fallback *TmplMethod - receive *TmplMethod + fallback *tmplMethod + receive *tmplMethod // identifiers are used to detect duplicated identifiers of functions // and events. For all calls, transacts and events, abigen will generate @@ -172,7 +169,7 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s normalized.Inputs = make([]abi.Argument, len(original.Inputs)) copy(normalized.Inputs, original.Inputs) for j, input := range normalized.Inputs { - if input.Name == "" || IsKeyWord(input.Name) { + if input.Name == "" || isKeyWord(input.Name) { normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) } if hasStruct(input.Type) { @@ -191,9 +188,9 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s } // Append the methods to the call or transact lists if original.IsConstant() { - calls[original.Name] = &TmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} + calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} } else { - transacts[original.Name] = &TmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} + transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)} } } for _, original := range evmABI.Events { @@ -224,7 +221,7 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s normalized.Inputs = make([]abi.Argument, len(original.Inputs)) copy(normalized.Inputs, original.Inputs) for j, input := range normalized.Inputs { - if input.Name == "" || IsKeyWord(input.Name) { + if input.Name == "" || isKeyWord(input.Name) { normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) } // Event is a bit special, we need to define event struct in binding, @@ -245,12 +242,12 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s } // Add two special fallback functions if they exist if evmABI.HasFallback() { - fallback = &TmplMethod{Original: evmABI.Fallback} + fallback = &tmplMethod{Original: evmABI.Fallback} } if evmABI.HasReceive() { - receive = &TmplMethod{Original: evmABI.Receive} + receive = 
&tmplMethod{Original: evmABI.Receive} } - contracts[types[i]] = &TmplContract{ + contracts[types[i]] = &tmplContract{ Type: capitalise(types[i]), InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""), InputBin: strings.TrimPrefix(strings.TrimSpace(bytecodes[i]), "0x"), @@ -269,7 +266,7 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s } // Parse library references. for pattern, name := range libs { - matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin)) + matched, err := regexp.MatchString("__\\$"+pattern+"\\$__", contracts[types[i]].InputBin) if err != nil { log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err) } @@ -341,14 +338,10 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s // bindType is a set of type binders that convert Solidity types to some supported // programming language types. -var bindType = map[Lang]func(kind abi.Type, structs map[string]*TmplStruct) string{ +var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ LangGo: bindTypeGo, } -var bindTypeNew = map[Lang]func(kind abi.Type, structs map[string]*TmplStruct) string{ - LangGo: bindTypeNewGo, -} - // bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones. func bindBasicTypeGo(kind abi.Type) string { switch kind.T { @@ -373,43 +366,10 @@ func bindBasicTypeGo(kind abi.Type) string { } } -// bindTypeNewGo converts new types to Go ones. -func bindTypeNewGo(kind abi.Type, structs map[string]*TmplStruct) string { - switch kind.T { - case abi.TupleTy: - return structs[kind.TupleRawName+kind.String()].Name + "{}" - case abi.ArrayTy: - return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs) + "{}" - case abi.SliceTy: - return "nil" - case abi.AddressTy: - return "common.Address{}" - case abi.IntTy, abi.UintTy: - parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String()) - switch parts[2] { - case "8", "16", "32", "64": - return "0" - } - return "new(big.Int)" - case abi.FixedBytesTy: - return fmt.Sprintf("[%d]byte", kind.Size) + "{}" - case abi.BytesTy: - return "[]byte{}" - case abi.FunctionTy: - return "[24]byte{}" - case abi.BoolTy: - return "false" - case abi.StringTy: - return `""` - default: - return "nil" - } -} - // bindTypeGo converts solidity types to Go ones. Since there is no clear mapping // from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly // mapped will use an upscaled type (e.g. BigDecimal). -func bindTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { +func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: return structs[kind.TupleRawName+kind.String()].Name @@ -424,13 +384,13 @@ func bindTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { // bindTopicType is a set of type binders that convert Solidity types to some // supported programming language topic types. -var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*TmplStruct) string{ +var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ LangGo: bindTopicTypeGo, } // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same // functionality as for simple types, but dynamic types get converted to hashes. 
-func bindTopicTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { +func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeGo(kind, structs) // todo(rjl493456442) according solidity documentation, indexed event @@ -447,14 +407,14 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { // bindStructType is a set of type binders that convert Solidity tuple types to some supported // programming language struct definition. -var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*TmplStruct) string{ +var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ LangGo: bindStructTypeGo, } // bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping // in the given map. // Notably, this function will resolve and record nested struct recursively. -func bindStructTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { +func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: // We compose a raw struct name and a canonical parameter expression @@ -483,7 +443,7 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*TmplStruct) string { } name = capitalise(name) - structs[id] = &TmplStruct{ + structs[id] = &tmplStruct{ Name: name, Fields: fields, } @@ -568,11 +528,3 @@ func hasStruct(t abi.Type) bool { return false } } - -func mkList(args ...interface{}) []interface{} { - return args -} - -func add(a, b int) int { - return a + b -} diff --git a/accounts/abi/bind/bind_extra.go b/accounts/abi/bind/bind_extra.go new file mode 100644 index 0000000000..a622058edc --- /dev/null +++ b/accounts/abi/bind/bind_extra.go @@ -0,0 +1,70 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bind + +import ( + "fmt" + "regexp" + + "github.com/ava-labs/subnet-evm/accounts/abi" +) + +type ( + // These types are exported for use in bind/precompilebind + TmplContract = tmplContract + TmplMethod = tmplMethod + TmplStruct = tmplStruct +) + +// BindHook is a callback function that can be used to customize the binding. +type BindHook func(lang Lang, pkg string, types []string, contracts map[string]*tmplContract, structs map[string]*tmplStruct) (data any, templateSource string, err error) + +func IsKeyWord(arg string) bool { + return isKeyWord(arg) +} + +var bindTypeNew = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ + LangGo: bindTypeNewGo, +} + +// bindTypeNewGo converts new types to Go ones. 
+func bindTypeNewGo(kind abi.Type, structs map[string]*tmplStruct) string { + switch kind.T { + case abi.TupleTy: + return structs[kind.TupleRawName+kind.String()].Name + "{}" + case abi.ArrayTy: + return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs) + "{}" + case abi.SliceTy: + return "nil" + case abi.AddressTy: + return "common.Address{}" + case abi.IntTy, abi.UintTy: + parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String()) + switch parts[2] { + case "8", "16", "32", "64": + return "0" + } + return "new(big.Int)" + case abi.FixedBytesTy: + return fmt.Sprintf("[%d]byte", kind.Size) + "{}" + case abi.BytesTy: + return "[]byte{}" + case abi.FunctionTy: + return "[24]byte{}" + case abi.BoolTy: + return "false" + case abi.StringTy: + return `""` + default: + return "nil" + } +} + +func mkList(args ...any) []any { + return args +} + +func add(a, b int) int { + return a + b +} diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 98b4c91c48..09c724a63c 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -2179,7 +2179,7 @@ func golangBindings(t *testing.T, overload bool) { if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.22") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.23") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) diff --git a/accounts/abi/bind/precompilebind/precompile_bind_test.go b/accounts/abi/bind/precompilebind/precompile_bind_test.go index a631f0e086..54bbf9dcf8 100644 --- a/accounts/abi/bind/precompilebind/precompile_bind_test.go +++ b/accounts/abi/bind/precompilebind/precompile_bind_test.go @@ -695,7 +695,7 @@ func TestPrecompileBind(t *testing.T) { if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.22") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.23") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index 7b78dafa26..96b0d06ef5 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -31,30 +31,30 @@ import "github.com/ava-labs/subnet-evm/accounts/abi" // tmplData is the data structure required to fill the binding template. type tmplData struct { Package string // Name of the package to place the generated file in - Contracts map[string]*TmplContract // List of contracts to generate into this file + Contracts map[string]*tmplContract // List of contracts to generate into this file Libraries map[string]string // Map the bytecode's link pattern to the library name - Structs map[string]*TmplStruct // Contract struct type definitions + Structs map[string]*tmplStruct // Contract struct type definitions } -// TmplContract contains the data needed to generate an individual contract binding. -type TmplContract struct { +// tmplContract contains the data needed to generate an individual contract binding. 
+type tmplContract struct { Type string // Type name of the main contract binding InputABI string // JSON ABI used as the input to generate the binding from InputBin string // Optional EVM bytecode used to generate deploy code from FuncSigs map[string]string // Optional map: string signature -> 4-byte signature Constructor abi.Method // Contract constructor for deploy parametrization - Calls map[string]*TmplMethod // Contract calls that only read state data - Transacts map[string]*TmplMethod // Contract calls that write state data - Fallback *TmplMethod // Additional special fallback function - Receive *TmplMethod // Additional special receive function + Calls map[string]*tmplMethod // Contract calls that only read state data + Transacts map[string]*tmplMethod // Contract calls that write state data + Fallback *tmplMethod // Additional special fallback function + Receive *tmplMethod // Additional special receive function Events map[string]*tmplEvent // Contract events accessors Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs Library bool // Indicator whether the contract is a library } -// TmplMethod is a wrapper around an abi.Method that contains a few preprocessed +// tmplMethod is a wrapper around an abi.Method that contains a few preprocessed // and cached data fields. -type TmplMethod struct { +type tmplMethod struct { Original abi.Method // Original method as parsed by the abi package Normalized abi.Method // Normalized version of the parsed method (capitalized names, non-anonymous args/returns) Structured bool // Whether the returns should be accumulated into a struct @@ -75,9 +75,9 @@ type tmplField struct { SolKind abi.Type // Raw abi type information } -// TmplStruct is a wrapper around an abi.tuple and contains an auto-generated +// tmplStruct is a wrapper around an abi.tuple and contains an auto-generated // struct name. -type TmplStruct struct { +type tmplStruct struct { Name string // Auto-generated struct name(before solidity v0.5.11) or raw name. Fields []*tmplField // Struct fields definition depends on the binding language. 
} diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go index 1da8c9dae1..63f9fb2ffe 100644 --- a/accounts/abi/event_test.go +++ b/accounts/abi/event_test.go @@ -341,7 +341,6 @@ func TestEventTupleUnpack(t *testing.T) { for _, tc := range testCases { assert := assert.New(t) - tc := tc t.Run(tc.name, func(t *testing.T) { err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert) if tc.error == "" { diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go index 99bf47200e..c76866c369 100644 --- a/accounts/abi/pack_test.go +++ b/accounts/abi/pack_test.go @@ -44,7 +44,6 @@ import ( func TestPack(t *testing.T) { t.Parallel() for i, test := range packUnpackTests { - i, test := i, test t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() encb, err := hex.DecodeString(test.packed) diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index 5d90cdb763..624032968d 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -182,7 +182,6 @@ var reflectTests = []reflectTest{ func TestReflectNameToStruct(t *testing.T) { t.Parallel() for _, test := range reflectTests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc)) diff --git a/accounts/abi/topics_test.go b/accounts/abi/topics_test.go index 691c2c09dc..cf9ca32b4f 100644 --- a/accounts/abi/topics_test.go +++ b/accounts/abi/topics_test.go @@ -147,7 +147,6 @@ func TestMakeTopics(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() got, err := MakeTopics(tt.args.query...) @@ -383,7 +382,6 @@ func TestParseTopics(t *testing.T) { tests := setupTopicsTests() for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() createObj := tt.args.createObj() @@ -403,7 +401,6 @@ func TestParseTopicsIntoMap(t *testing.T) { tests := setupTopicsTests() for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() outMap := make(map[string]interface{}) diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index 2454bde1fb..ae55cf3250 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -399,7 +399,6 @@ func TestMethodMultiReturn(t *testing.T) { "Can not unpack into a slice with wrong types", }} for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { require := require.New(t) err := abi.UnpackIntoInterface(tc.dest, "multi", data) @@ -957,7 +956,7 @@ func TestOOMMaliciousInput(t *testing.T) { } encb, err := hex.DecodeString(test.enc) if err != nil { - t.Fatalf("invalid hex: %s" + test.enc) + t.Fatalf("invalid hex: %s", test.enc) } _, err = abi.Methods["method"].Outputs.UnpackValues(encb) if err == nil { diff --git a/cmd/abigen/namefilter.go b/cmd/abigen/namefilter.go index e43cdf38e0..5fcf6e17a0 100644 --- a/cmd/abigen/namefilter.go +++ b/cmd/abigen/namefilter.go @@ -7,7 +7,6 @@ // original code from which it is derived. // // Much love to the original authors for their work. -// ********** package main import ( diff --git a/cmd/abigen/namefilter_test.go b/cmd/abigen/namefilter_test.go index 43dd28707e..d6c9a0a932 100644 --- a/cmd/abigen/namefilter_test.go +++ b/cmd/abigen/namefilter_test.go @@ -7,7 +7,6 @@ // original code from which it is derived. // // Much love to the original authors for their work. 
-// ********** package main import ( diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 2304a54af6..9f653cebea 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -429,7 +429,7 @@ func TestT9n(t *testing.T) { ok, err := cmpJson(have, want) switch { case err != nil: - t.Logf(string(have)) + t.Log(string(have)) t.Fatalf("test %d, json parsing failed: %v", i, err) case !ok: t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) @@ -561,7 +561,7 @@ func TestB11r(t *testing.T) { ok, err := cmpJson(have, want) switch { case err != nil: - t.Logf(string(have)) + t.Log(string(have)) t.Fatalf("test %d, json parsing failed: %v", i, err) case !ok: t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) diff --git a/cmd/simulator/README.md b/cmd/simulator/README.md index f05304c079..b199106934 100644 --- a/cmd/simulator/README.md +++ b/cmd/simulator/README.md @@ -40,7 +40,7 @@ Once you've built AvalancheGo, open the AvalancheGo directory in a separate term WARNING: -The staking-enabled flag is only for local testing. Disabling staking serves two functions explicitly for testing purposes: +The `--sybil-protection-enabled=false` flag is only suitable for local testing. Disabling staking serves two functions explicitly for testing purposes: 1. Ignore stake weight on the P-Chain and count each connected peer as having a stake weight of 1 2. Automatically opts in to validate every Subnet diff --git a/cmd/simulator/config/flags.go b/cmd/simulator/config/flags.go index 8fb3d3c5fd..95ffe96603 100644 --- a/cmd/simulator/config/flags.go +++ b/cmd/simulator/config/flags.go @@ -125,5 +125,5 @@ func addSimulatorFlags(fs *pflag.FlagSet) { fs.String(LogLevelKey, "info", "Specify the log level to use in the simulator") fs.Uint64(BatchSizeKey, 100, "Specify the batchsize for the worker to issue and confirm txs") fs.Uint64(MetricsPortKey, 8082, "Specify the port to use for the metrics server") - fs.String(MetricsOutputKey, "", "Specify the file to write metrics in json format, or empy to write to stdout (defaults to stdout)") + fs.String(MetricsOutputKey, "", "Specify the file to write metrics in json format, or empty to write to stdout (defaults to stdout)") } diff --git a/cmd/simulator/load/loader.go b/cmd/simulator/load/loader.go index 31e1902a41..57fa6fce70 100644 --- a/cmd/simulator/load/loader.go +++ b/cmd/simulator/load/loader.go @@ -66,7 +66,6 @@ func (l *Loader[T]) Execute(ctx context.Context) error { log.Info("Starting tx agents...") eg := errgroup.Group{} for _, agent := range agents { - agent := agent eg.Go(func() error { return agent.Execute(ctx) }) @@ -100,8 +99,6 @@ func (l *Loader[T]) ConfirmReachedTip(ctx context.Context) error { eg := errgroup.Group{} for i, client := range l.clients { - i := i - client := client eg.Go(func() error { for { latestHeight, err := client.LatestHeight(ctx) diff --git a/compatibility.json b/compatibility.json index 435fda56ee..e2c9603394 100644 --- a/compatibility.json +++ b/compatibility.json @@ -1,19 +1,7 @@ { "rpcChainVMProtocolVersion": { - "v0.7.1": 38, - "v0.7.0": 38, - "v0.6.12": 38, - "v0.6.11": 37, - "v0.6.10": 37, - "v0.6.9": 37, - "v0.6.8": 36, - "v0.6.7": 35, - "v0.6.6": 35, - "v0.6.5": 35, - "v0.6.4": 35, - "v0.6.3": 35, - "v0.6.2": 34, - "v0.6.1": 33, - "v0.6.0": 33 + "v0.7.2": 39, + "v0.7.1": 39, + "v0.7.0": 38 } } \ No newline at end of file diff --git a/consensus/dummy/dynamic_fees.go b/consensus/dummy/dynamic_fees.go index b6c9db6480..4a308147d6 100644 --- 
a/consensus/dummy/dynamic_fees.go +++ b/consensus/dummy/dynamic_fees.go @@ -57,15 +57,14 @@ func CalcBaseFee(config *params.ChainConfig, feeConfig commontype.FeeConfig, par // Add in the gas used by the parent block in the correct place // If the parent consumed gas within the rollup window, add the consumed // gas in. - expectedRollUp := params.RollupWindow - if roll < expectedRollUp { - slot := expectedRollUp - 1 - roll + if roll < params.RollupWindow { + slot := params.RollupWindow - 1 - roll start := slot * wrappers.LongLen updateLongWindow(newRollupWindow, start, parent.GasUsed) } // Calculate the amount of gas consumed within the rollup window. - totalGas := sumLongWindow(newRollupWindow, int(expectedRollUp)) + totalGas := sumLongWindow(newRollupWindow, params.RollupWindow) if totalGas == parentGasTarget { return newRollupWindow, baseFee, nil @@ -94,9 +93,9 @@ func CalcBaseFee(config *params.ChainConfig, feeConfig commontype.FeeConfig, par // for the interval during which no blocks were produced. // We use roll/rollupWindow, so that the transition is applied for every [rollupWindow] seconds // that has elapsed between the parent and this block. - if roll > expectedRollUp { + if roll > params.RollupWindow { // Note: roll/params.RollupWindow must be greater than 1 since we've checked that roll > params.RollupWindow - baseFeeDelta = new(big.Int).Mul(baseFeeDelta, new(big.Int).SetUint64(roll/expectedRollUp)) + baseFeeDelta = new(big.Int).Mul(baseFeeDelta, new(big.Int).SetUint64(roll/params.RollupWindow)) } baseFee.Sub(baseFee, baseFeeDelta) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 786a9ff31d..a019a71e4d 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -294,7 +294,6 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { - tt := tt t.Run(tt.Name, func(t *testing.T) { t.Parallel() tt.testFunc(t, create) diff --git a/core/evm.go b/core/evm.go index 19796055bf..663e8f7938 100644 --- a/core/evm.go +++ b/core/evm.go @@ -112,7 +112,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common // NewEVMBlockContextWithPredicateResults creates a new context for use in the EVM with an override for the predicate results that is not present // in header.Extra. // This function is used to create a BlockContext when the header Extra data is not fully formed yet and it's more efficient to pass in predicateResults -// directly rather than re-encode the latest results when executing each individaul transaction. +// directly rather than re-encode the latest results when executing each individual transaction. 
func NewEVMBlockContextWithPredicateResults(header *types.Header, chain ChainContext, author *common.Address, predicateBytes []byte) vm.BlockContext { extra := bytes.Clone(header.Extra) if len(predicateBytes) > 0 { @@ -140,7 +140,6 @@ func newEVMBlockContext(header *types.Header, chain ChainContext, author *common if header.ExcessBlobGas != nil { blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) } - return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: Transfer, diff --git a/core/predicate_check_test.go b/core/predicate_check_test.go index 5dfcc43ded..2a19fac4cf 100644 --- a/core/predicate_check_test.go +++ b/core/predicate_check_test.go @@ -244,7 +244,7 @@ func TestCheckPredicate(t *testing.T) { }, expectedErr: nil, }, - "two predicates niether named by access list": { + "two predicates neither named by access list": { gas: 61600, predicateContext: predicateContext, createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { @@ -293,7 +293,6 @@ func TestCheckPredicate(t *testing.T) { expectedErr: ErrIntrinsicGas, }, } { - test := test t.Run(name, func(t *testing.T) { require := require.New(t) // Create the rules from TestChainConfig and update the predicates based on the test params diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index fe78e88643..0e151a091e 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -291,10 +291,10 @@ func TestBlockReceiptStorage(t *testing.T) { // Insert the receipt slice into the database and check presence WriteReceipts(db, hash, 0, receipts) if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 { - t.Fatalf("no receipts returned") + t.Fatal("no receipts returned") } else { if err := checkReceiptsRLP(rs, receipts); err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } } // Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed) @@ -308,7 +308,7 @@ func TestBlockReceiptStorage(t *testing.T) { } // Ensure that receipts without metadata can be returned without the block body too if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil { - t.Fatalf(err.Error()) + t.Fatal(err) } // Sanity check that body and header alone without the receipt is a full purge WriteHeader(db, header) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 0aaa3a27f4..a8668a6ef7 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -920,6 +920,8 @@ func (t *Tree) disklayer() *diskLayer { case *diskLayer: return layer case *diffLayer: + layer.lock.RLock() + defer layer.lock.RUnlock() return layer.origin default: panic(fmt.Sprintf("%T: undefined layer", snap)) @@ -951,7 +953,7 @@ func (t *Tree) generating() (bool, error) { return layer.genMarker != nil, nil } -// DiskRoot is a external helper function to return the disk layer root. +// DiskRoot is an external helper function to return the disk layer root. 
func (t *Tree) DiskRoot() common.Hash { t.lock.Lock() defer t.lock.Unlock() diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go index ef75fe2396..ca57b76b79 100644 --- a/core/state/trie_prefetcher_extra_test.go +++ b/core/state/trie_prefetcher_extra_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/ethdb" - ethmetrics "github.com/ava-labs/libevm/metrics" "github.com/ava-labs/libevm/triedb" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state/snapshot" @@ -43,9 +42,6 @@ const ( // should be run against a state including around 100m storage entries. func BenchmarkPrefetcherDatabase(b *testing.B) { require := require.New(b) - metricsEnabled := ethmetrics.Enabled - ethmetrics.Enabled = true - defer func() { ethmetrics.Enabled = metricsEnabled }() dir := b.TempDir() if env := os.Getenv("TEST_DB_DIR"); env != "" { diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 42ee81b741..1ffbc4322e 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -230,9 +230,6 @@ type LegacyPool struct { signer types.Signer mu sync.RWMutex - // [currentStateLock] is required to allow concurrent access to address nonces - // and balances during reorgs and gossip handling. - currentStateLock sync.Mutex // closed when the transaction pool is stopped. Any goroutine can listen // to this to be notified if it should shut down. generalShutdownChan chan struct{} @@ -688,9 +685,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - opts := &txpool.ValidationOptionsWithState{ State: pool.currentState, Rules: pool.chainconfig.Rules( @@ -1504,9 +1498,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { return } pool.currentHead.Store(newHead) - pool.currentStateLock.Lock() pool.currentState = statedb - pool.currentStateLock.Unlock() pool.pendingNonces = newNoncer(statedb) // when we reset txPool we should explicitly check if fee struct for min base fee has changed @@ -1530,9 +1522,6 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. 
func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Track the promoted transactions to broadcast them at once var promoted []*types.Transaction @@ -1739,9 +1728,6 @@ func (pool *LegacyPool) truncateQueue() { // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function func (pool *LegacyPool) demoteUnexecutables() { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Iterate over all accounts and demote any non-executable transactions gasLimit := pool.currentHead.Load().GasLimit for addr, list := range pool.pending { diff --git a/eth/filters/api.go b/eth/filters/api.go index 8393e5555e..490ac562e9 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -350,7 +350,6 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc select { case logs := <-matchedLogs: for _, log := range logs { - log := log notifier.Notify(rpcSub.ID, &log) } case <-rpcSub.Err(): // client send an unsubscribe request diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 7c70ae9b6b..5805179793 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -45,6 +45,10 @@ var ( errBeyondHistoricalLimit = errors.New("request beyond historical limit") ) +const ( + maxQueryLimit = 100 +) + // txGasAndReward is sorted in ascending order based on reward type txGasAndReward struct { gasUsed uint64 @@ -173,6 +177,9 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL if blocks < 1 { return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks } + if len(rewardPercentiles) > maxQueryLimit { + return common.Big0, nil, nil, nil, fmt.Errorf("%w: over the query limit %d", errInvalidPercentile, maxQueryLimit) + } if blocks > oracle.maxCallBlockHistory { log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", oracle.maxCallBlockHistory) blocks = oracle.maxCallBlockHistory diff --git a/go.mod b/go.mod index 84398572a3..7e505c7625 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module github.com/ava-labs/subnet-evm -go 1.22.8 +go 1.23.6 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.12.1-0.20250109213120-2fb6d3f63236 - github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2 + github.com/ava-labs/avalanchego v1.13.0-fuji-rc.2.0.20250312161932-b8afc142faa7 + github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 @@ -28,19 +28,20 @@ require ( github.com/spf13/cast v1.5.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.25.7 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.5.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.31.0 - golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/mod v0.18.0 + golang.org/x/crypto v0.32.0 + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e + golang.org/x/mod v0.22.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 golang.org/x/time v0.3.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/tools v0.29.0 + 
google.golang.org/protobuf v1.35.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -48,7 +49,7 @@ require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/ava-labs/coreth v0.13.9-0.20250109212847-a0898a97c321 // indirect + github.com/ava-labs/coreth v0.14.1-libevm.rc.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect @@ -63,7 +64,7 @@ require ( github.com/compose-spec/compose-go v1.20.2 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/distribution/reference v0.5.0 // indirect @@ -120,7 +121,6 @@ require ( github.com/mr-tron/base58 v1.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect @@ -140,6 +140,7 @@ require ( github.com/subosito/gotenv v1.3.0 // indirect github.com/supranational/blst v0.3.13 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect + github.com/thepudds/fzgen v0.4.3 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect @@ -153,11 +154,10 @@ require ( go.opentelemetry.io/otel/trace v1.22.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.22.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect diff --git a/go.sum b/go.sum index 12fce58e4f..635f670eb1 100644 --- a/go.sum +++ b/go.sum @@ -62,12 +62,12 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ava-labs/avalanchego v1.12.1-0.20250109213120-2fb6d3f63236 h1:s2tPHtOjMJLfqC7n4QB4QwbWMUJAiZWImUmMjcDQD+E= -github.com/ava-labs/avalanchego v1.12.1-0.20250109213120-2fb6d3f63236/go.mod h1:yGx8w2ZkxQRpv+I3WszsvtZk1bGSwkrmL3CudLEQNec= -github.com/ava-labs/coreth v0.13.9-0.20250109212847-a0898a97c321 h1:Ou3VYMpQT/9VxRVZpFSpOEq9kN2SdIto9ta1GOdQ+04= -github.com/ava-labs/coreth v0.13.9-0.20250109212847-a0898a97c321/go.mod 
h1:RHovHXYBauSKgWEg0wlu37kL0vFXDdINGTwbdiQn5EE= -github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2 h1:CVbn0hSsPCl6gCkTCnqwuN4vtJgdVbkCqLXzYAE7qF8= -github.com/ava-labs/libevm v1.13.14-0.1.0.rc-2/go.mod h1:yBctIV/wnxXTF38h95943jvpuk4aj07TrjbpoGor6LQ= +github.com/ava-labs/avalanchego v1.13.0-fuji-rc.2.0.20250312161932-b8afc142faa7 h1:lOwS2CoDvBxem0ydGNl+HdxzFFKPIEryQfyATqSD+Ws= +github.com/ava-labs/avalanchego v1.13.0-fuji-rc.2.0.20250312161932-b8afc142faa7/go.mod h1:pTFY9shpYMDDM5DDIMnQXNoYXiRSG9/gS0wcnE/2RLI= +github.com/ava-labs/coreth v0.14.1-libevm.rc.1 h1:FOWuBIVowuZ4fu+G0WVrjJjKXh17kG4ZgvpMQs4hlUo= +github.com/ava-labs/coreth v0.14.1-libevm.rc.1/go.mod h1:ky7EBkfihY1dQ6wU41SHB9RD2jLxnDBI6T9Rjlp/xRU= +github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3 h1:1CWGo2icnX9dRqGQl7CFywYGIZWxe+ucy0w8NAsVTWE= +github.com/ava-labs/libevm v1.13.14-0.2.0.rc.3/go.mod h1:+Iol+sVQ1KyoBsHf3veyrBmHCXr3xXRWq6ZXkgVfNLU= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -140,8 +140,8 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= @@ -304,8 +304,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -384,6 +384,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern 
v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -474,14 +476,14 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -589,8 +591,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= @@ -598,8 +600,8 @@ github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= 
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/thepudds/fzgen v0.4.2 h1:HlEHl5hk2/cqEomf2uK5SA/FeJc12s/vIHmOG+FbACw= -github.com/thepudds/fzgen v0.4.2/go.mod h1:kHCWdsv5tdnt32NIHYDdgq083m6bMtaY0M+ipiO9xWE= +github.com/thepudds/fzgen v0.4.3 h1:srUP/34BulQaEwPP/uHZkdjUcUjIzL7Jkf4CBVryiP8= +github.com/thepudds/fzgen v0.4.3/go.mod h1:BhhwtRhzgvLWAjjcHDJ9pEiLD2Z9hrVIFjBCHJ//zJ4= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -679,8 +681,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -691,8 +693,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= -golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -719,8 +721,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 
h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -765,8 +767,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -859,12 +861,12 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -938,8 +940,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.29.0 
h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1047,8 +1049,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/flags/flags_test.go b/internal/flags/flags_test.go index ce78870dcd..294c8149e9 100644 --- a/internal/flags/flags_test.go +++ b/internal/flags/flags_test.go @@ -27,7 +27,6 @@ package flags import ( - "os" "os/user" "runtime" "testing" @@ -61,7 +60,7 @@ func TestPathExpansion(t *testing.T) { } } - os.Setenv(`DDDXXX`, `/tmp`) + t.Setenv(`DDDXXX`, `/tmp`) for test, expected := range tests { got := expandPath(test) if got != expected { diff --git a/metrics/json_test.go b/metrics/json_test.go index f91fe8cfa5..811bc29f11 100644 --- a/metrics/json_test.go +++ b/metrics/json_test.go @@ -13,7 +13,7 @@ func TestRegistryMarshallJSON(t *testing.T) { r.Register("counter", NewCounter()) enc.Encode(r) if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" { - t.Fatalf(s) + t.Fatal(s) } } diff --git a/params/config_extra.go b/params/config_extra.go index 278fd6367b..a0cb6105ed 100644 --- a/params/config_extra.go +++ b/params/config_extra.go @@ -8,8 +8,8 @@ import ( "errors" "math/big" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/subnet-evm/params/extras" - "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/utils" ) @@ -17,11 +17,8 @@ const ( maxJSONLen = 64 * 1024 * 1024 // 64MB // Consensus Params - RollupWindow uint64 = 10 - - // DynamicFeeExtraDataSize is defined in the predicate package to avoid a circular dependency. - // After Durango, the extra data past the dynamic fee rollup window represents predicate results. 
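For context on the params/config_extra.go hunk below: the dynamic-fee extra data size is now computed directly from the rollup window instead of being re-exported from the predicate package. A minimal sketch of the arithmetic, assuming wrappers.LongLen is 8 (the encoded byte length of a uint64 in avalanchego's wrappers package):

    package main

    import "fmt"

    const (
        longLen      = 8  // bytes per uint64; stands in for wrappers.LongLen (assumption)
        rollupWindow = 10 // seconds covered by the dynamic fee rollup window
    )

    func main() {
        // One uint64 per second of the window is stored in the header extra data.
        fmt.Println(longLen * rollupWindow) // 80 bytes
    }
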
- DynamicFeeExtraDataSize = predicate.DynamicFeeExtraDataSize + RollupWindow = 10 // in seconds + DynamicFeeExtraDataSize = wrappers.LongLen * RollupWindow // For legacy tests MinGasPrice int64 = 225_000_000_000 @@ -62,10 +59,10 @@ func SetEthUpgrades(c *ChainConfig, avalancheUpgrades extras.NetworkUpgrades) { } func GetExtra(c *ChainConfig) *extras.ChainConfig { - ex := payloads.FromChainConfig(c) + ex := payloads.ChainConfig.Get(c) if ex == nil { ex = &extras.ChainConfig{} - payloads.SetOnChainConfig(c, ex) + payloads.ChainConfig.Set(c, ex) } return ex } @@ -78,7 +75,7 @@ func Copy(c *ChainConfig) ChainConfig { // WithExtra sets the extra payload on `c` and returns the modified argument. func WithExtra(c *ChainConfig, extra *extras.ChainConfig) *ChainConfig { - payloads.SetOnChainConfig(c, extra) + payloads.ChainConfig.Set(c, extra) return c } diff --git a/params/hooks_libevm.go b/params/hooks_libevm.go index 41fc856053..792735618e 100644 --- a/params/hooks_libevm.go +++ b/params/hooks_libevm.go @@ -11,19 +11,19 @@ import ( "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/core/vm" "github.com/ava-labs/libevm/libevm" + "github.com/ava-labs/libevm/libevm/legacy" "github.com/ava-labs/subnet-evm/params/extras" "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist" "github.com/ava-labs/subnet-evm/precompile/modules" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" "github.com/ava-labs/subnet-evm/predicate" - "github.com/holiman/uint256" ) type RulesExtra extras.Rules func GetRulesExtra(r Rules) *extras.Rules { - rules := payloads.PointerFromRules(&r) + rules := payloads.Rules.GetPointer(&r) return (*extras.Rules)(rules) } @@ -81,7 +81,7 @@ func makePrecompile(contract contract.StatefulPrecompiledContract) libevm.Precom } return contract.Run(accessableState, env.Addresses().Caller, env.Addresses().Self, input, suppliedGas, env.ReadOnly()) } - return vm.NewStatefulPrecompile(run) + return vm.NewStatefulPrecompile(legacy.PrecompiledStatefulContract(run).Upgrade()) } func (r RulesExtra) PrecompileOverride(addr common.Address) (libevm.PrecompiledContract, bool) { @@ -127,8 +127,8 @@ func (a accessableState) GetSnowContext() *snow.Context { return GetExtra(a.env.ChainConfig()).SnowCtx } -func (a accessableState) Call(addr common.Address, input []byte, gas uint64, value *uint256.Int, _ ...vm.CallOption) (ret []byte, gasRemaining uint64, _ error) { - return a.env.Call(addr, input, gas, value) +func (a accessableState) GetPrecompileEnv() vm.PrecompileEnvironment { + return a.env } type precompileBlockContext struct { diff --git a/peer/network.go b/peer/network.go index 7b806fffbe..256063171a 100644 --- a/peer/network.go +++ b/peer/network.go @@ -56,9 +56,6 @@ type Network interface { // by calling OnPeerConnected for each peer Shutdown() - // SetGossipHandler sets the provided gossip handler as the gossip handler - SetGossipHandler(handler message.GossipHandler) - // SetRequestHandler sets the provided request handler as the request handler SetRequestHandler(handler message.RequestHandler) @@ -87,7 +84,6 @@ type network struct { appSender common.AppSender // avalanchego AppSender for sending messages codec codec.Manager // Codec used for parsing messages appRequestHandler message.RequestHandler // maps request type => handler - gossipHandler message.GossipHandler // maps gossip type => handler peers *peerTracker // tracking of peers & bandwidth appStats stats.RequestHandlerStats // Provide request handler 
metrics @@ -110,7 +106,6 @@ func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), p2pNetwork: p2pNetwork, - gossipHandler: message.NoopMempoolGossipHandler{}, appRequestHandler: message.NoopRequestHandler{}, peers: NewPeerTracker(), appStats: stats.NewRequestHandlerStats(), @@ -345,14 +340,7 @@ func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandle // from a peer. An error returned by this function is treated as fatal by the // engine. func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - var gossipMsg message.GossipMessage - if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { - log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) - return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) - } - - log.Debug("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) - return gossipMsg.Handle(n.gossipHandler, nodeID) + return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } // Connected adds the given nodeID to the peer list so that it can receive messages @@ -407,13 +395,6 @@ func (n *network) Shutdown() { n.closed.Set(true) // mark network as closed } -func (n *network) SetGossipHandler(handler message.GossipHandler) { - n.lock.Lock() - defer n.lock.Unlock() - - n.gossipHandler = handler -} - func (n *network) SetRequestHandler(handler message.RequestHandler) { n.lock.Lock() defer n.lock.Unlock() diff --git a/peer/network_test.go b/peer/network_test.go index d93439b22b..d8df3267c7 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -46,9 +46,7 @@ var ( _ message.RequestHandler = &HelloGreetingRequestHandler{} _ message.RequestHandler = &testRequestHandler{} - _ common.AppSender = testAppSender{} - _ message.GossipMessage = HelloGossip{} - _ message.GossipHandler = &testGossipHandler{} + _ common.AppSender = testAppSender{} _ p2p.Handler = &testSDKHandler{} ) @@ -503,7 +501,6 @@ func TestHandleInvalidMessages(t *testing.T) { p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -511,7 +508,8 @@ func TestHandleInvalidMessages(t *testing.T) { defer clientNetwork.Shutdown() // Ensure a valid gossip message sent as any App specific message type does not trigger a fatal error - gossipMsg, err := buildGossip(codecManager, HelloGossip{Msg: "hello there!"}) + marshaller := helloGossipMarshaller{codec: codecManager} + gossipMsg, err := marshaller.MarshalGossip(&HelloGossip{Msg: "hello there!"}) assert.NoError(t, err) // Ensure a valid request message sent as any App specific message type does not trigger a fatal error @@ -552,7 +550,6 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: 
errors.New("fail")}) // Return an error from the request handler assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -625,10 +622,6 @@ func marshalStruct(codec codec.Manager, obj interface{}) ([]byte, error) { return codec.Marshal(message.Version, &obj) } -func buildGossip(codec codec.Manager, msg message.GossipMessage) ([]byte, error) { - return codec.Marshal(message.Version, &msg) -} - type testAppSender struct { sendAppRequestFn func(context.Context, set.Set[ids.NodeID], uint32, []byte) error sendAppResponseFn func(ids.NodeID, uint32, []byte) error @@ -719,28 +712,22 @@ type HelloGossip struct { Msg string `serialize:"true"` } -func (h HelloGossip) Handle(handler message.GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, message.EthTxsGossip{}) +func (tx *HelloGossip) GossipID() ids.ID { + return ids.FromStringOrPanic(tx.Msg) } -func (h HelloGossip) String() string { - return fmt.Sprintf("HelloGossip(%s)", h.Msg) -} - -func (h HelloGossip) Bytes() []byte { - // no op - return nil +type helloGossipMarshaller struct { + codec codec.Manager } -type testGossipHandler struct { - received bool - nodeID ids.NodeID +func (g helloGossipMarshaller) MarshalGossip(tx *HelloGossip) ([]byte, error) { + return g.codec.Marshal(0, tx) } -func (t *testGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - t.received = true - t.nodeID = nodeID - return nil +func (g helloGossipMarshaller) UnmarshalGossip(bytes []byte) (*HelloGossip, error) { + h := &HelloGossip{} + _, err := g.codec.Unmarshal(bytes, h) + return h, err } type testRequestHandler struct { diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index 746b6d8250..b8c790b013 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/subnet-evm/plugin/evm/client" ) // Admin is the API service for admin API calls @@ -29,8 +30,8 @@ func NewAdminService(vm *VM, performanceDir string) *Admin { func (p *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: StartCPUProfiler called") - p.vm.ctx.Lock.Lock() - defer p.vm.ctx.Lock.Unlock() + p.vm.vmLock.Lock() + defer p.vm.vmLock.Unlock() return p.profiler.StartCPUProfiler() } @@ -39,8 +40,8 @@ func (p *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply func (p *Admin) StopCPUProfiler(r *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: StopCPUProfiler called") - p.vm.ctx.Lock.Lock() - defer p.vm.ctx.Lock.Unlock() + p.vm.vmLock.Lock() + defer p.vm.vmLock.Unlock() return p.profiler.StopCPUProfiler() } @@ -49,8 +50,8 @@ func (p *Admin) StopCPUProfiler(r *http.Request, _ *struct{}, _ *api.EmptyReply) func (p *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: MemoryProfile called") - p.vm.ctx.Lock.Lock() - defer p.vm.ctx.Lock.Unlock() + p.vm.vmLock.Lock() + defer p.vm.vmLock.Unlock() return p.profiler.MemoryProfile() } @@ -59,21 +60,17 @@ func (p *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) e func (p *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: LockProfile called") - p.vm.ctx.Lock.Lock() - defer p.vm.ctx.Lock.Unlock() + p.vm.vmLock.Lock() + defer p.vm.vmLock.Unlock() return p.profiler.LockProfile() } -type SetLogLevelArgs struct { - Level 
string `json:"level"` -} - -func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.EmptyReply) error { +func (p *Admin) SetLogLevel(_ *http.Request, args *client.SetLogLevelArgs, reply *api.EmptyReply) error { log.Info("EVM: SetLogLevel called", "logLevel", args.Level) - p.vm.ctx.Lock.Lock() - defer p.vm.ctx.Lock.Unlock() + p.vm.vmLock.Lock() + defer p.vm.vmLock.Unlock() if err := p.vm.logger.SetLogLevel(args.Level); err != nil { return fmt.Errorf("failed to parse log level: %w ", err) @@ -81,11 +78,7 @@ func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.E return nil } -type ConfigReply struct { - Config *Config `json:"config"` -} - -func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *ConfigReply) error { +func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *client.ConfigReply) error { reply.Config = &p.vm.config return nil } diff --git a/plugin/evm/client.go b/plugin/evm/client/client.go similarity index 76% rename from plugin/evm/client.go rename to plugin/evm/client/client.go index 36a0ee675f..53a2122e6a 100644 --- a/plugin/evm/client.go +++ b/plugin/evm/client/client.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package client import ( "context" @@ -12,11 +12,24 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/rpc" + "github.com/ava-labs/subnet-evm/plugin/evm/config" ) // Interface compliance var _ Client = (*client)(nil) +type CurrentValidator struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + Weight uint64 `json:"weight"` + StartTimestamp uint64 `json:"startTimestamp"` + IsActive bool `json:"isActive"` + IsL1Validator bool `json:"isL1Validator"` + IsConnected bool `json:"isConnected"` + UptimePercentage float32 `json:"uptimePercentage"` + UptimeSeconds uint64 `json:"uptimeSeconds"` +} + // Client interface for interacting with EVM [chain] type Client interface { StartCPUProfiler(ctx context.Context, options ...rpc.Option) error @@ -24,7 +37,7 @@ type Client interface { MemoryProfile(ctx context.Context, options ...rpc.Option) error LockProfile(ctx context.Context, options ...rpc.Option) error SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error - GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) + GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) GetCurrentValidators(ctx context.Context, nodeIDs []ids.NodeID, options ...rpc.Option) ([]CurrentValidator, error) } @@ -63,6 +76,10 @@ func (c *client) LockProfile(ctx context.Context, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}, options...) } +type SetLogLevelArgs struct { + Level string `json:"level"` +} + // SetLogLevel dynamically sets the log level for the C Chain func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.setLogLevel", &SetLogLevelArgs{ @@ -70,13 +87,25 @@ func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...r }, &api.EmptyReply{}, options...) 
} +type ConfigReply struct { + Config *config.Config `json:"config"` +} + // GetVMConfig returns the current config of the VM -func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { +func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) { res := &ConfigReply{} err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) return res.Config, err } +type GetCurrentValidatorsRequest struct { + NodeIDs []ids.NodeID `json:"nodeIDs"` +} + +type GetCurrentValidatorsResponse struct { + Validators []CurrentValidator `json:"validators"` +} + // GetCurrentValidators returns the current validators func (c *client) GetCurrentValidators(ctx context.Context, nodeIDs []ids.NodeID, options ...rpc.Option) ([]CurrentValidator, error) { res := &GetCurrentValidatorsResponse{} diff --git a/plugin/evm/client_interface_test.go b/plugin/evm/client_interface_test.go deleted file mode 100644 index d88c4926b4..0000000000 --- a/plugin/evm/client_interface_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package evm - -import ( - "reflect" - "testing" -) - -func TestInterfaceStructOneToOne(t *testing.T) { - // checks struct provides at least the methods signatures in the interface - var _ Client = (*client)(nil) - // checks interface and struct have the same number of methods - clientType := reflect.TypeOf(&client{}) - ClientType := reflect.TypeOf((*Client)(nil)).Elem() - if clientType.NumMethod() != ClientType.NumMethod() { - t.Fatalf("no 1 to 1 compliance between struct methods (%v) and interface methods (%v)", clientType.NumMethod(), ClientType.NumMethod()) - } -} diff --git a/plugin/evm/config.go b/plugin/evm/config.go index b006a10a83..8b779951c6 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -1,370 +1,21 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// (c) 2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package evm import ( - "encoding/json" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/database/pebbledb" - "github.com/ava-labs/libevm/common" - "github.com/ava-labs/libevm/common/hexutil" "github.com/ava-labs/subnet-evm/core/txpool/legacypool" - "github.com/ava-labs/subnet-evm/eth" - "github.com/spf13/cast" -) - -const ( - defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay - defaultPruningEnabled = true - defaultCommitInterval = 4096 - defaultTrieCleanCache = 512 - defaultTrieDirtyCache = 512 - defaultTrieDirtyCommitTarget = 20 - defaultTriePrefetcherParallelism = 16 - defaultSnapshotCache = 256 - defaultSyncableCommitInterval = defaultCommitInterval * 4 - defaultSnapshotWait = false - defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit - defaultRpcTxFeeCap = 100 // 100 AVAX - defaultMetricsExpensiveEnabled = true - defaultApiMaxDuration = 0 // Default to no maximum API call duration - defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage - defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage - defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request - defaultContinuousProfilerFrequency = 15 * time.Minute - defaultContinuousProfilerMaxFiles = 5 - defaultPushGossipPercentStake = .9 - defaultPushGossipNumValidators = 100 - defaultPushGossipNumPeers = 0 - defaultPushRegossipNumValidators = 10 - defaultPushRegossipNumPeers = 0 - defaultPushGossipFrequency = 100 * time.Millisecond - defaultPullGossipFrequency = 1 * time.Second - defaultRegossipFrequency = 30 * time.Second - defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use - defaultLogLevel = "info" - defaultLogJSONFormat = false - defaultMaxOutboundActiveRequests = 16 - defaultPopulateMissingTriesParallelism = 1024 - defaultStateSyncServerTrieCache = 64 // MB - defaultAcceptedCacheSize = 32 // blocks - - // defaultStateSyncMinBlocks is the minimum number of blocks the blockchain - // should be ahead of local last accepted to perform state sync. - // This constant is chosen so normal bootstrapping is preferred when it would - // be faster than state sync. - // time assumptions: - // - normal bootstrap processing time: ~14 blocks / second - // - state sync time: ~6 hrs. - defaultStateSyncMinBlocks = 300_000 - defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request - defaultDBType = pebbledb.Name - defaultValidatorAPIEnabled = true + "github.com/ava-labs/subnet-evm/plugin/evm/config" ) -type PBool bool - -var ( - defaultEnabledAPIs = []string{ - "eth", - "eth-filter", - "net", - "web3", - "internal-eth", - "internal-blockchain", - "internal-transaction", - } - defaultAllowUnprotectedTxHashes = []common.Hash{ - common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820 - } -) - -type Duration struct { - time.Duration -} - -// Config ... 
-type Config struct { - // Airdrop - AirdropFile string `json:"airdrop"` - - // Subnet EVM APIs - SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - ValidatorsAPIEnabled bool `json:"validators-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - WarpAPIEnabled bool `json:"warp-api-enabled"` - - // EnabledEthAPIs is a list of Ethereum services that should be enabled - // If none is specified, then we use the default list [defaultEnabledAPIs] - EnabledEthAPIs []string `json:"eth-apis"` - - // Continuous Profiler - ContinuousProfilerDir string `json:"continuous-profiler-dir"` // If set to non-empty string creates a continuous profiler - ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled - ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain - - // API Gas/Price Caps - RPCGasCap uint64 `json:"rpc-gas-cap"` - RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` - - // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) - - // Eth Settings - Preimages bool `json:"preimages-enabled"` - SnapshotWait bool `json:"snapshot-wait"` - SnapshotVerify bool `json:"snapshot-verification-enabled"` - - // Pruning Settings - Pruning bool `json:"pruning-enabled"` // If enabled, trie roots are only persisted every 4096 blocks - AcceptorQueueLimit int `json:"accepted-queue-limit"` // Maximum blocks to queue before blocking during acceptance - CommitInterval uint64 `json:"commit-interval"` // Specifies the commit interval at which to persist EVM and atomic tries. - AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed - PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. - PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. 
- PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup - - // Metric Settings - MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance - - // API Settings - LocalTxsEnabled bool `json:"local-txs-enabled"` - - TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` - TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` - TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` - TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` - TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` - TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` - TxPoolLifetime Duration `json:"tx-pool-lifetime"` - - APIMaxDuration Duration `json:"api-max-duration"` - WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` - WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` - MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` - AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` - AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` - AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` - - // Keystore Settings - KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported - KeystoreExternalSigner string `json:"keystore-external-signer"` - KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` - - // Gossip Settings - PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` - PushGossipNumValidators int `json:"push-gossip-num-validators"` - PushGossipNumPeers int `json:"push-gossip-num-peers"` - PushRegossipNumValidators int `json:"push-regossip-num-validators"` - PushRegossipNumPeers int `json:"push-regossip-num-peers"` - PushGossipFrequency Duration `json:"push-gossip-frequency"` - PullGossipFrequency Duration `json:"pull-gossip-frequency"` - RegossipFrequency Duration `json:"regossip-frequency"` - PriorityRegossipAddresses []common.Address `json:"priority-regossip-addresses"` - - // Log - LogLevel string `json:"log-level"` - LogJSONFormat bool `json:"log-json-format"` - - // Address for Tx Fees (must be empty if not supported by blockchain) - FeeRecipient string `json:"feeRecipient"` - - // Offline Pruning Settings - OfflinePruning bool `json:"offline-pruning-enabled"` - OfflinePruningBloomFilterSize uint64 `json:"offline-pruning-bloom-filter-size"` - OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` - - // VM2VM network - MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` - - // Sync settings - StateSyncEnabled bool `json:"state-sync-enabled"` - StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block - StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` - StateSyncIDs string `json:"state-sync-ids"` - StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` - StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` - StateSyncRequestSize uint16 `json:"state-sync-request-size"` - - // Database Settings - InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. - - // SkipUpgradeCheck disables checking that upgrades must take place before the last - // accepted block. Skipping this check is useful when a node operator does not update - // their node before the network upgrade and their node accepts blocks that have - // identical state with the pre-upgrade ruleset. 
- SkipUpgradeCheck bool `json:"skip-upgrade-check"` - - // AcceptedCacheSize is the depth to keep in the accepted headers cache and the - // accepted logs cache at the accepted tip. - // - // This is particularly useful for improving the performance of eth_getLogs - // on RPC nodes. - AcceptedCacheSize int `json:"accepted-cache-size"` - - // TransactionHistory is the maximum number of blocks from head whose tx indices - // are reserved: - // * 0: means no limit - // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes - TransactionHistory uint64 `json:"transaction-history"` - // Deprecated, use 'TransactionHistory' instead. - TxLookupLimit uint64 `json:"tx-lookup-limit"` - - // SkipTxIndexing skips indexing transactions. - // This is useful for validators that don't need to index transactions. - // TxLookupLimit can be still used to control unindexing old transactions. - SkipTxIndexing bool `json:"skip-tx-indexing"` - - // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) - // that the node should be willing to sign. - // Note: only supports AddressedCall payloads as defined here: - // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall - WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` - - // RPC settings - HttpBodyLimit uint64 `json:"http-body-limit"` - - // Database settings - UseStandaloneDatabase *PBool `json:"use-standalone-database"` - DatabaseConfigContent string `json:"database-config"` - DatabaseConfigFile string `json:"database-config-file"` - DatabaseType string `json:"database-type"` - DatabasePath string `json:"database-path"` - DatabaseReadOnly bool `json:"database-read-only"` -} - -// EthAPIs returns an array of strings representing the Eth APIs that should be enabled -func (c Config) EthAPIs() []string { - return c.EnabledEthAPIs -} - -func (c Config) EthBackendSettings() eth.Settings { - return eth.Settings{MaxBlocksPerRequest: c.MaxBlocksPerRequest} -} - -func (c *Config) SetDefaults() { - c.EnabledEthAPIs = defaultEnabledAPIs - c.RPCGasCap = defaultRpcGasCap - c.RPCTxFeeCap = defaultRpcTxFeeCap - c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - - c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit - c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump - c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots - c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots - c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue - c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue - c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime - - c.APIMaxDuration.Duration = defaultApiMaxDuration - c.WSCPURefillRate.Duration = defaultWsCpuRefillRate - c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored - c.MaxBlocksPerRequest = defaultMaxBlocksPerRequest - c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency - c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles - c.Pruning = defaultPruningEnabled - c.TrieCleanCache = defaultTrieCleanCache - c.TrieDirtyCache = defaultTrieDirtyCache - c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget - c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism - c.SnapshotCache = defaultSnapshotCache - c.AcceptorQueueLimit = defaultAcceptorQueueLimit - c.CommitInterval = defaultCommitInterval - c.SnapshotWait = defaultSnapshotWait - c.PushGossipPercentStake = defaultPushGossipPercentStake - 
c.PushGossipNumValidators = defaultPushGossipNumValidators - c.PushGossipNumPeers = defaultPushGossipNumPeers - c.PushRegossipNumValidators = defaultPushRegossipNumValidators - c.PushRegossipNumPeers = defaultPushRegossipNumPeers - c.PushGossipFrequency.Duration = defaultPushGossipFrequency - c.PullGossipFrequency.Duration = defaultPullGossipFrequency - c.RegossipFrequency.Duration = defaultRegossipFrequency - c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize - c.LogLevel = defaultLogLevel - c.LogJSONFormat = defaultLogJSONFormat - c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests - c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism - c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache - c.StateSyncCommitInterval = defaultSyncableCommitInterval - c.StateSyncMinBlocks = defaultStateSyncMinBlocks - c.StateSyncRequestSize = defaultStateSyncRequestSize - c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes - c.AcceptedCacheSize = defaultAcceptedCacheSize - c.DatabaseType = defaultDBType - c.ValidatorsAPIEnabled = defaultValidatorAPIEnabled -} - -func (d *Duration) UnmarshalJSON(data []byte) (err error) { - var v interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - d.Duration, err = cast.ToDurationE(v) - return err -} - -// String implements the stringer interface. -func (d Duration) String() string { - return d.Duration.String() -} - -// String implements the stringer interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Duration.String()) -} - -// Validate returns an error if this is an invalid config. -func (c *Config) Validate() error { - if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { - return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning) - } - if c.PopulateMissingTries != nil && c.PopulateMissingTriesParallelism < 1 { - return fmt.Errorf("cannot enable populate missing tries without at least one reader (parallelism: %d)", c.PopulateMissingTriesParallelism) - } - - if !c.Pruning && c.OfflinePruning { - return fmt.Errorf("cannot run offline pruning while pruning is disabled") - } - // If pruning is enabled, the commit interval must be non-zero so the node commits state tries every CommitInterval blocks. - if c.Pruning && c.CommitInterval == 0 { - return fmt.Errorf("cannot use commit interval of 0 with pruning enabled") - } - - if c.PushGossipPercentStake < 0 || c.PushGossipPercentStake > 1 { - return fmt.Errorf("push-gossip-percent-stake is %f but must be in the range [0, 1]", c.PushGossipPercentStake) - } - return nil -} - -func (c *Config) Deprecate() string { - msg := "" - // Deprecate the old config options and set the new ones. - if c.TxLookupLimit != 0 { - msg += "tx-lookup-limit is deprecated, use transaction-history instead. " - c.TransactionHistory = c.TxLookupLimit - } - - return msg -} - -func (p *PBool) String() string { - if p == nil { - return "nil" - } - return fmt.Sprintf("%t", *p) -} - -func (p *PBool) Bool() bool { - if p == nil { - return false - } - return bool(*p) +// defaultTxPoolConfig uses [legacypool.DefaultConfig] to make a [config.TxPoolConfig] +// that can be passed to [config.Config.SetDefaults]. 
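A minimal usage sketch of the extracted config API, assuming the VM seeds its configuration roughly this way (the actual call site is outside this hunk; newDefaultConfig is a hypothetical helper, and defaultTxPoolConfig is the variable declared just below):

    package evm

    import "github.com/ava-labs/subnet-evm/plugin/evm/config"

    // newDefaultConfig is a hypothetical helper: it applies defaults seeded from
    // the tx-pool settings and validates the result before the VM uses it.
    func newDefaultConfig() (config.Config, error) {
        var cfg config.Config
        cfg.SetDefaults(defaultTxPoolConfig) // defaultTxPoolConfig is defined below
        return cfg, cfg.Validate()
    }
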
+var defaultTxPoolConfig = config.TxPoolConfig{ + PriceLimit: legacypool.DefaultConfig.PriceLimit, + PriceBump: legacypool.DefaultConfig.PriceBump, + AccountSlots: legacypool.DefaultConfig.AccountSlots, + GlobalSlots: legacypool.DefaultConfig.GlobalSlots, + AccountQueue: legacypool.DefaultConfig.AccountQueue, + GlobalQueue: legacypool.DefaultConfig.GlobalQueue, + Lifetime: legacypool.DefaultConfig.Lifetime, } diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go new file mode 100644 index 0000000000..abd71c0c04 --- /dev/null +++ b/plugin/evm/config/config.go @@ -0,0 +1,377 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package config + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database/pebbledb" + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/common/hexutil" + "github.com/spf13/cast" +) + +const ( + defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay + defaultPruningEnabled = true + defaultCommitInterval = 4096 + defaultTrieCleanCache = 512 + defaultTrieDirtyCache = 512 + defaultTrieDirtyCommitTarget = 20 + defaultTriePrefetcherParallelism = 16 + defaultSnapshotCache = 256 + defaultSyncableCommitInterval = defaultCommitInterval * 4 + defaultSnapshotWait = false + defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit + defaultRpcTxFeeCap = 100 // 100 AVAX + defaultMetricsExpensiveEnabled = true + defaultApiMaxDuration = 0 // Default to no maximum API call duration + defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage + defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage + defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request + defaultContinuousProfilerFrequency = 15 * time.Minute + defaultContinuousProfilerMaxFiles = 5 + defaultPushGossipPercentStake = .9 + defaultPushGossipNumValidators = 100 + defaultPushGossipNumPeers = 0 + defaultPushRegossipNumValidators = 10 + defaultPushRegossipNumPeers = 0 + defaultPushGossipFrequency = 100 * time.Millisecond + defaultPullGossipFrequency = 1 * time.Second + defaultRegossipFrequency = 30 * time.Second + defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use + defaultLogLevel = "info" + defaultLogJSONFormat = false + defaultMaxOutboundActiveRequests = 16 + defaultPopulateMissingTriesParallelism = 1024 + defaultStateSyncServerTrieCache = 64 // MB + defaultAcceptedCacheSize = 32 // blocks + + // defaultStateSyncMinBlocks is the minimum number of blocks the blockchain + // should be ahead of local last accepted to perform state sync. + // This constant is chosen so normal bootstrapping is preferred when it would + // be faster than state sync. + // time assumptions: + // - normal bootstrap processing time: ~14 blocks / second + // - state sync time: ~6 hrs. 
+ defaultStateSyncMinBlocks = 300_000 + defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + defaultDBType = pebbledb.Name + defaultValidatorAPIEnabled = true +) + +type PBool bool + +var ( + defaultEnabledAPIs = []string{ + "eth", + "eth-filter", + "net", + "web3", + "internal-eth", + "internal-blockchain", + "internal-transaction", + } + defaultAllowUnprotectedTxHashes = []common.Hash{ + common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820 + } +) + +type Duration struct { + time.Duration +} + +// Config ... +type Config struct { + // Airdrop + AirdropFile string `json:"airdrop"` + + // Subnet EVM APIs + SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + ValidatorsAPIEnabled bool `json:"validators-api-enabled"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + WarpAPIEnabled bool `json:"warp-api-enabled"` + + // EnabledEthAPIs is a list of Ethereum services that should be enabled + // If none is specified, then we use the default list [defaultEnabledAPIs] + EnabledEthAPIs []string `json:"eth-apis"` + + // Continuous Profiler + ContinuousProfilerDir string `json:"continuous-profiler-dir"` // If set to non-empty string creates a continuous profiler + ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled + ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain + + // API Gas/Price Caps + RPCGasCap uint64 `json:"rpc-gas-cap"` + RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` + + // Cache settings + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once + SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + + // Eth Settings + Preimages bool `json:"preimages-enabled"` + SnapshotWait bool `json:"snapshot-wait"` + SnapshotVerify bool `json:"snapshot-verification-enabled"` + + // Pruning Settings + Pruning bool `json:"pruning-enabled"` // If enabled, trie roots are only persisted every 4096 blocks + AcceptorQueueLimit int `json:"accepted-queue-limit"` // Maximum blocks to queue before blocking during acceptance + CommitInterval uint64 `json:"commit-interval"` // Specifies the commit interval at which to persist EVM and atomic tries. + AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed + PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. + PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. 
+ PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup + + // Metric Settings + MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance + + // API Settings + LocalTxsEnabled bool `json:"local-txs-enabled"` + + TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` + TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` + TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` + TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` + TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` + TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` + TxPoolLifetime Duration `json:"tx-pool-lifetime"` + + APIMaxDuration Duration `json:"api-max-duration"` + WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` + WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` + MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` + AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` + AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` + AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` + + // Keystore Settings + KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported + KeystoreExternalSigner string `json:"keystore-external-signer"` + KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` + + // Gossip Settings + PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` + PushGossipNumValidators int `json:"push-gossip-num-validators"` + PushGossipNumPeers int `json:"push-gossip-num-peers"` + PushRegossipNumValidators int `json:"push-regossip-num-validators"` + PushRegossipNumPeers int `json:"push-regossip-num-peers"` + PushGossipFrequency Duration `json:"push-gossip-frequency"` + PullGossipFrequency Duration `json:"pull-gossip-frequency"` + RegossipFrequency Duration `json:"regossip-frequency"` + PriorityRegossipAddresses []common.Address `json:"priority-regossip-addresses"` + + // Log + LogLevel string `json:"log-level"` + LogJSONFormat bool `json:"log-json-format"` + + // Address for Tx Fees (must be empty if not supported by blockchain) + FeeRecipient string `json:"feeRecipient"` + + // Offline Pruning Settings + OfflinePruning bool `json:"offline-pruning-enabled"` + OfflinePruningBloomFilterSize uint64 `json:"offline-pruning-bloom-filter-size"` + OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` + + // VM2VM network + MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` + + // Sync settings + StateSyncEnabled bool `json:"state-sync-enabled"` + StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block + StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` + StateSyncIDs string `json:"state-sync-ids"` + StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` + StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` + StateSyncRequestSize uint16 `json:"state-sync-request-size"` + + // Database Settings + InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. + + // SkipUpgradeCheck disables checking that upgrades must take place before the last + // accepted block. Skipping this check is useful when a node operator does not update + // their node before the network upgrade and their node accepts blocks that have + // identical state with the pre-upgrade ruleset. 
+ SkipUpgradeCheck bool `json:"skip-upgrade-check"` + + // AcceptedCacheSize is the depth to keep in the accepted headers cache and the + // accepted logs cache at the accepted tip. + // + // This is particularly useful for improving the performance of eth_getLogs + // on RPC nodes. + AcceptedCacheSize int `json:"accepted-cache-size"` + + // TransactionHistory is the maximum number of blocks from head whose tx indices + // are reserved: + // * 0: means no limit + // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes + TransactionHistory uint64 `json:"transaction-history"` + // Deprecated, use 'TransactionHistory' instead. + TxLookupLimit uint64 `json:"tx-lookup-limit"` + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. + // TxLookupLimit can be still used to control unindexing old transactions. + SkipTxIndexing bool `json:"skip-tx-indexing"` + + // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) + // that the node should be willing to sign. + // Note: only supports AddressedCall payloads as defined here: + // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall + WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` + + // RPC settings + HttpBodyLimit uint64 `json:"http-body-limit"` + + // Database settings + UseStandaloneDatabase *PBool `json:"use-standalone-database"` + DatabaseConfigContent string `json:"database-config"` + DatabaseConfigFile string `json:"database-config-file"` + DatabaseType string `json:"database-type"` + DatabasePath string `json:"database-path"` + DatabaseReadOnly bool `json:"database-read-only"` +} + +// TxPoolConfig contains the transaction pool config to be passed +// to [Config.SetDefaults]. 
+type TxPoolConfig struct { + PriceLimit uint64 + PriceBump uint64 + AccountSlots uint64 + GlobalSlots uint64 + AccountQueue uint64 + GlobalQueue uint64 + Lifetime time.Duration +} + +// EthAPIs returns an array of strings representing the Eth APIs that should be enabled +func (c Config) EthAPIs() []string { + return c.EnabledEthAPIs +} + +func (c *Config) SetDefaults(txPoolConfig TxPoolConfig) { + c.EnabledEthAPIs = defaultEnabledAPIs + c.RPCGasCap = defaultRpcGasCap + c.RPCTxFeeCap = defaultRpcTxFeeCap + c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled + + // TxPool settings + c.TxPoolPriceLimit = txPoolConfig.PriceLimit + c.TxPoolPriceBump = txPoolConfig.PriceBump + c.TxPoolAccountSlots = txPoolConfig.AccountSlots + c.TxPoolGlobalSlots = txPoolConfig.GlobalSlots + c.TxPoolAccountQueue = txPoolConfig.AccountQueue + c.TxPoolGlobalQueue = txPoolConfig.GlobalQueue + c.TxPoolLifetime.Duration = txPoolConfig.Lifetime + + c.APIMaxDuration.Duration = defaultApiMaxDuration + c.WSCPURefillRate.Duration = defaultWsCpuRefillRate + c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored + c.MaxBlocksPerRequest = defaultMaxBlocksPerRequest + c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency + c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles + c.Pruning = defaultPruningEnabled + c.TrieCleanCache = defaultTrieCleanCache + c.TrieDirtyCache = defaultTrieDirtyCache + c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget + c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism + c.SnapshotCache = defaultSnapshotCache + c.AcceptorQueueLimit = defaultAcceptorQueueLimit + c.CommitInterval = defaultCommitInterval + c.SnapshotWait = defaultSnapshotWait + c.PushGossipPercentStake = defaultPushGossipPercentStake + c.PushGossipNumValidators = defaultPushGossipNumValidators + c.PushGossipNumPeers = defaultPushGossipNumPeers + c.PushRegossipNumValidators = defaultPushRegossipNumValidators + c.PushRegossipNumPeers = defaultPushRegossipNumPeers + c.PushGossipFrequency.Duration = defaultPushGossipFrequency + c.PullGossipFrequency.Duration = defaultPullGossipFrequency + c.RegossipFrequency.Duration = defaultRegossipFrequency + c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize + c.LogLevel = defaultLogLevel + c.LogJSONFormat = defaultLogJSONFormat + c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests + c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism + c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache + c.StateSyncCommitInterval = defaultSyncableCommitInterval + c.StateSyncMinBlocks = defaultStateSyncMinBlocks + c.StateSyncRequestSize = defaultStateSyncRequestSize + c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes + c.AcceptedCacheSize = defaultAcceptedCacheSize + c.DatabaseType = defaultDBType + c.ValidatorsAPIEnabled = defaultValidatorAPIEnabled +} + +func (d *Duration) UnmarshalJSON(data []byte) (err error) { + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + d.Duration, err = cast.ToDurationE(v) + return err +} + +// String implements the stringer interface. +func (d Duration) String() string { + return d.Duration.String() +} + +// String implements the stringer interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} + +// Validate returns an error if this is an invalid config. 
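A short, illustrative sketch of how the Duration type above round-trips through the JSON config (cast.ToDurationE accepts duration strings such as "30s"); Validate, introduced by the comment above, follows the sketch:

    package config

    import (
        "encoding/json"
        "fmt"
    )

    // durationRoundTrip is illustrative only: it decodes a human-readable
    // duration from config JSON and re-encodes it in the same string form.
    func durationRoundTrip() error {
        var d Duration
        if err := json.Unmarshal([]byte(`"30s"`), &d); err != nil {
            return err
        }
        out, err := json.Marshal(d)
        if err != nil {
            return err
        }
        fmt.Println(d.Duration, string(out)) // prints: 30s "30s"
        return nil
    }
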
+func (c *Config) Validate() error { + if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { + return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning) + } + if c.PopulateMissingTries != nil && c.PopulateMissingTriesParallelism < 1 { + return fmt.Errorf("cannot enable populate missing tries without at least one reader (parallelism: %d)", c.PopulateMissingTriesParallelism) + } + + if !c.Pruning && c.OfflinePruning { + return fmt.Errorf("cannot run offline pruning while pruning is disabled") + } + // If pruning is enabled, the commit interval must be non-zero so the node commits state tries every CommitInterval blocks. + if c.Pruning && c.CommitInterval == 0 { + return fmt.Errorf("cannot use commit interval of 0 with pruning enabled") + } + + if c.PushGossipPercentStake < 0 || c.PushGossipPercentStake > 1 { + return fmt.Errorf("push-gossip-percent-stake is %f but must be in the range [0, 1]", c.PushGossipPercentStake) + } + return nil +} + +func (c *Config) Deprecate() string { + msg := "" + // Deprecate the old config options and set the new ones. + if c.TxLookupLimit != 0 { + msg += "tx-lookup-limit is deprecated, use transaction-history instead. " + c.TransactionHistory = c.TxLookupLimit + } + + return msg +} + +func (p *PBool) String() string { + if p == nil { + return "nil" + } + return fmt.Sprintf("%t", *p) +} + +func (p *PBool) Bool() bool { + if p == nil { + return false + } + return bool(*p) +} diff --git a/plugin/evm/config_test.go b/plugin/evm/config/config_test.go similarity index 99% rename from plugin/evm/config_test.go rename to plugin/evm/config/config_test.go index 562b472525..3d209b750b 100644 --- a/plugin/evm/config_test.go +++ b/plugin/evm/config/config_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package config import ( "encoding/json" diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index b6624f328a..fab3332933 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -46,7 +46,7 @@ func newTxGossipHandler[T gossip.Gossipable]( maxMessageSize int, throttlingPeriod time.Duration, throttlingLimit int, - validators *p2p.Validators, + validators p2p.ValidatorSet, ) txGossipHandler { // push gossip messages can be handled from any peer handler := gossip.NewHandler( diff --git a/plugin/evm/gossip_stats.go b/plugin/evm/gossip_stats.go deleted file mode 100644 index 3a6f552fcc..0000000000 --- a/plugin/evm/gossip_stats.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import "github.com/ava-labs/subnet-evm/metrics" - -var _ GossipStats = &gossipStats{} - -// GossipStats contains methods for updating incoming and outgoing gossip stats. -type GossipStats interface { - IncEthTxsGossipReceived() - - // new vs. known txs received - IncEthTxsGossipReceivedError() - IncEthTxsGossipReceivedKnown() - IncEthTxsGossipReceivedNew() -} - -// gossipStats implements stats for incoming and outgoing gossip stats. -type gossipStats struct { - // messages - ethTxsGossipReceived metrics.Counter - - // new vs. 
known txs received - ethTxsGossipReceivedError metrics.Counter - ethTxsGossipReceivedKnown metrics.Counter - ethTxsGossipReceivedNew metrics.Counter -} - -func NewGossipStats() GossipStats { - return &gossipStats{ - ethTxsGossipReceived: metrics.GetOrRegisterCounter("gossip_eth_txs_received", nil), - ethTxsGossipReceivedError: metrics.GetOrRegisterCounter("gossip_eth_txs_received_error", nil), - ethTxsGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_eth_txs_received_known", nil), - ethTxsGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_eth_txs_received_new", nil), - } -} - -// incoming messages -func (g *gossipStats) IncEthTxsGossipReceived() { g.ethTxsGossipReceived.Inc(1) } - -// new vs. known txs received -func (g *gossipStats) IncEthTxsGossipReceivedError() { g.ethTxsGossipReceivedError.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedKnown() { g.ethTxsGossipReceivedKnown.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedNew() { g.ethTxsGossipReceivedNew.Inc(1) } diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go deleted file mode 100644 index 2cd32b8ba3..0000000000 --- a/plugin/evm/handler.go +++ /dev/null @@ -1,76 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "github.com/ava-labs/avalanchego/ids" - - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/libevm/rlp" - - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/plugin/evm/message" -) - -// GossipHandler handles incoming gossip messages -type GossipHandler struct { - vm *VM - txPool *txpool.TxPool - stats GossipStats -} - -func NewGossipHandler(vm *VM, stats GossipStats) *GossipHandler { - return &GossipHandler{ - vm: vm, - txPool: vm.txPool, - stats: stats, - } -} - -func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - log.Trace( - "AppGossip called with EthTxsGossip", - "peerID", nodeID, - "size(txs)", len(msg.Txs), - ) - - if len(msg.Txs) == 0 { - log.Trace( - "AppGossip received empty EthTxsGossip Message", - "peerID", nodeID, - ) - return nil - } - - // The maximum size of this encoded object is enforced by the codec. - txs := make([]*types.Transaction, 0) - if err := rlp.DecodeBytes(msg.Txs, &txs); err != nil { - log.Trace( - "AppGossip provided invalid txs", - "peerID", nodeID, - "err", err, - ) - return nil - } - h.stats.IncEthTxsGossipReceived() - errs := h.txPool.Add(txs, false, false) - for i, err := range errs { - if err != nil { - log.Trace( - "AppGossip failed to add to mempool", - "err", err, - "tx", txs[i].Hash(), - ) - if err == txpool.ErrAlreadyKnown { - h.stats.IncEthTxsGossipReceivedKnown() - } else { - h.stats.IncEthTxsGossipReceivedError() - } - continue - } - h.stats.IncEthTxsGossipReceivedNew() - } - return nil -} diff --git a/plugin/evm/imports_test.go b/plugin/evm/imports_test.go new file mode 100644 index 0000000000..6b5e6c1351 --- /dev/null +++ b/plugin/evm/imports_test.go @@ -0,0 +1,71 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/tools/go/packages" +) + +// getDependencies takes a fully qualified package name and returns a map of all +// its recursive package imports (including itself) in the same format. 
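+// It loads the package via golang.org/x/tools/go/packages, so the module must build +// for the lookup to succeed; the returned keys are full import paths such as +// "github.com/ava-labs/subnet-evm/plugin/evm/config".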
+func getDependencies(packageName string) (map[string]struct{}, error) { + // Configure the load mode to include dependencies + cfg := &packages.Config{Mode: packages.NeedDeps | packages.NeedImports | packages.NeedName | packages.NeedModule} + pkgs, err := packages.Load(cfg, packageName) + if err != nil { + return nil, fmt.Errorf("failed to load package: %v", err) + } + + if len(pkgs) == 0 || pkgs[0].Errors != nil { + return nil, fmt.Errorf("failed to load package %s", packageName) + } + + deps := make(map[string]struct{}) + var collectDeps func(pkg *packages.Package) + collectDeps = func(pkg *packages.Package) { + if _, ok := deps[pkg.PkgPath]; ok { + return // Avoid re-processing the same dependency + } + deps[pkg.PkgPath] = struct{}{} + for _, dep := range pkg.Imports { + collectDeps(dep) + } + } + + // Start collecting dependencies + collectDeps(pkgs[0]) + return deps, nil +} + +func TestMustNotImport(t *testing.T) { + withRepo := func(pkg string) string { + const repo = "github.com/ava-labs/subnet-evm" + return fmt.Sprintf("%s/%s", repo, pkg) + } + mustNotImport := map[string][]string{ + // The following sub-packages of plugin/evm must not import core, core/vm + // so clients (e.g., wallets, e2e tests) can import them without pulling in + // the entire VM logic. + // Importing these packages configures libevm globally and it is not + // possible to do so for both coreth and subnet-evm, where the client may + // wish to connect to multiple chains. + "plugin/evm/client": {"core", "core/vm"}, + "plugin/evm/config": {"core", "core/vm"}, + } + + for packageName, forbiddenImports := range mustNotImport { + imports, err := getDependencies(withRepo(packageName)) + require.NoError(t, err) + + for _, forbiddenImport := range forbiddenImports { + fullForbiddenImport := withRepo(forbiddenImport) + _, found := imports[fullForbiddenImport] + require.False(t, found, "package %s must not import %s, check output of go list -f '{{ .Deps }}' \"%s\" ", packageName, fullForbiddenImport, withRepo(packageName)) + } + } +} diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index 9ae2112b1a..64edbb81d6 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -23,11 +23,11 @@ func init() { Codec = codec.NewManager(maxMessageSize) c := linearcodec.NewDefault() + // Skip registration to keep registeredTypes unchanged after legacy gossip deprecation + c.SkipRegistrations(1) + errs := wrappers.Errs{} errs.Add( - // Gossip types - c.RegisterType(EthTxsGossip{}), - // Types for state sync frontier consensus c.RegisterType(SyncSummary{}), diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index d8c0c4f7ce..da81a8b0f2 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -6,28 +6,13 @@ package message import ( "context" - "github.com/ava-labs/libevm/log" - "github.com/ava-labs/avalanchego/ids" ) var ( - _ GossipHandler = NoopMempoolGossipHandler{} _ RequestHandler = NoopRequestHandler{} ) -// GossipHandler handles incoming gossip messages -type GossipHandler interface { - HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error -} - -type NoopMempoolGossipHandler struct{} - -func (NoopMempoolGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error { - log.Debug("dropping unexpected EthTxsGossip message", "peerID", nodeID) - return nil -} - // RequestHandler interface handles incoming requests from peers // Must have methods in format of handleType(context.Context, ids.NodeID, uint32, request Type) error // so that 
the Request object of relevant Type can invoke its respective handle method diff --git a/plugin/evm/message/handler_test.go b/plugin/evm/message/handler_test.go deleted file mode 100644 index 8b87135ff5..0000000000 --- a/plugin/evm/message/handler_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - - "github.com/stretchr/testify/assert" -) - -type CounterHandler struct { - EthTxs int -} - -func (h *CounterHandler) HandleEthTxs(ids.NodeID, EthTxsGossip) error { - h.EthTxs++ - return nil -} - -func TestHandleEthTxs(t *testing.T) { - assert := assert.New(t) - - handler := CounterHandler{} - msg := EthTxsGossip{} - - err := msg.Handle(&handler, ids.EmptyNodeID) - assert.NoError(err) - assert.Equal(1, handler.EthTxs) -} - -func TestNoopHandler(t *testing.T) { - assert := assert.New(t) - - handler := NoopMempoolGossipHandler{} - - err := handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) - assert.NoError(err) -} diff --git a/plugin/evm/message/message.go b/plugin/evm/message/message.go deleted file mode 100644 index 35887911c9..0000000000 --- a/plugin/evm/message/message.go +++ /dev/null @@ -1,65 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "errors" - "fmt" - - "github.com/ava-labs/avalanchego/codec" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/units" -) - -const ( - // EthMsgSoftCapSize is the ideal size of encoded transaction bytes we send in - // any [EthTxsGossip] or [AtomicTxGossip] message. We do not limit inbound messages to - // this size, however. Max inbound message size is enforced by the codec - // (512KB). - EthMsgSoftCapSize = 64 * units.KiB -) - -var ( - _ GossipMessage = EthTxsGossip{} - - errUnexpectedCodecVersion = errors.New("unexpected codec version") -) - -type GossipMessage interface { - // types implementing GossipMessage should also implement fmt.Stringer for logging purposes. - fmt.Stringer - - // Handle this gossip message with the gossip handler. - Handle(handler GossipHandler, nodeID ids.NodeID) error -} - -type EthTxsGossip struct { - Txs []byte `serialize:"true"` -} - -func (msg EthTxsGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, msg) -} - -func (msg EthTxsGossip) String() string { - return fmt.Sprintf("EthTxsGossip(Len=%d)", len(msg.Txs)) -} - -func ParseGossipMessage(codec codec.Manager, bytes []byte) (GossipMessage, error) { - var msg GossipMessage - version, err := codec.Unmarshal(bytes, &msg) - if err != nil { - return nil, err - } - if version != Version { - return nil, errUnexpectedCodecVersion - } - return msg, nil -} - -func BuildGossipMessage(codec codec.Manager, msg GossipMessage) ([]byte, error) { - bytes, err := codec.Marshal(Version, &msg) - return bytes, err -} diff --git a/plugin/evm/message/message_test.go b/plugin/evm/message/message_test.go deleted file mode 100644 index e1779d4b75..0000000000 --- a/plugin/evm/message/message_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "encoding/base64" - "testing" - - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" - - "github.com/stretchr/testify/assert" -) - -// TestMarshalEthTxs asserts that the structure or serialization logic hasn't changed, primarily to -// ensure compatibility with the network. -func TestMarshalEthTxs(t *testing.T) { - assert := assert.New(t) - - base64EthTxGossip := "AAAAAAAAAAAABGJsYWg=" - msg := []byte("blah") - builtMsg := EthTxsGossip{ - Txs: msg, - } - builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) - assert.NoError(err) - assert.Equal(base64EthTxGossip, base64.StdEncoding.EncodeToString(builtMsgBytes)) - - parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) - assert.NoError(err) - - parsedMsg, ok := parsedMsgIntf.(EthTxsGossip) - assert.True(ok) - - assert.Equal(msg, parsedMsg.Txs) -} - -func TestEthTxsTooLarge(t *testing.T) { - assert := assert.New(t) - - builtMsg := EthTxsGossip{ - Txs: utils.RandomBytes(maxMessageSize), - } - _, err := BuildGossipMessage(Codec, builtMsg) - assert.Error(err) -} - -func TestParseGibberish(t *testing.T) { - assert := assert.New(t) - - randomBytes := utils.RandomBytes(256 * units.KiB) - _, err := ParseGossipMessage(Codec, randomBytes) - assert.Error(err) -} diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 0ad10a63b6..70b92bd538 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -10,35 +10,16 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/subnet-evm/plugin/evm/client" ) type ValidatorsAPI struct { vm *VM } -type GetCurrentValidatorsRequest struct { - NodeIDs []ids.NodeID `json:"nodeIDs"` -} - -type GetCurrentValidatorsResponse struct { - Validators []CurrentValidator `json:"validators"` -} - -type CurrentValidator struct { - ValidationID ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - Weight uint64 `json:"weight"` - StartTimestamp uint64 `json:"startTimestamp"` - IsActive bool `json:"isActive"` - IsL1Validator bool `json:"isL1Validator"` - IsConnected bool `json:"isConnected"` - UptimePercentage float32 `json:"uptimePercentage"` - UptimeSeconds uint64 `json:"uptimeSeconds"` -} - -func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, req *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) error { - api.vm.ctx.Lock.RLock() - defer api.vm.ctx.Lock.RUnlock() +func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, req *client.GetCurrentValidatorsRequest, reply *client.GetCurrentValidatorsResponse) error { + api.vm.vmLock.RLock() + defer api.vm.vmLock.RUnlock() var vIDs set.Set[ids.ID] if len(req.NodeIDs) > 0 { @@ -54,7 +35,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, req *GetCurrentV vIDs = api.vm.validatorsManager.GetValidationIDs() } - reply.Validators = make([]CurrentValidator, 0, vIDs.Len()) + reply.Validators = make([]client.CurrentValidator, 0, vIDs.Len()) for _, vID := range vIDs.List() { validator, err := api.vm.validatorsManager.GetValidator(vID) @@ -81,7 +62,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, req *GetCurrentV // with currentValidators in PlatformVM API uptimePercentage := float32(uptimeFloat * 100) - reply.Validators = append(reply.Validators, CurrentValidator{ + reply.Validators = append(reply.Validators, client.CurrentValidator{ ValidationID: validator.ValidationID, NodeID: validator.NodeID, StartTimestamp: validator.StartTimestamp, diff --git 
a/plugin/evm/validators.go b/plugin/evm/validators.go new file mode 100644 index 0000000000..44c5560267 --- /dev/null +++ b/plugin/evm/validators.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type validatorSet struct { + set set.Set[ids.NodeID] +} + +func (v *validatorSet) Has(ctx context.Context, nodeID ids.NodeID) bool { + return v.set.Contains(nodeID) +} diff --git a/plugin/evm/validators/interfaces/interfaces.go b/plugin/evm/validators/interfaces/interfaces.go index d8b88b3626..aeac266af9 100644 --- a/plugin/evm/validators/interfaces/interfaces.go +++ b/plugin/evm/validators/interfaces/interfaces.go @@ -5,6 +5,7 @@ package interfaces import ( "context" + "sync" "time" "github.com/ava-labs/avalanchego/ids" @@ -13,18 +14,21 @@ import ( ) type ValidatorReader interface { - // GetValidatorAndUptime returns the uptime of the validator specified by validationID + // GetValidatorAndUptime returns the calculated uptime of the validator specified by validationID + // and the last updated time. + // GetValidatorAndUptime holds the VM lock while performing the operation and can be called concurrently. GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) } type Manager interface { - stateinterfaces.State + stateinterfaces.StateReader avalancheuptime.Manager - ValidatorReader - - // Sync updates the validator set managed - // by the manager - Sync(ctx context.Context) error + // Initialize initializes the validator manager + // by syncing the validator state with the current validator set + // and starting the uptime tracking. + Initialize(ctx context.Context) error + // Shutdown stops the uptime tracking and writes the validator state to the database. + Shutdown() error // DispatchSync starts the sync process - DispatchSync(ctx context.Context) + DispatchSync(ctx context.Context, lock sync.Locker) } diff --git a/plugin/evm/validators/locked_reader.go b/plugin/evm/validators/locked_reader.go new file mode 100644 index 0000000000..f4b12f8eb9 --- /dev/null +++ b/plugin/evm/validators/locked_reader.go @@ -0,0 +1,53 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" +) + +type RLocker interface { + RLock() + RUnlock() +} + +type lockedReader struct { + manager interfaces.Manager + lock RLocker +} + +func NewLockedValidatorReader( + manager interfaces.Manager, + lock RLocker, +) interfaces.ValidatorReader { + return &lockedReader{ + lock: lock, + manager: manager, + } +} + +// GetValidatorAndUptime returns the calculated uptime of the validator specified by validationID +// and the last updated time. +// GetValidatorAndUptime holds the lock while performing the operation and can be called concurrently. 
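+// Because only a read lock is taken, API handlers may call it concurrently with each +// other; the manager's periodic sync serializes against it by taking the same lock +// for writing.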
+func (l *lockedReader) GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) { + l.lock.RLock() + defer l.lock.RUnlock() + + vdr, err := l.manager.GetValidator(validationID) + if err != nil { + return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get validator: %w", err) + } + + uptime, lastUpdated, err := l.manager.CalculateUptime(vdr.NodeID) + if err != nil { + return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get uptime: %w", err) + } + + return vdr, uptime, lastUpdated, nil +} diff --git a/plugin/evm/validators/manager.go b/plugin/evm/validators/manager.go index afdb8f40d9..bf05a71fdf 100644 --- a/plugin/evm/validators/manager.go +++ b/plugin/evm/validators/manager.go @@ -6,6 +6,7 @@ package validators import ( "context" "fmt" + "sync" "time" "github.com/ava-labs/avalanchego/database" @@ -14,7 +15,6 @@ import ( avalancheuptime "github.com/ava-labs/avalanchego/snow/uptime" avalanchevalidators "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" validators "github.com/ava-labs/subnet-evm/plugin/evm/validators/state" stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" "github.com/ava-labs/subnet-evm/plugin/evm/validators/uptime" @@ -35,11 +35,12 @@ type manager struct { // NewManager returns a new validator manager // that manages the validator state and the uptime manager. +// Manager is not thread safe and should be used with the VM locked. func NewManager( ctx *snow.Context, db database.Database, clock *mockable.Clock, -) (interfaces.Manager, error) { +) (*manager, error) { validatorState, err := validators.NewState(db) if err != nil { return nil, fmt.Errorf("failed to initialize validator state: %w", err) @@ -56,52 +57,60 @@ func NewManager( }, nil } -// GetValidatorAndUptime returns the calculated uptime of the validator specified by validationID -// and the last updated time. -// GetValidatorAndUptime holds the chain context lock while performing the operation and can be called concurrently. -func (m *manager) GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) { - // lock the state - m.chainCtx.Lock.RLock() - defer m.chainCtx.Lock.RUnlock() - - // Get validator first - vdr, err := m.GetValidator(validationID) - if err != nil { - return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get validator: %w", err) +// Initialize initializes the validator manager +// by syncing the validator state with the current validator set +// and starting the uptime tracking. +func (m *manager) Initialize(ctx context.Context) error { + // sync validators first + if err := m.sync(ctx); err != nil { + return fmt.Errorf("failed to update validators: %w", err) } - - uptime, lastUpdated, err := m.CalculateUptime(vdr.NodeID) - if err != nil { - return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get uptime: %w", err) + vdrIDs := m.GetNodeIDs().List() + // Then start tracking with updated validators + // StartTracking initializes the uptime tracking with the known validators + // and updates their uptime to account for the time we were offline.
+ if err := m.StartTracking(vdrIDs); err != nil { + return fmt.Errorf("failed to start tracking uptime: %w", err) } + return nil +} - return vdr, uptime, lastUpdated, nil +// Shutdown stops the uptime tracking and writes the validator state to the database. +func (m *manager) Shutdown() error { + vdrIDs := m.GetNodeIDs().List() + if err := m.StopTracking(vdrIDs); err != nil { + return fmt.Errorf("failed to stop tracking uptime: %w", err) + } + if err := m.WriteState(); err != nil { + return fmt.Errorf("failed to write validator: %w", err) + } + return nil } // DispatchSync starts the sync process -// DispatchSync holds the chain context lock while performing the sync. -func (m *manager) DispatchSync(ctx context.Context) { +// DispatchSync holds the given lock while performing the sync. +func (m *manager) DispatchSync(ctx context.Context, lock sync.Locker) { ticker := time.NewTicker(SyncFrequency) defer ticker.Stop() for { select { case <-ticker.C: - m.chainCtx.Lock.Lock() - if err := m.Sync(ctx); err != nil { + lock.Lock() + if err := m.sync(ctx); err != nil { log.Error("failed to sync validators", "error", err) } - m.chainCtx.Lock.Unlock() + lock.Unlock() case <-ctx.Done(): return } } } -// Sync synchronizes the validator state with the current validator set +// sync synchronizes the validator state with the current validator set // and writes the state to the database. -// Sync is not safe to call concurrently and should be called with the chain context locked. -func (m *manager) Sync(ctx context.Context) error { +// sync is not safe to call concurrently and should be called with the VM locked. +func (m *manager) sync(ctx context.Context) error { now := time.Now() log.Debug("performing validator sync") // get current validator set diff --git a/plugin/evm/validators/state/state.go b/plugin/evm/validators/state/state.go index c3150c10ac..2ffcf493a8 100644 --- a/plugin/evm/validators/state/state.go +++ b/plugin/evm/validators/state/state.go @@ -197,13 +197,14 @@ func (s *state) WriteState() error { if err := batch.Delete(vID[:]); err != nil { return err } - default: - return fmt.Errorf("unknown update status for %s", vID) } - // we're done, remove the updated marker - delete(s.updatedData, vID) } - return batch.Write() + if err := batch.Write(); err != nil { + return err + } + // we've successfully flushed the updates, clear the updated marker. + clear(s.updatedData) + return nil } // SetStatus sets the active status of the validator with the given vID diff --git a/plugin/evm/version.go b/plugin/evm/version.go index 158c6e1510..d6ccf8d553 100644 --- a/plugin/evm/version.go +++ b/plugin/evm/version.go @@ -11,7 +11,7 @@ var ( // GitCommit is set by the build script GitCommit string // Version is the version of Subnet EVM - Version string = "v0.7.1" + Version string = "v0.7.2" ) func init() { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 38db3d078b..21421c5291 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -40,6 +40,7 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" "github.com/ava-labs/subnet-evm/peer" + "github.com/ava-labs/subnet-evm/plugin/evm/config" "github.com/ava-labs/subnet-evm/plugin/evm/message" "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" @@ -177,13 +178,16 @@ var legacyApiNames = map[string]string{ // VM implements the snowman.ChainVM interface type VM struct { ctx *snow.Context + // vmLock is used to coordinate global VM operations.
+ // This can be used safely instead of snow.Context.Lock which is deprecated and should not be used in rpcchainvm. + vmLock sync.RWMutex // [cancel] may be nil until [snow.NormalOp] starts cancel context.CancelFunc // *chain.State helps to implement the VM interface by wrapping blocks // with an efficient caching layer. *chain.State - config Config + config config.Config networkID uint64 genesisHash common.Hash @@ -278,7 +282,7 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { - vm.config.SetDefaults() + vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err) @@ -522,7 +526,7 @@ func (vm *VM) Initialize( vm.ctx.ChainID, vm.ctx.WarpSigner, vm, - vm.validatorsManager, + validators.NewLockedValidatorReader(vm.validatorsManager, &vm.vmLock), vm.warpDB, meteredCache, offchainWarpMessages, @@ -580,7 +584,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig. &vm.ethConfig, &EthPushGossiper{vm: vm}, vm.chaindb, - vm.config.EthBackendSettings(), + eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest}, lastAcceptedHash, dummy.NewFakerWithClock(&vm.clock), &vm.clock, @@ -681,6 +685,8 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { } func (vm *VM) SetState(_ context.Context, state snow.State) error { + vm.vmLock.Lock() + defer vm.vmLock.Unlock() switch state { case snow.StateSyncing: vm.bootstrapped.Set(false) @@ -721,21 +727,15 @@ func (vm *VM) onNormalOperationsStarted() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel - // sync validators first - if err := vm.validatorsManager.Sync(ctx); err != nil { - return fmt.Errorf("failed to update validators: %w", err) - } - vdrIDs := vm.validatorsManager.GetNodeIDs().List() - // Then start tracking with updated validators - // StartTracking initializes the uptime tracking with the known validators - // and update their uptime to account for the time we were being offline. - if err := vm.validatorsManager.StartTracking(vdrIDs); err != nil { - return fmt.Errorf("failed to start tracking uptime: %w", err) + // Start the validators manager + if err := vm.validatorsManager.Initialize(ctx); err != nil { + return fmt.Errorf("failed to initialize validators manager: %w", err) } + // dispatch validator set update vm.shutdownWg.Add(1) go func() { - vm.validatorsManager.DispatchSync(ctx) + vm.validatorsManager.DispatchSync(ctx, &vm.vmLock) vm.shutdownWg.Done() }() @@ -788,10 +788,13 @@ func (vm *VM) onNormalOperationsStarted() error { } // NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work. 
- gossipStats := NewGossipStats() vm.builder = vm.NewBlockBuilder(vm.toEngine) vm.builder.awaitSubmittedTxs() - vm.Network.SetGossipHandler(NewGossipHandler(vm, gossipStats)) + + var p2pValidators p2p.ValidatorSet = &validatorSet{} + if vm.config.PullGossipFrequency.Duration > 0 { + p2pValidators = vm.p2pValidators + } if vm.ethTxGossipHandler == nil { vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( @@ -802,7 +805,7 @@ func (vm *VM) onNormalOperationsStarted() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.p2pValidators, + p2pValidators, ) } @@ -827,15 +830,20 @@ func (vm *VM) onNormalOperationsStarted() error { } } - vm.shutdownWg.Add(2) - go func() { - gossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) - vm.shutdownWg.Done() - }() - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) - vm.shutdownWg.Done() - }() + if vm.config.PushGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } + if vm.config.PullGossipFrequency.Duration > 0 { + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + } return nil } @@ -861,6 +869,8 @@ func (vm *VM) setAppRequestHandlers() { // Shutdown implements the snowman.ChainVM interface func (vm *VM) Shutdown(context.Context) error { + vm.vmLock.Lock() + defer vm.vmLock.Unlock() if vm.ctx == nil { return nil } @@ -868,12 +878,8 @@ func (vm *VM) Shutdown(context.Context) error { vm.cancel() } if vm.bootstrapped.Get() { - vdrIDs := vm.validatorsManager.GetNodeIDs().List() - if err := vm.validatorsManager.StopTracking(vdrIDs); err != nil { - return fmt.Errorf("failed to stop tracking uptime: %w", err) - } - if err := vm.validatorsManager.WriteState(); err != nil { - return fmt.Errorf("failed to write validator: %w", err) + if err := vm.validatorsManager.Shutdown(); err != nil { + return fmt.Errorf("failed to shutdown validators manager: %w", err) } } vm.Network.Shutdown() @@ -1262,6 +1268,9 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error } func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + vm.vmLock.Lock() + defer vm.vmLock.Unlock() + if err := vm.validatorsManager.Connect(nodeID); err != nil { return fmt.Errorf("uptime manager failed to connect node %s: %w", nodeID, err) } @@ -1269,6 +1278,9 @@ func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version } func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + vm.vmLock.Lock() + defer vm.vmLock.Unlock() + if err := vm.validatorsManager.Disconnect(nodeID); err != nil { return fmt.Errorf("uptime manager failed to disconnect node %s: %w", nodeID, err) } diff --git a/plugin/evm/vm_database.go b/plugin/evm/vm_database.go index 16e9fae84c..f2e2d1b56f 100644 --- a/plugin/evm/vm_database.go +++ b/plugin/evm/vm_database.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/log" "github.com/ava-labs/subnet-evm/core/rawdb" + "github.com/ava-labs/subnet-evm/plugin/evm/config" "github.com/ava-labs/subnet-evm/plugin/evm/database" ) @@ -110,7 +111,7 @@ func (vm *VM) useStandaloneDatabase(acceptedDB avalanchedatabase.Database) (bool // getDatabaseConfig returns the database 
configuration for the chain // to be used by separate, standalone database. -func getDatabaseConfig(config Config, chainDataDir string) (node.DatabaseConfig, error) { +func getDatabaseConfig(config config.Config, chainDataDir string) (node.DatabaseConfig, error) { var ( configBytes []byte err error diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index d617ff824f..64ce4680b5 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" @@ -36,7 +35,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/libevm/trie" @@ -50,6 +48,7 @@ import ( "github.com/ava-labs/subnet-evm/metrics" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/config" "github.com/ava-labs/subnet-evm/precompile/allowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager" @@ -68,8 +67,6 @@ var ( testMinGasPrice int64 = 225_000_000_000 testKeys []*ecdsa.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] - username = "Johns" - password = "CjasdjhiPeirbSenfeI13" // #nosec G101 firstTxAmount = new(big.Int).Mul(big.NewInt(testMinGasPrice), big.NewInt(21000*100)) genesisBalance = new(big.Int).Mul(big.NewInt(testMinGasPrice), big.NewInt(21000*1000)) @@ -156,16 +153,6 @@ func setupGenesis( atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) - // NB: this lock is intentionally left locked when this function returns. - // The caller of this function is responsible for unlocking. 
- ctx.Lock.Lock() - - userKeystore := keystore.New(logging.NoLog{}, memdb.New()) - if err := userKeystore.CreateUser(username, password); err != nil { - t.Fatal(err) - } - ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) - issuer := make(chan commonEng.Message, 1) prefixedDB := prefixdb.New([]byte{1}, baseDB) return ctx, prefixedDB, genesisBytes, issuer, atomicMemory @@ -228,8 +215,8 @@ func TestVMConfigDefaults(t *testing.T) { configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) _, vm, _, _ := GenesisVM(t, false, "", configJSON, "") - var vmConfig Config - vmConfig.SetDefaults() + var vmConfig config.Config + vmConfig.SetDefaults(defaultTxPoolConfig) vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") @@ -240,8 +227,8 @@ func TestVMNilConfig(t *testing.T) { _, vm, _, _ := GenesisVM(t, false, "", "", "") // VM Config should match defaults if no config is passed in - var vmConfig Config - vmConfig.SetDefaults() + var vmConfig config.Config + vmConfig.SetDefaults(defaultTxPoolConfig) require.Equal(t, vmConfig, vm.config, "VM Config should match default config") require.NoError(t, vm.Shutdown(context.Background())) } @@ -2623,8 +2610,8 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { } etherBase := common.HexToAddress("0x0123456789") - c := Config{} - c.SetDefaults() + c := config.Config{} + c.SetDefaults(defaultTxPoolConfig) c.FeeRecipient = etherBase.String() configJSON, err := json.Marshal(c) if err != nil { @@ -2683,8 +2670,8 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { require.NoError(t, err) etherBase := common.HexToAddress("0x0123456789") // give custom ether base - c := Config{} - c.SetDefaults() + c := config.Config{} + c.SetDefaults(defaultTxPoolConfig) c.FeeRecipient = etherBase.String() configJSON, err := json.Marshal(c) require.NoError(t, err) @@ -2824,8 +2811,8 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { genesisJSON, err := genesis.MarshalJSON() require.NoError(t, err) etherBase := common.HexToAddress("0x0123456789") // give custom ether base - c := Config{} - c.SetDefaults() + c := config.Config{} + c.SetDefaults(defaultTxPoolConfig) c.FeeRecipient = etherBase.String() configJSON, err := json.Marshal(c) require.NoError(t, err) diff --git a/plugin/evm/vm_validators_test.go b/plugin/evm/vm_validators_test.go index d8d0719fde..5e7fa523ca 100644 --- a/plugin/evm/vm_validators_test.go +++ b/plugin/evm/vm_validators_test.go @@ -151,8 +151,8 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after SyncFrequency require.EventuallyWithT(func(c *assert.CollectT) { - vm.ctx.Lock.Lock() - defer vm.ctx.Lock.Unlock() + vm.vmLock.Lock() + defer vm.vmLock.Unlock() assert.Len(c, vm.validatorsManager.GetNodeIDs(), 4) newValidator, err := vm.validatorsManager.GetValidator(newValidationID) assert.NoError(c, err) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 28b14d5184..6ceae3733a 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -22,6 +22,7 @@ import ( avagoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/set" 
"github.com/ava-labs/avalanchego/vms/components/chain" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -281,16 +282,18 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned defer logsSub.Unsubscribe() nodeID1 := ids.GenerateTestNodeID() - blsSecretKey1, err := bls.NewSigner() + blsSecretKey1, err := localsigner.New() require.NoError(err) blsPublicKey1 := blsSecretKey1.PublicKey() - blsSignature1 := blsSecretKey1.Sign(unsignedMessage.Bytes()) + blsSignature1, err := blsSecretKey1.Sign(unsignedMessage.Bytes()) + require.NoError(err) nodeID2 := ids.GenerateTestNodeID() - blsSecretKey2, err := bls.NewSigner() + blsSecretKey2, err := localsigner.New() require.NoError(err) blsPublicKey2 := blsSecretKey2.PublicKey() - blsSignature2 := blsSecretKey2.Sign(unsignedMessage.Bytes()) + blsSignature2, err := blsSecretKey2.Sign(unsignedMessage.Bytes()) + require.NoError(err) blsAggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) require.NoError(err) @@ -551,13 +554,16 @@ func testReceiveWarpMessage( weight uint64 } newSigner := func(networkID ids.ID, weight uint64) signer { - secret, err := bls.NewSigner() + secret, err := localsigner.New() require.NoError(err) + sig, err := secret.Sign(unsignedMessage.Bytes()) + require.NoError(err) + return signer{ networkID: networkID, nodeID: ids.GenerateTestNodeID(), secret: secret, - signature: secret.Sign(unsignedMessage.Bytes()), + signature: sig, weight: weight, } } diff --git a/plugin/runner/runner.go b/plugin/runner/runner.go index 9b5b5efe29..e7d9ea6c3d 100644 --- a/plugin/runner/runner.go +++ b/plugin/runner/runner.go @@ -22,7 +22,7 @@ func Run(versionStr string) { os.Exit(1) } if printVersion && versionStr != "" { - fmt.Printf(versionStr) + fmt.Println(versionStr) os.Exit(0) } if err := ulimit.Set(ulimit.DefaultFDLimit, logging.NoLog{}); err != nil { diff --git a/precompile/allowlist/config.go b/precompile/allowlist/config.go index 3b147019ee..628d0bee08 100644 --- a/precompile/allowlist/config.go +++ b/precompile/allowlist/config.go @@ -5,6 +5,7 @@ package allowlist import ( "fmt" + "slices" "github.com/ava-labs/libevm/common" "github.com/ava-labs/subnet-evm/precompile/contract" @@ -50,15 +51,7 @@ func (c *AllowListConfig) Equal(other *AllowListConfig) bool { // areEqualAddressLists returns true iff [a] and [b] have the same addresses in the same order. func areEqualAddressLists(current []common.Address, other []common.Address) bool { - if len(current) != len(other) { - return false - } - for i, address := range current { - if address != other[i] { - return false - } - } - return true + return slices.Equal(current, other) } // Verify returns an error if there is an overlapping address between admin and enabled roles diff --git a/precompile/contract/mocks.go b/precompile/contract/mocks.go index eb8d371e75..c911743118 100644 --- a/precompile/contract/mocks.go +++ b/precompile/contract/mocks.go @@ -15,7 +15,6 @@ import ( snow "github.com/ava-labs/avalanchego/snow" common "github.com/ava-labs/libevm/common" - types "github.com/ava-labs/libevm/core/types" precompileconfig "github.com/ava-labs/subnet-evm/precompile/precompileconfig" uint256 "github.com/holiman/uint256" gomock "go.uber.org/mock/gomock" @@ -204,7 +203,7 @@ func (mr *MockStateDBMockRecorder) AddBalance(arg0, arg1 any) *gomock.Call { } // AddLog mocks base method. 
-func (m *MockStateDB) AddLog(arg0 *types.Log) { +func (m *MockStateDB) AddLog(arg0 *Log) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddLog", arg0) } diff --git a/precompile/contracts/rewardmanager/contract.go b/precompile/contracts/rewardmanager/contract.go index f179b8c924..4296120197 100644 --- a/precompile/contracts/rewardmanager/contract.go +++ b/precompile/contracts/rewardmanager/contract.go @@ -167,7 +167,7 @@ func GetStoredRewardAddress(stateDB contract.StateReader) (common.Address, bool) return common.BytesToAddress(val.Bytes()), val == allowFeeRecipientsAddressValue } -// StoredRewardAddress stores the given [val] under rewardAddressStorageKey. +// StoreRewardAddress stores the given [val] under rewardAddressStorageKey. func StoreRewardAddress(stateDB contract.StateDB, val common.Address) { stateDB.SetState(ContractAddress, rewardAddressStorageKey, common.BytesToHash(val.Bytes())) } diff --git a/precompile/contracts/rewardmanager/contract_test.go b/precompile/contracts/rewardmanager/contract_test.go index a6dceb8ec1..b34e5076ff 100644 --- a/precompile/contracts/rewardmanager/contract_test.go +++ b/precompile/contracts/rewardmanager/contract_test.go @@ -366,7 +366,7 @@ var ( ReadOnly: true, ExpectedErr: vmerrs.ErrWriteProtection.Error(), }, - "readOnly set reward addresss with allowed role fails": { + "readOnly set reward address with allowed role fails": { Caller: allowlist.TestEnabledAddr, BeforeHook: allowlist.SetDefaultRoles(Module.Address), InputFn: func(t testing.TB) []byte { diff --git a/precompile/contracts/warp/config.go b/precompile/contracts/warp/config.go index 67ee7ee97b..b4a87e5fe8 100644 --- a/precompile/contracts/warp/config.go +++ b/precompile/contracts/warp/config.go @@ -31,16 +31,17 @@ var ( ) var ( - errOverflowSignersGasCost = errors.New("overflow calculating warp signers gas cost") - errInvalidPredicateBytes = errors.New("cannot unpack predicate bytes") - errInvalidWarpMsg = errors.New("cannot unpack warp message") - errCannotParseWarpMsg = errors.New("cannot parse warp message") - errInvalidWarpMsgPayload = errors.New("cannot unpack warp message payload") - errInvalidAddressedPayload = errors.New("cannot unpack addressed payload") - errInvalidBlockHashPayload = errors.New("cannot unpack block hash payload") - errCannotGetNumSigners = errors.New("cannot fetch num signers from warp message") - errWarpCannotBeActivated = errors.New("warp cannot be activated before Durango") - errFailedVerification = errors.New("cannot verify warp signature") + errOverflowSignersGasCost = errors.New("overflow calculating warp signers gas cost") + errInvalidPredicateBytes = errors.New("cannot unpack predicate bytes") + errInvalidWarpMsg = errors.New("cannot unpack warp message") + errCannotParseWarpMsg = errors.New("cannot parse warp message") + errInvalidWarpMsgPayload = errors.New("cannot unpack warp message payload") + errInvalidAddressedPayload = errors.New("cannot unpack addressed payload") + errInvalidBlockHashPayload = errors.New("cannot unpack block hash payload") + errCannotGetNumSigners = errors.New("cannot fetch num signers from warp message") + errWarpCannotBeActivated = errors.New("warp cannot be activated before Durango") + errFailedVerification = errors.New("cannot verify warp signature") + errCannotRetrieveValidatorSet = errors.New("cannot retrieve validator set") ) // Config implements the precompileconfig.Config interface and @@ -208,16 +209,25 @@ func (c *Config) VerifyPredicate(predicateContext *precompileconfig.PredicateCon warpMsg.SourceChainID, 
c.RequirePrimaryNetworkSigners, ) - err = warpMsg.Signature.Verify( + + validatorSet, err := warp.GetCanonicalValidatorSetFromChainID( context.Background(), - &warpMsg.UnsignedMessage, - predicateContext.SnowCtx.NetworkID, state, predicateContext.ProposerVMBlockCtx.PChainHeight, + warpMsg.UnsignedMessage.SourceChainID, + ) + if err != nil { + log.Debug("failed to retrieve canonical validator set", "msgID", warpMsg.ID(), "err", err) + return fmt.Errorf("%w: %w", errCannotRetrieveValidatorSet, err) + } + + err = warpMsg.Signature.Verify( + &warpMsg.UnsignedMessage, + predicateContext.SnowCtx.NetworkID, + validatorSet, quorumNumerator, WarpQuorumDenominator, ) - if err != nil { log.Debug("failed to verify warp signature", "msgID", warpMsg.ID(), "err", err) return fmt.Errorf("%w: %w", errFailedVerification, err) diff --git a/precompile/contracts/warp/contract_test.go b/precompile/contracts/warp/contract_test.go index 4738c2d1ed..a3ca3bf06d 100644 --- a/precompile/contracts/warp/contract_test.go +++ b/precompile/contracts/warp/contract_test.go @@ -12,6 +12,7 @@ import ( agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/warp" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/libevm/common" "github.com/ava-labs/subnet-evm/core/extstate" @@ -95,7 +96,7 @@ func TestSendWarpMessage(t *testing.T) { sendWarpMessagePayload, ) require.NoError(t, err) - unsignedWarpMessage, err := warp.NewUnsignedMessage( + unsignedWarpMessage, err := avalancheWarp.NewUnsignedMessage( defaultSnowCtx.NetworkID, blockchainID, sendWarpMessageAddressedPayload.Bytes(), @@ -746,7 +747,7 @@ func TestPackEvents(t *testing.T) { ) require.NoError(t, err) - unsignedWarpMessage, err := warp.NewUnsignedMessage( + unsignedWarpMessage, err := avalancheWarp.NewUnsignedMessage( networkID, sourceChainID, addressedPayload.Bytes(), diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index 65d202de09..460dd4be2c 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -17,6 +17,7 @@ import ( agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/set" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" @@ -93,7 +94,10 @@ func init() { } for _, testVdr := range testVdrs { - blsSignature := testVdr.sk.Sign(unsignedMsg.Bytes()) + blsSignature, err := testVdr.sk.Sign(unsignedMsg.Bytes()) + if err != nil { + panic(err) + } blsSignatures = append(blsSignatures, blsSignature) } @@ -111,7 +115,7 @@ func (v *testValidator) Compare(o *testValidator) int { } func newTestValidator() *testValidator { - sk, err := bls.NewSigner() + sk, err := localsigner.New() if err != nil { panic(err) } @@ -234,13 +238,16 @@ func testWarpMessageFromPrimaryNetwork(t *testing.T, requirePrimaryNetworkSigner getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) blsSignatures := make([]*bls.Signature, 0, numKeys) for i := 0; i < numKeys; i++ { + sig, err := testVdrs[i].sk.Sign(unsignedMsg.Bytes()) + require.NoError(err) + validatorOutput := &validators.GetValidatorOutput{ NodeID: 
testVdrs[i].nodeID, Weight: 20, PublicKey: testVdrs[i].vdr.PublicKey, } getValidatorsOutput[testVdrs[i].nodeID] = validatorOutput - blsSignatures = append(blsSignatures, testVdrs[i].sk.Sign(unsignedMsg.Bytes())) + blsSignatures = append(blsSignatures, sig) } aggregateSignature, err := bls.AggregateSignatures(blsSignatures) require.NoError(err) diff --git a/precompile/contracts/warp/signature_verification_test.go b/precompile/contracts/warp/signature_verification_test.go index d52f0a0f89..e3b33e2d4d 100644 --- a/precompile/contracts/warp/signature_verification_test.go +++ b/precompile/contracts/warp/signature_verification_test.go @@ -19,12 +19,13 @@ import ( ) type signatureTest struct { - name string - stateF func(*gomock.Controller) validators.State - quorumNum uint64 - quorumDen uint64 - msgF func(*require.Assertions) *avalancheWarp.Message - err error + name string + stateF func(*gomock.Controller) validators.State + quorumNum uint64 + quorumDen uint64 + msgF func(*require.Assertions) *avalancheWarp.Message + verifyErr error + canonicalErr error } // This test copies the test coverage from https://github.com/ava-labs/avalanchego/blob/0117ab96/vms/platformvm/warp/signature_test.go#L137. @@ -55,7 +56,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: errTest, + canonicalErr: errTest, }, { name: "can't get validator set", @@ -82,7 +83,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: errTest, + canonicalErr: errTest, }, { name: "weight overflow", @@ -122,7 +123,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrWeightOverflow, + canonicalErr: avalancheWarp.ErrWeightOverflow, }, { name: "invalid bit set index", @@ -152,7 +153,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidBitSet, + verifyErr: avalancheWarp.ErrInvalidBitSet, }, { name: "unknown index", @@ -185,7 +186,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrUnknownValidator, + verifyErr: avalancheWarp.ErrUnknownValidator, }, { name: "insufficient weight", @@ -212,8 +213,10 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) - vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) + vdr0Sig, err := testVdrs[0].sk.Sign(unsignedBytes) + require.NoError(err) + vdr1Sig, err := testVdrs[1].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -229,7 +232,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInsufficientWeight, + verifyErr: avalancheWarp.ErrInsufficientWeight, }, { name: "can't parse sig", @@ -263,7 +266,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrParseSignature, + verifyErr: avalancheWarp.ErrParseSignature, }, { name: "no validators", @@ -284,7 +287,8 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) + vdr0Sig, err := testVdrs[0].sk.Sign(unsignedBytes) + require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) @@ -298,7 +302,7 @@ func 
TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: bls.ErrNoPublicKeys, + verifyErr: bls.ErrNoPublicKeys, }, { name: "invalid signature (substitute)", @@ -323,10 +327,12 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) + vdr0Sig, err := testVdrs[0].sk.Sign(unsignedBytes) + require.NoError(err) // Give sig from vdr[2] even though the bit vector says it // should be from vdr[1] - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr2Sig, err := testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -342,7 +348,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: "invalid signature (missing one)", @@ -367,7 +373,8 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) + vdr0Sig, err := testVdrs[0].sk.Sign(unsignedBytes) + require.NoError(err) // Don't give the sig from vdr[1] aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) @@ -382,7 +389,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: "invalid signature (extra one)", @@ -407,11 +414,14 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) unsignedBytes := unsignedMsg.Bytes() - vdr0Sig := testVdrs[0].sk.Sign(unsignedBytes) - vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) + vdr0Sig, err := testVdrs[0].sk.Sign(unsignedBytes) + require.NoError(err) + vdr1Sig, err := testVdrs[1].sk.Sign(unsignedBytes) + require.NoError(err) // Give sig from vdr[2] even though the bit vector doesn't have // it - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr2Sig, err := testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -427,7 +437,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: avalancheWarp.ErrInvalidSignature, + verifyErr: avalancheWarp.ErrInvalidSignature, }, { name: "valid signature", @@ -454,8 +464,10 @@ func TestSignatureVerification(t *testing.T) { signers.Add(2) unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr1Sig, err := testVdrs[1].sk.Sign(unsignedBytes) + require.NoError(err) + vdr2Sig, err := testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -471,7 +483,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (boundary)", @@ -498,8 +510,10 @@ func TestSignatureVerification(t *testing.T) { signers.Add(2) unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr1Sig, err := testVdrs[1].sk.Sign(unsignedBytes) + require.NoError(err) + vdr2Sig, err := 
testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -515,7 +529,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (missing key)", @@ -559,8 +573,10 @@ func TestSignatureVerification(t *testing.T) { signers.Add(1) // vdr[2] unsignedBytes := unsignedMsg.Bytes() - vdr1Sig := testVdrs[1].sk.Sign(unsignedBytes) - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr1Sig, err := testVdrs[1].sk.Sign(unsignedBytes) + require.NoError(err) + vdr2Sig, err := testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} @@ -576,7 +592,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, { name: "valid signature (duplicate key)", @@ -621,7 +637,8 @@ func TestSignatureVerification(t *testing.T) { unsignedBytes := unsignedMsg.Bytes() // Because vdr[1] and vdr[2] share a key, only one of them sign. - vdr2Sig := testVdrs[2].sk.Sign(unsignedBytes) + vdr2Sig, err := testVdrs[2].sk.Sign(unsignedBytes) + require.NoError(err) aggSigBytes := [bls.SignatureLen]byte{} copy(aggSigBytes[:], bls.SignatureToBytes(vdr2Sig)) @@ -635,7 +652,7 @@ func TestSignatureVerification(t *testing.T) { require.NoError(err) return msg }, - err: nil, + verifyErr: nil, }, } @@ -648,16 +665,24 @@ func TestSignatureVerification(t *testing.T) { msg := tt.msgF(require) pChainState := tt.stateF(ctrl) - err := msg.Signature.Verify( + validatorSet, err := avalancheWarp.GetCanonicalValidatorSetFromChainID( context.Background(), - &msg.UnsignedMessage, - networkID, pChainState, pChainHeight, + msg.UnsignedMessage.SourceChainID, + ) + require.ErrorIs(err, tt.canonicalErr) + if err != nil { + return + } + err = msg.Signature.Verify( + &msg.UnsignedMessage, + networkID, + validatorSet, tt.quorumNum, tt.quorumDen, ) - require.ErrorIs(err, tt.err) + require.ErrorIs(err, tt.verifyErr) }) } } diff --git a/rpc/client_test.go b/rpc/client_test.go index 6a13e487da..826d1eed48 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -723,7 +723,6 @@ func TestClientHTTP(t *testing.T) { ) defer client.Close() for i := range results { - i := i go func() { errc <- client.Call(&results[i], "test_echo", wantResult.String, wantResult.Int, wantResult.Args) }() diff --git a/rpc/http.go b/rpc/http.go index b8670a9df8..8b18ed3895 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -250,7 +250,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos if _, err := buf.ReadFrom(resp.Body); err == nil { body = buf.Bytes() } - + resp.Body.Close() return nil, HTTPError{ Status: resp.Status, StatusCode: resp.StatusCode, diff --git a/rpc/types_test.go b/rpc/types_test.go index 824c1c8155..d37632d62e 100644 --- a/rpc/types_test.go +++ b/rpc/types_test.go @@ -145,7 +145,6 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { {"earliest", int64(EarliestBlockNumber)}, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number)) marshalled, err := json.Marshal(bnh) diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh index 13bad83552..b195af6785 100755 --- 
a/scripts/build_antithesis_images.sh +++ b/scripts/build_antithesis_images.sh @@ -54,7 +54,7 @@ echo "Generating compose configuration" gen_antithesis_compose_config "${IMAGE_TAG}" "${SUBNET_EVM_PATH}/tests/antithesis/gencomposeconfig" \ "${SUBNET_EVM_PATH}/build/antithesis" \ "AVALANCHEGO_PATH=${AVALANCHEGO_CLONE_PATH}/build/avalanchego \ - AVALANCHEGO_PLUGIN_DIR=${DEFAULT_PLUGIN_DIR}" + AVAGO_PLUGIN_DIR=${DEFAULT_PLUGIN_DIR}" build_antithesis_images "${GO_VERSION}" "${IMAGE_PREFIX}" "antithesis-subnet-evm" "${IMAGE_TAG}" \ "${AVALANCHEGO_IMAGE_TAG}" "${SUBNET_EVM_PATH}/tests/antithesis/Dockerfile" \ diff --git a/scripts/build_docker_image.sh b/scripts/build_docker_image.sh index 93474e4a36..48a9a016f8 100755 --- a/scripts/build_docker_image.sh +++ b/scripts/build_docker_image.sh @@ -36,21 +36,37 @@ BUILD_IMAGE_ID=${BUILD_IMAGE_ID:-"${CURRENT_BRANCH}"} # # Reference: https://docs.docker.com/build/buildkit/ DOCKER_CMD="docker buildx build" - +ispush=0 if [[ -n "${PUBLISH}" ]]; then - DOCKER_CMD="${DOCKER_CMD} --push" - - echo "Pushing $DOCKERHUB_REPO:$BUILD_IMAGE_ID" - + echo "Pushing $IMAGE_NAME:$BUILD_IMAGE_ID" + ispush=1 # A populated DOCKER_USERNAME env var triggers login if [[ -n "${DOCKER_USERNAME:-}" ]]; then echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin fi fi -# Build a multi-arch image if requested +# Build a specified platform image if requested if [[ -n "${PLATFORMS}" ]]; then DOCKER_CMD="${DOCKER_CMD} --platform=${PLATFORMS}" + if [[ "$PLATFORMS" == *,* ]]; then ## Multi-arch + if [[ "${IMAGE_NAME}" != *"/"* ]]; then + echo "ERROR: Multi-arch images must be pushed to a registry." + exit 1 + fi + ispush=1 + fi +fi + +if [[ $ispush -eq 1 ]]; then + DOCKER_CMD="${DOCKER_CMD} --push" +else + ## Single arch + # + # Building a single-arch image with buildx and having the resulting image show up + # in the local store of docker images (ala 'docker build') requires explicitly + # loading it from the buildx store with '--load'. + DOCKER_CMD="${DOCKER_CMD} --load" fi VM_ID=${VM_ID:-"${DEFAULT_VM_ID}"} @@ -61,8 +77,35 @@ fi # Default to the release image. Will need to be overridden when testing against unreleased versions. AVALANCHEGO_NODE_IMAGE="${AVALANCHEGO_NODE_IMAGE:-${AVALANCHEGO_IMAGE_NAME}:${AVALANCHE_VERSION}}" -echo "Building Docker Image: $DOCKERHUB_REPO:$BUILD_IMAGE_ID based of AvalancheGo@$AVALANCHE_VERSION" -${DOCKER_CMD} -t "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" -t "$DOCKERHUB_REPO:${DOCKERHUB_TAG}" \ +# Build the avalanchego image if it cannot be pulled. This will usually be due to +# AVALANCHE_VERSION being not yet merged since the image is published post-merge. +if ! docker pull "${AVALANCHEGO_NODE_IMAGE}"; then + # Build a multi-arch avalanchego image if the subnet-evm image build is multi-arch + BUILD_MULTI_ARCH="$([[ "$PLATFORMS" =~ , ]] && echo 1 || echo "")" + + # - Use a image name without a repository (i.e. without 'avaplatform/' prefix ) to build a + # local single-arch image that will not be pushed. + # - Use a image name with a repository to build a multi-arch image that will be pushed. + AVALANCHEGO_LOCAL_IMAGE_NAME="${AVALANCHEGO_LOCAL_IMAGE_NAME:-avalanchego}" + + if [[ -n "${BUILD_MULTI_ARCH}" && "${AVALANCHEGO_LOCAL_IMAGE_NAME}" != *"/"* ]]; then + echo "ERROR: Multi-arch images must be pushed to a registry." 
+ exit 1 + fi + + AVALANCHEGO_NODE_IMAGE="${AVALANCHEGO_LOCAL_IMAGE_NAME}:${AVALANCHE_VERSION}" + echo "Building ${AVALANCHEGO_NODE_IMAGE} locally" + + source "${SUBNET_EVM_PATH}"/scripts/lib_avalanchego_clone.sh + clone_avalanchego "${AVALANCHE_VERSION}" + SKIP_BUILD_RACE=1 \ + DOCKER_IMAGE="${AVALANCHEGO_LOCAL_IMAGE_NAME}" \ + BUILD_MULTI_ARCH="${BUILD_MULTI_ARCH}" \ + "${AVALANCHEGO_CLONE_PATH}"/scripts/build_image.sh +fi + +echo "Building Docker Image: $IMAGE_NAME:$BUILD_IMAGE_ID based of AvalancheGo@$AVALANCHE_VERSION" +${DOCKER_CMD} -t "$IMAGE_NAME:$BUILD_IMAGE_ID" -t "$IMAGE_NAME:${DOCKERHUB_TAG}" \ "$SUBNET_EVM_PATH" -f "$SUBNET_EVM_PATH/Dockerfile" \ --build-arg AVALANCHEGO_NODE_IMAGE="$AVALANCHEGO_NODE_IMAGE" \ --build-arg SUBNET_EVM_COMMIT="$SUBNET_EVM_COMMIT" \ @@ -70,6 +113,6 @@ ${DOCKER_CMD} -t "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" -t "$DOCKERHUB_REPO:${DOCKERH --build-arg VM_ID="$VM_ID" if [[ -n "${PUBLISH}" && $CURRENT_BRANCH == "master" ]]; then - echo "Tagging current image as $DOCKERHUB_REPO:latest" - docker buildx imagetools create -t "$DOCKERHUB_REPO:latest" "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" + echo "Tagging current image as $IMAGE_NAME:latest" + docker buildx imagetools create -t "$IMAGE_NAME:latest" "$IMAGE_NAME:$BUILD_IMAGE_ID" fi diff --git a/scripts/constants.sh b/scripts/constants.sh index 4fad925775..4cb00af244 100644 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -8,12 +8,13 @@ set -euo pipefail # Set the PATHS GOPATH="$(go env GOPATH)" DEFAULT_PLUGIN_DIR="${HOME}/.avalanchego/plugins" +DEFAULT_VM_NAME="subnet-evm" DEFAULT_VM_ID="srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy" # Avalabs docker hub # avaplatform/avalanchego - defaults to local as to avoid unintentional pushes -# You should probably set it - export DOCKER_REPO='avaplatform/subnet-evm' -DOCKERHUB_REPO=${DOCKER_REPO:-"subnet-evm"} +# You should probably set it - export IMAGE_NAME='avaplatform/subnet-evm' +IMAGE_NAME=${IMAGE_NAME:-"subnet-evm"} # Shared between ./scripts/build_docker_image.sh and ./scripts/tests.build_docker_image.sh AVALANCHEGO_IMAGE_NAME="${AVALANCHEGO_IMAGE_NAME:-avaplatform/avalanchego}" @@ -51,3 +52,6 @@ fi # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. export CGO_CFLAGS="-O2 -D__BLST_PORTABLE__" + +# CGO_ENABLED is required for multi-arch builds. +export CGO_ENABLED=1 diff --git a/scripts/eth-allowed-packages.txt b/scripts/eth-allowed-packages.txt index 6f69e52bcd..0f200f77f5 100644 --- a/scripts/eth-allowed-packages.txt +++ b/scripts/eth-allowed-packages.txt @@ -25,6 +25,7 @@ "github.com/ava-labs/libevm/ethdb/pebble" "github.com/ava-labs/libevm/event" "github.com/ava-labs/libevm/libevm" +"github.com/ava-labs/libevm/libevm/legacy" "github.com/ava-labs/libevm/libevm/stateconf" "github.com/ava-labs/libevm/log" "github.com/ava-labs/libevm/params" diff --git a/scripts/lint.sh b/scripts/lint.sh deleted file mode 100755 index 643291d934..0000000000 --- a/scripts/lint.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -# Upstream is compatible with v1.54.x at time of this writing, and -# checking for this specific version is an attempt to avoid skew -# between local and CI execution. 
The latest version (v1.55.1) seems -# to cause spurious failures -KNOWN_GOOD_VERSION="v1.56" -VERSION="$(golangci-lint --version | sed -e 's+golangci-lint has version \(v1.*\)\..* built.*+\1+')" -if [[ "${VERSION}" != "${KNOWN_GOOD_VERSION}" ]]; then - echo "expected golangci-lint ${KNOWN_GOOD_VERSION}, but ${VERSION} was used" - echo "${KNOWN_GOOD_VERSION} is used in CI and should be used locally to ensure compatible results" - echo "installation command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@${KNOWN_GOOD_VERSION}" - exit 255 -fi - -golangci-lint run --path-prefix=. --timeout 3m diff --git a/scripts/tests.build_docker_image.sh b/scripts/tests.build_docker_image.sh index 8c928e25a5..a60f56006f 100755 --- a/scripts/tests.build_docker_image.sh +++ b/scripts/tests.build_docker_image.sh @@ -5,34 +5,80 @@ set -euo pipefail # Sanity check the image build by attempting to build and run the image without error. # Directory above this script -SUBNET_EVM_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +SUBNET_EVM_PATH=$( + cd "$(dirname "${BASH_SOURCE[0]}")" + cd .. && pwd +) # Load the constants source "$SUBNET_EVM_PATH"/scripts/constants.sh # Load the versions source "$SUBNET_EVM_PATH"/scripts/versions.sh -# Use the default node image -AVALANCHEGO_NODE_IMAGE="${AVALANCHEGO_IMAGE_NAME}:${AVALANCHE_VERSION}" - -# Build the avalanchego image if it cannot be pulled. This will usually be due to -# AVALANCHE_VERSION being not yet merged since the image is published post-merge. -if ! docker pull "${AVALANCHEGO_NODE_IMAGE}"; then - # Use a image name without a repository (i.e. without 'avaplatform/' prefix ) to build a - # local image that will not be pushed. - export AVALANCHEGO_IMAGE_NAME="avalanchego" - echo "Building ${AVALANCHEGO_IMAGE_NAME}:${AVALANCHE_VERSION} locally" - - source "${SUBNET_EVM_PATH}"/scripts/lib_avalanchego_clone.sh - clone_avalanchego "${AVALANCHE_VERSION}" - SKIP_BUILD_RACE=1 DOCKER_IMAGE="${AVALANCHEGO_IMAGE_NAME}" "${AVALANCHEGO_CLONE_PATH}"/scripts/build_image.sh -fi - -# Build a local image -bash -x "${SUBNET_EVM_PATH}"/scripts/build_docker_image.sh - -# Check that the image can be run and contains the plugin -echo "Checking version of the plugin provided by the image" -docker run -t --rm "${DOCKERHUB_REPO}:${DOCKERHUB_TAG}" /avalanchego/build/plugins/"${DEFAULT_VM_ID}" --version -echo "" # --version output doesn't include a newline -echo "Successfully checked image build" +build_and_test() { + local imagename="${1}" + local vm_id="${2}" + local multiarch_image="${3}" + # The local image name will be used to build a local image if the + # current avalanchego version lacks a published image. 
+ local avalanchego_local_image_name="${4}" + + if [[ "${multiarch_image}" == true ]]; then + local arches="linux/amd64,linux/arm64" + else + # Test only the host platform for single arch builds + local host_arch + host_arch="$(go env GOARCH)" + local arches="linux/$host_arch" + fi + + local imgtag="testtag" + + PLATFORMS="${arches}" \ + BUILD_IMAGE_ID="${imgtag}" \ + VM_ID=$"${vm_id}" \ + IMAGE_NAME="${imagename}" \ + AVALANCHEGO_LOCAL_IMAGE_NAME="${avalanchego_local_image_name}" \ + ./scripts/build_docker_image.sh + + echo "listing images" + docker images + + # Check all of the images expected to have been built + local target_images=( + "$imagename:$imgtag" + "$imagename:$DOCKERHUB_TAG" + ) + IFS=',' read -r -a archarray <<<"$arches" + for arch in "${archarray[@]}"; do + for target_image in "${target_images[@]}"; do + echo "checking sanity of image $target_image for $arch by running '${VM_ID} version'" + docker run -t --rm --platform "$arch" "$target_image" /avalanchego/build/plugins/"${VM_ID}" --version + done + done +} + +VM_ID="${VM_ID:-${DEFAULT_VM_ID}}" + +echo "checking build of single-arch image" +build_and_test "subnet-evm" "${VM_ID}" false "avalanchego" + +echo "starting local docker registry to allow verification of multi-arch image builds" +REGISTRY_CONTAINER_ID="$(docker run --rm -d -P registry:2)" +REGISTRY_PORT="$(docker port "$REGISTRY_CONTAINER_ID" 5000/tcp | grep -v "::" | awk -F: '{print $NF}')" + +echo "starting docker builder that supports multiplatform builds" +# - '--driver-opt network=host' enables the builder to use the local registry +docker buildx create --use --name ci-builder --driver-opt network=host + +# Ensure registry and builder cleanup on teardown +function cleanup { + echo "stopping local docker registry" + docker stop "${REGISTRY_CONTAINER_ID}" + echo "removing multiplatform builder" + docker buildx rm ci-builder +} +trap cleanup EXIT + +echo "checking build of multi-arch images" +build_and_test "localhost:${REGISTRY_PORT}/subnet-evm" "${VM_ID}" true "localhost:${REGISTRY_PORT}/avalanchego" diff --git a/scripts/versions.sh b/scripts/versions.sh old mode 100644 new mode 100755 index 096bc8aa19..25fa377aab --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,5 +4,25 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'2fb6d3f6'} + +if [[ -z ${AVALANCHE_VERSION:-} ]]; then + # Get module details from go.mod + MODULE_DETAILS="$(go list -m "github.com/ava-labs/avalanchego" 2>/dev/null)" + + # Extract the version part + AVALANCHE_VERSION="$(echo "${MODULE_DETAILS}" | awk '{print $2}')" + + # Check if the version matches the pattern where the last part is the module hash + # v*YYYYMMDDHHMMSS-abcdef123456 + # + # If not, the value is assumed to represent a tag + if [[ "${AVALANCHE_VERSION}" =~ ^v.*[0-9]{14}-[0-9a-f]{12}$ ]]; then + # Extract module hash from version + MODULE_HASH="$(echo "${AVALANCHE_VERSION}" | grep -Eo '[0-9a-f]{12}$')" + + # The first 8 chars of the hash is used as the tag of avalanchego images + AVALANCHE_VERSION="${MODULE_HASH::8}" + fi +fi + GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} diff --git a/tests/antithesis/Dockerfile.node b/tests/antithesis/Dockerfile.node index 67538fefad..ebd9711247 100644 --- a/tests/antithesis/Dockerfile.node +++ b/tests/antithesis/Dockerfile.node @@ -25,8 +25,11 @@ COPY --from=builder /build/commit_hash.txt /avalanchego/build/commit_hash.txt # Copy the antithesis dependencies into the container COPY --from=builder 
/instrumented/symbols /symbols
 
+# Configure the node with the location of the plugin
+ENV AVAGO_PLUGIN_DIR=/avalanchego/build/plugins
+
 # Copy the executable into the container
-COPY --from=builder /build/srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy\
-    /avalanchego/build/plugins/srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy
+COPY --from=builder $BUILDER_WORKDIR/build/srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy\
+    $AVAGO_PLUGIN_DIR/srEXiWaHuhNyGwPUi444Tu47ZEDwxTWrbQiuD7FmgSAQ6X7Dy
 
 # The node image's entrypoint will be reused.
diff --git a/tests/antithesis/gencomposeconfig/main.go b/tests/antithesis/gencomposeconfig/main.go
index 40cbf0037d..adc01df71f 100644
--- a/tests/antithesis/gencomposeconfig/main.go
+++ b/tests/antithesis/gencomposeconfig/main.go
@@ -32,10 +32,7 @@ func main() {
 		utils.NewTmpnetSubnet("subnet-evm", genesisPath, utils.DefaultChainConfig, network.Nodes...),
 	}
 
-	// Path to the plugin dir on subnet-evm node images that will be run by docker compose.
-	runtimePluginDir := "/avalanchego/build/plugins"
-
-	if err := antithesis.GenerateComposeConfig(network, baseImageName, runtimePluginDir); err != nil {
+	if err := antithesis.GenerateComposeConfig(network, baseImageName); err != nil {
 		log.Fatalf("failed to generate compose config: %v", err)
 	}
 }
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index c34cb44fc9..c8d527c509 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -319,7 +319,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 	return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil)
 }
 
-// lastRoot returns the latest root hash, or empty if nothing is cached.
+// lastHash returns the latest root hash, or empty if nothing is cached.
func (t *tester) lastHash() common.Hash { if len(t.roots) == 0 { return common.Hash{} diff --git a/utils/snow.go b/utils/snow.go index fe33a56512..f89b9cffdf 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade/upgradetest" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) @@ -26,7 +26,7 @@ var ( ) func TestSnowContext() *snow.Context { - sk, err := bls.NewSigner() + sk, err := localsigner.New() if err != nil { panic(err) } diff --git a/warp/aggregator/aggregator_test.go b/warp/aggregator/aggregator_test.go index 055d3edfa8..2a976bb662 100644 --- a/warp/aggregator/aggregator_test.go +++ b/warp/aggregator/aggregator_test.go @@ -14,11 +14,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) func newValidator(t testing.TB, weight uint64) (bls.Signer, *avalancheWarp.Validator) { - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) pk := sk.PublicKey() return sk, &avalancheWarp.Validator{ @@ -43,17 +44,21 @@ func TestAggregateSignatures(t *testing.T) { vdr1sk, vdr1 := newValidator(t, vdrWeight) vdr2sk, vdr2 := newValidator(t, vdrWeight+1) vdr3sk, vdr3 := newValidator(t, vdrWeight-1) - sig1 := vdr1sk.Sign(unsignedMsg.Bytes()) - sig2 := vdr2sk.Sign(unsignedMsg.Bytes()) - sig3 := vdr3sk.Sign(unsignedMsg.Bytes()) + sig1, err := vdr1sk.Sign(unsignedMsg.Bytes()) + require.NoError(t, err) + sig2, err := vdr2sk.Sign(unsignedMsg.Bytes()) + require.NoError(t, err) + sig3, err := vdr3sk.Sign(unsignedMsg.Bytes()) + require.NoError(t, err) vdrToSig := map[*avalancheWarp.Validator]*bls.Signature{ vdr1: sig1, vdr2: sig2, vdr3: sig3, } - nonVdrSk, err := bls.NewSigner() + nonVdrSk, err := localsigner.New() + require.NoError(t, err) + nonVdrSig, err := nonVdrSk.Sign(unsignedMsg.Bytes()) require.NoError(t, err) - nonVdrSig := nonVdrSk.Sign(unsignedMsg.Bytes()) vdrs := []*avalancheWarp.Validator{ { PublicKey: vdr1.PublicKey, diff --git a/warp/backend_test.go b/warp/backend_test.go index 764957882c..cee1150353 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/warp/warptest" @@ -41,7 +41,7 @@ func init() { func TestAddAndGetValidMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -64,7 +64,7 @@ func TestAddAndGetValidMessage(t *testing.T) { func TestAddAndGetUnknownMessage(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner 
:= avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -83,7 +83,7 @@ func TestGetBlockSignature(t *testing.T) { blockClient := warptest.MakeBlockClient(blkID) db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} @@ -108,7 +108,7 @@ func TestGetBlockSignature(t *testing.T) { func TestZeroSizedCache(t *testing.T) { db := memdb.New() - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) @@ -136,7 +136,7 @@ func TestOffChainMessages(t *testing.T) { check func(require *require.Assertions, b Backend) err error } - sk, err := bls.NewSigner() + sk, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index ac2a364a78..92eb72c724 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/message" @@ -23,7 +24,7 @@ import ( func TestMessageSignatureHandler(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -127,7 +128,7 @@ func TestMessageSignatureHandler(t *testing.T) { func TestBlockSignatureHandler(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) diff --git a/warp/service.go b/warp/service.go index f26a0171b5..30db286690 100644 --- a/warp/service.go +++ b/warp/service.go @@ -112,22 +112,22 @@ func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.Uns } state := warpValidators.NewState(a.state, a.sourceSubnetID, a.sourceChainID, a.requirePrimaryNetworkSigners()) - validators, totalWeight, err := warp.GetCanonicalValidatorSet(ctx, state, pChainHeight, subnetID) + validatorSet, err := warp.GetCanonicalValidatorSetFromSubnetID(ctx, state, pChainHeight, subnetID) if err != nil { return nil, fmt.Errorf("failed to get validator set: %w", err) } - if len(validators) == 0 { + if len(validatorSet.Validators) == 0 { return nil, fmt.Errorf("%w (SubnetID: %s, Height: %d)", errNoValidators, subnetID, pChainHeight) } log.Debug("Fetching signature", "sourceSubnetID", subnetID, "height", pChainHeight, - "numValidators", len(validators), - "totalWeight", totalWeight, + "numValidators", len(validatorSet.Validators), + "totalWeight", validatorSet.TotalWeight, ) - agg := aggregator.New(aggregator.NewSignatureGetter(a.client), validators, totalWeight) + agg := aggregator.New(aggregator.NewSignatureGetter(a.client), 
validatorSet.Validators, validatorSet.TotalWeight) signatureResult, err := agg.AggregateSignatures(ctx, unsignedMessage, quorumNum) if err != nil { return nil, err diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index d58e9e6c90..bfcdd0986b 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -5,6 +5,7 @@ package warp import ( "context" + "sync" "testing" "time" @@ -14,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/timer/mockable" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" @@ -30,7 +31,7 @@ import ( func TestAddressedCallSignatures(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -144,7 +145,7 @@ func TestAddressedCallSignatures(t *testing.T) { func TestBlockSignatures(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -262,7 +263,7 @@ func TestBlockSignatures(t *testing.T) { func TestUptimeSignatures(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSigner() + blsSecretKey, err := localsigner.New() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) @@ -291,8 +292,10 @@ func TestUptimeSignatures(t *testing.T) { clk := &mockable.Clock{} validatorsManager, err := validators.NewManager(chainCtx, memdb.New(), clk) require.NoError(t, err) + lock := &sync.RWMutex{} + newLockedValidatorManager := validators.NewLockedValidatorReader(validatorsManager, lock) validatorsManager.StartTracking([]ids.NodeID{}) - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, validatorsManager, database, sigCache, nil) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, newLockedValidatorManager, database, sigCache, nil) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner)
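A few illustrative notes on the API moves this patch tracks (sketches only, not part of the diff). The warp, aggregator, and handler test updates all follow the same avalanchego BLS change: signers come from localsigner.New() rather than bls.NewSigner(), and Sign now returns an error, hence the added require.NoError checks. A minimal sketch of that flow, using only identifiers that already appear in this diff (the message bytes are made up):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ava-labs/avalanchego/utils/crypto/bls"
    	"github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner"
    )

    func main() {
    	// localsigner.New replaces the bls.NewSigner constructor used before this patch.
    	sk, err := localsigner.New()
    	if err != nil {
    		log.Fatal(err)
    	}

    	unsignedBytes := []byte("unsigned warp message bytes")

    	// Sign now returns (signature, error) rather than only a signature.
    	sig, err := sk.Sign(unsignedBytes)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Aggregation and serialization are unchanged.
    	aggSig, err := bls.AggregateSignatures([]*bls.Signature{sig})
    	if err != nil {
    		log.Fatal(err)
    	}
    	aggSigBytes := [bls.SignatureLen]byte{}
    	copy(aggSigBytes[:], bls.SignatureToBytes(aggSig))

    	fmt.Printf("aggregated signature is %d bytes; have public key: %v\n",
    		len(aggSigBytes), sk.PublicKey() != nil)
    }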
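The signature verification changes (warp/signature_verification_test.go, warp/service.go) resolve the canonical validator set up front via GetCanonicalValidatorSetFromChainID or GetCanonicalValidatorSetFromSubnetID and pass the resulting set, with its Validators and TotalWeight, into Verify or the aggregator. A hedged sketch of the new call shape; the validators.State parameter type is inferred from how the test mocks are used, not confirmed against the avalanchego sources:

    package warpexample

    import (
    	"context"

    	"github.com/ava-labs/avalanchego/snow/validators"
    	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
    )

    // verifySignature sketches the flow the updated test body uses: the canonical
    // validator set is resolved first (from the message's source chain ID) and then
    // handed to Signature.Verify, which no longer takes the context, state, and
    // P-Chain height itself.
    func verifySignature(
    	ctx context.Context,
    	msg *avalancheWarp.Message,
    	networkID uint32,
    	pChainState validators.State, // assumption: interface type inferred from the test mocks
    	pChainHeight uint64,
    	quorumNum, quorumDen uint64,
    ) error {
    	validatorSet, err := avalancheWarp.GetCanonicalValidatorSetFromChainID(
    		ctx,
    		pChainState,
    		pChainHeight,
    		msg.UnsignedMessage.SourceChainID,
    	)
    	if err != nil {
    		return err
    	}
    	return msg.Signature.Verify(
    		&msg.UnsignedMessage,
    		networkID,
    		validatorSet,
    		quorumNum,
    		quorumDen,
    	)
    }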
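The dropped "i := i" and "test := test" copies in rpc/client_test.go and rpc/types_test.go rely on Go 1.22 loop-variable scoping, which applies automatically now that the toolchain version is taken from go.mod. A small self-contained illustration of why the manual copy is redundant under those semantics:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	results := make([]int, 3)

    	for i := range results {
    		// Since Go 1.22, i is a fresh variable on every iteration, so the
    		// goroutine captures this iteration's value rather than a single
    		// shared loop variable. The old `i := i` copy is therefore redundant.
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			results[i] = i * i
    		}()
    	}

    	wg.Wait()
    	fmt.Println(results) // [0 1 4]
    }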
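scripts/versions.sh now derives AVALANCHE_VERSION from the avalanchego requirement in go.mod, and when that requirement is a Go pseudo-version it keeps only the first 8 characters of the trailing module hash, matching the tag scheme used for published avalanchego images. The helper below merely mirrors that string handling in Go for illustration; the sample version strings are invented:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // shortAvalancheTag mirrors the versions.sh logic: a pseudo-version such as
    // v1.11.12-0.20240917093546-abcdef123456 maps to the first 8 characters of
    // the module hash, while a plain tag like v1.11.12 is returned unchanged.
    func shortAvalancheTag(version string) string {
    	pseudo := regexp.MustCompile(`^v.*[0-9]{14}-[0-9a-f]{12}$`)
    	if pseudo.MatchString(version) {
    		hash := version[len(version)-12:]
    		return hash[:8]
    	}
    	return version
    }

    func main() {
    	fmt.Println(shortAvalancheTag("v1.11.12-0.20240917093546-abcdef123456")) // abcdef12
    	fmt.Println(shortAvalancheTag("v1.11.12"))                               // v1.11.12
    }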