diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 42293d38a79..bcade948d79 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -13,87 +13,154 @@ concurrency: cancel-in-progress: true jobs: - run-local-testnet: - strategy: - matrix: - os: - - ubuntu-22.04 - - macos-12 - runs-on: ${{ matrix.os }} - env: - # Enable portable to prevent issues with caching `blst` for the wrong CPU type - FEATURES: portable,jemalloc + dockerfile-ubuntu: + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install geth (ubuntu) - if: matrix.os == 'ubuntu-22.04' - run: | - sudo add-apt-repository -y ppa:ethereum/ethereum - sudo apt-get update - sudo apt-get install ethereum - - name: Install geth (mac) - if: matrix.os == 'macos-12' + - name: Build Docker image run: | - brew tap ethereum/ethereum - brew install ethereum - - name: Install GNU sed & GNU grep - if: matrix.os == 'macos-12' + docker build --build-arg FEATURES=portable -t lighthouse:local . + docker save lighthouse:local -o lighthouse-docker.tar + + - name: Upload Docker image artifact + uses: actions/upload-artifact@v4 + with: + name: lighthouse-docker + path: lighthouse-docker.tar + retention-days: 3 + + run-local-testnet: + runs-on: ubuntu-22.04 + needs: dockerfile-ubuntu + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies run: | - brew install gnu-sed grep - echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH - echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH - # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v4 - id: cache-cargo + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + name: lighthouse-docker + path: . 
- - name: Install lighthouse - run: make && make install-lcli + - name: Load Docker image + run: docker load -i lighthouse-docker.tar - name: Start local testnet - run: ./start_local_testnet.sh genesis.json && sleep 60 + run: ./start_local_testnet.sh -e local -c -b false && sleep 60 working-directory: scripts/local_testnet - - name: Print logs - run: ./dump_logs.sh + - name: Stop local testnet and dump logs + run: ./stop_local_testnet.sh local working-directory: scripts/local_testnet - - name: Stop local testnet - run: ./stop_local_testnet.sh + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -e local-blinded -c -p -b false && sleep 60 working-directory: scripts/local_testnet - - name: Clean-up testnet - run: ./clean.sh + - name: Stop local testnet and dump logs + run: ./stop_local_testnet.sh local-blinded working-directory: scripts/local_testnet - - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -p genesis.json && sleep 60 - working-directory: scripts/local_testnet + - name: Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-local-testnet + path: | + scripts/local_testnet/logs + retention-days: 3 - - name: Print logs for blinded block testnet - run: ./dump_logs.sh - working-directory: scripts/local_testnet + doppelganger-protection-success-test: + needs: dockerfile-ubuntu + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . + + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the doppelganger protection success test script + run: | + ./doppelganger_protection.sh success + working-directory: scripts/tests + + - name: Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-doppelganger-protection-success + path: | + scripts/local_testnet/logs + retention-days: 3 + + doppelganger-protection-failure-test: + needs: dockerfile-ubuntu + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:rmescandon/yq + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install -y kurtosis-cli yq + kurtosis analytics disable + + - name: Download Docker image artifact + uses: actions/download-artifact@v4 + with: + name: lighthouse-docker + path: . + + - name: Load Docker image + run: docker load -i lighthouse-docker.tar + + - name: Run the doppelganger protection failure test script + run: | + ./doppelganger_protection.sh failure + working-directory: scripts/tests + + - name: Upload logs artifact + uses: actions/upload-artifact@v4 + with: + name: logs-doppelganger-protection-failure + path: | + scripts/local_testnet/logs + retention-days: 3 - - name: Stop local testnet with blinded block production - run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. 
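The new jobs above share one Lighthouse image: `dockerfile-ubuntu` builds it once, saves it with `docker save`, and every downstream job downloads the tarball artifact and restores it instead of rebuilding. As a rough local sketch of what those download-then-load steps amount to (the `lighthouse:local` tag comes from the build job; running `lighthouse --version` is only an illustrative smoke test, borrowed from the old `dockerfile-ubuntu` job in test-suite.yml, not a step in this workflow):

```sh
# Recreate the image from the lighthouse-docker.tar artifact produced by dockerfile-ubuntu,
# then sanity-check that the binary inside it runs.
docker load -i lighthouse-docker.tar          # restores the lighthouse:local tag
docker run --rm lighthouse:local lighthouse --version
```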
local-testnet-success: name: local-testnet-success runs-on: ubuntu-latest - needs: ["run-local-testnet"] + needs: [ + 'dockerfile-ubuntu', + 'run-local-testnet', + 'doppelganger-protection-success-test', + 'doppelganger-protection-failure-test', + ] steps: - uses: actions/checkout@v4 - name: Check that success job is dependent on all others diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3d23b4110e7..86f99b53e10 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,38 +31,22 @@ jobs: strategy: matrix: arch: [aarch64-unknown-linux-gnu, - aarch64-unknown-linux-gnu-portable, x86_64-unknown-linux-gnu, - x86_64-unknown-linux-gnu-portable, x86_64-apple-darwin, - x86_64-apple-darwin-portable, - x86_64-windows, - x86_64-windows-portable] + x86_64-windows] include: - arch: aarch64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - - arch: aarch64-unknown-linux-gnu-portable - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} - profile: maxperf - arch: x86_64-unknown-linux-gnu runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} profile: maxperf - - arch: x86_64-unknown-linux-gnu-portable - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "release", "large"]') || 'ubuntu-latest' }} - profile: maxperf - arch: x86_64-apple-darwin - runner: macos-latest - profile: maxperf - - arch: x86_64-apple-darwin-portable - runner: macos-latest + runner: macos-13 profile: maxperf - arch: x86_64-windows runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} profile: maxperf - - arch: x86_64-windows-portable - runner: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "release"]') || 'windows-2019' }} - profile: maxperf runs-on: ${{ matrix.runner }} needs: extract-version @@ -80,7 +64,7 @@ jobs: - uses: KyleMayes/install-llvm-action@v1 if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows') with: - version: "16.0" + version: "17.0" directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH if: startsWith(matrix.arch, 'x86_64-windows') @@ -90,53 +74,29 @@ jobs: # Builds # ============================== - - name: Build Lighthouse for aarch64-unknown-linux-gnu-portable - if: matrix.arch == 'aarch64-unknown-linux-gnu-portable' - run: | - cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable - - name: Build Lighthouse for aarch64-unknown-linux-gnu if: matrix.arch == 'aarch64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64 - - - name: Build Lighthouse for x86_64-unknown-linux-gnu-portable - if: matrix.arch == 'x86_64-unknown-linux-gnu-portable' - run: | - cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable + env CROSS_PROFILE=${{ matrix.profile }} make build-aarch64-portable - name: Build Lighthouse for x86_64-unknown-linux-gnu if: matrix.arch == 'x86_64-unknown-linux-gnu' run: | cargo install cross - env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64 - - - name: Move cross-compiled binary - if: startsWith(matrix.arch, 'aarch64') - run: mv target/aarch64-unknown-linux-gnu/${{ 
matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse + env CROSS_PROFILE=${{ matrix.profile }} make build-x86_64-portable - name: Move cross-compiled binary - if: startsWith(matrix.arch, 'x86_64-unknown-linux-gnu') - run: mv target/x86_64-unknown-linux-gnu/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse - - - name: Build Lighthouse for x86_64-apple-darwin portable - if: matrix.arch == 'x86_64-apple-darwin-portable' - run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} + if: contains(matrix.arch, 'unknown-linux-gnu') + run: mv target/${{ matrix.arch }}/${{ matrix.profile }}/lighthouse ~/.cargo/bin/lighthouse - - name: Build Lighthouse for x86_64-apple-darwin modern + - name: Build Lighthouse for x86_64-apple-darwin if: matrix.arch == 'x86_64-apple-darwin' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} - - - name: Build Lighthouse for Windows portable - if: matrix.arch == 'x86_64-windows-portable' run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - - name: Build Lighthouse for Windows modern + - name: Build Lighthouse for Windows if: matrix.arch == 'x86_64-windows' - run: cargo install --path lighthouse --force --locked --features modern,gnosis --profile ${{ matrix.profile }} + run: cargo install --path lighthouse --force --locked --features portable,gnosis --profile ${{ matrix.profile }} - name: Configure GPG and create artifacts if: startsWith(matrix.arch, 'x86_64-windows') != true @@ -151,6 +111,11 @@ jobs: cd artifacts tar -czf lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz lighthouse echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz + for ext in "tar.gz" "tar.gz.asc";\ + do for f in *.$ext;\ + do cp $f "../${f%.$ext}-portable.$ext";\ + done;\ + done mv *tar.gz* .. - name: Configure GPG and create artifacts Windows @@ -179,6 +144,14 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz compression-level: 0 + - name: Upload artifact (copy) + if: startsWith(matrix.arch, 'x86_64-windows') != true + uses: actions/upload-artifact@v4 + with: + name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz + path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz + compression-level: 0 + - name: Upload signature uses: actions/upload-artifact@v4 with: @@ -186,6 +159,14 @@ jobs: path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc compression-level: 0 + - name: Upload signature (copy) + if: startsWith(matrix.arch, 'x86_64-windows') != true + uses: actions/upload-artifact@v4 + with: + name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc + path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}-portable.tar.gz.asc + compression-level: 0 + draft-release: name: Draft Release needs: [build, extract-version] @@ -229,9 +210,9 @@ jobs: ## Testing Checklist (DELETE ME) - - [ ] Run on synced Prater Sigma Prime nodes. + - [ ] Run on synced Holesky Sigma Prime nodes. - [ ] Run on synced Canary (mainnet) Sigma Prime nodes. - - [ ] Resync a Prater node. + - [ ] Resync a Holesky node. - [ ] Resync a mainnet node. 
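With the dedicated `-portable` build targets removed, each non-Windows artifact is now signed once (`gpg -ab` producing a detached `.asc` signature) and the loop above simply copies both files under a `-portable` name. A minimal sketch of how a downloaded release tarball might be checked locally, assuming Sigma Prime's signing key is already imported; the version and arch values are placeholders for illustration only:

```sh
# Hypothetical values used for illustration.
VERSION=v5.2.1
ARCH=x86_64-unknown-linux-gnu

# Verify the detached signature against the tarball, then unpack the `lighthouse` binary.
gpg --verify "lighthouse-$VERSION-$ARCH.tar.gz.asc" "lighthouse-$VERSION-$ARCH.tar.gz"
tar -xzf "lighthouse-$VERSION-$ARCH.tar.gz"
```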
## Release Checklist (DELETE ME) @@ -277,8 +258,8 @@ jobs: | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-unknown-linux-gnu-portable.tar.gz.asc) | | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) | | | aarch64 | [lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-aarch64-unknown-linux-gnu-portable.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | - | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) | + | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows.tar.gz.asc) | + | | x86_64 | [lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/lighthouse-${{ env.VERSION }}-x86_64-windows-portable.tar.gz.asc) | | | | | | | **System** | **Option** | - | **Resource** | | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}/tags?page=1&ordering=last_updated&name=${{ env.VERSION }}) | [${{ env.IMAGE_NAME }}](https://hub.docker.com/r/${{ env.IMAGE_NAME }}) | diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 413dd2b95dd..769b889de4d 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -29,6 +29,31 @@ env: # Enable portable to prevent issues with caching `blst` for the wrong CPU type TEST_FEATURES: portable jobs: + check-labels: + runs-on: ubuntu-latest + name: Check for 'skip-ci' label + outputs: + skip_ci: ${{ steps.set-output.outputs.SKIP_CI }} + steps: + - name: check for skip-ci label + id: set-output + env: + LABELS: ${{ 
toJson(github.event.pull_request.labels) }} + run: | + SKIP_CI="false" + if [ -z "${LABELS}" ] || [ "${LABELS}" = "null" ]; then + LABELS="none"; + else + LABELS=$(echo ${LABELS} | jq -r '.[].name') + fi + for label in ${LABELS}; do + if [ "$label" = "skip-ci" ]; then + SKIP_CI="true" + break + fi + done + echo "skip_ci=$SKIP_CI" >> $GITHUB_OUTPUT + target-branch-check: name: target-branch-check runs-on: ubuntu-latest @@ -38,6 +63,8 @@ jobs: run: test ${{ github.base_ref }} != "stable" release-tests-ubuntu: name: release-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} steps: @@ -63,6 +90,8 @@ jobs: run: sccache --show-stats release-tests-windows: name: release-tests-windows + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} steps: - uses: actions/checkout@v4 @@ -83,11 +112,6 @@ jobs: - name: Install make if: env.SELF_HOSTED_RUNNERS == 'false' run: choco install -y make -# - uses: KyleMayes/install-llvm-action@v1 -# if: env.SELF_HOSTED_RUNNERS == 'false' -# with: -# version: "16.0" -# directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release @@ -97,6 +121,8 @@ jobs: run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} env: @@ -117,6 +143,8 @@ jobs: run: sccache --show-stats op-pool-tests: name: op-pool-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -132,6 +160,8 @@ jobs: run: make test-op-pool network-tests: name: network-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -147,6 +177,8 @@ jobs: run: make test-network slasher-tests: name: slasher-tests + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -162,6 +194,8 @@ jobs: run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} env: @@ -186,6 +220,8 @@ jobs: run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -198,6 +234,8 @@ jobs: run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} env: @@ -216,17 +254,10 @@ jobs: - name: Show cache stats if: env.SELF_HOSTED_RUNNERS == 'true' run: sccache --show-stats - dockerfile-ubuntu: - name: dockerfile-ubuntu - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Build the root Dockerfile - run: docker build --build-arg FEATURES=portable -t lighthouse:local . - - name: Test the built image - run: docker run -t lighthouse:local lighthouse --version basic-simulator-ubuntu: name: basic-simulator-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -239,6 +270,8 @@ jobs: run: cargo run --release --bin simulator basic-sim fallback-simulator-ubuntu: name: fallback-simulator-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -249,44 +282,10 @@ jobs: cache-target: release - name: Run a beacon chain sim which tests VC fallback behaviour run: cargo run --release --bin simulator fallback-sim - doppelganger-protection-test: - name: doppelganger-protection-test - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} - env: - # Enable portable to prevent issues with caching `blst` for the wrong CPU type - FEATURES: jemalloc,portable - steps: - - uses: actions/checkout@v4 - - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' - uses: moonrepo/setup-rust@v1 - with: - channel: stable - cache-target: release - - name: Install geth - if: env.SELF_HOSTED_RUNNERS == 'false' - run: | - sudo add-apt-repository -y ppa:ethereum/ethereum - sudo apt-get update - sudo apt-get install ethereum - - name: Install lighthouse - run: | - make - - name: Install lcli - # TODO: uncomment after the version of lcli in https://github.com/sigp/lighthouse/pull/5137 - # is installed on the runners - # if: env.SELF_HOSTED_RUNNERS == 'false' - run: make install-lcli - - name: Run the doppelganger protection failure test script - run: | - cd scripts/tests - ./doppelganger_protection.sh failure genesis.json - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success genesis.json execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -332,6 +331,8 @@ jobs: run: make audit-CI - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose run: CARGO_HOME=$(readlink -f $HOME) make vendor + - name: Markdown-linter + run: make mdlint check-msrv: name: check-msrv runs-on: ubuntu-latest @@ -346,6 +347,8 @@ jobs: run: cargo check --workspace cargo-udeps: name: cargo-udeps + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -368,6 +371,8 @@ jobs: RUSTFLAGS: "" compile-with-beta-compiler: name: compile-with-beta-compiler + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -379,6 +384,8 @@ jobs: run: make 
cli-check: name: cli-check + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -393,8 +400,10 @@ jobs: # a PR is safe to merge. New jobs should be added here. test-suite-success: name: test-suite-success + if: needs.check-labels.outputs.skip_ci != 'true' runs-on: ubuntu-latest needs: [ + 'check-labels', 'target-branch-check', 'release-tests-ubuntu', 'release-tests-windows', @@ -405,10 +414,8 @@ jobs: 'debug-tests-ubuntu', 'state-transition-vectors-ubuntu', 'ef-tests-ubuntu', - 'dockerfile-ubuntu', 'basic-simulator-ubuntu', 'fallback-simulator-ubuntu', - 'doppelganger-protection-test', 'execution-engine-integration-ubuntu', 'check-code', 'check-msrv', diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a408fcdd52f..3c53558a100 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,12 +1,14 @@ # Contributors Guide + [![GitPOAP badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) -Lighthouse is an open-source Ethereum 2.0 client. We're community driven and +Lighthouse is an open-source Ethereum consensus client. We're community driven and welcome all contribution. We aim to provide a constructive, respectful and fun environment for collaboration. -We are active contributors to the [Ethereum 2.0 specification](https://github.com/ethereum/eth2.0-specs) and attend all [Eth -2.0 implementers calls](https://github.com/ethereum/eth2.0-pm). +We are active contributors to +the [Ethereum Proof-of-Stake Consensus specification](https://github.com/ethereum/consensus-specs) and attend +all [Ethereum implementers calls](https://github.com/ethereum/pm/). This guide is geared towards beginners. If you're an open-source veteran feel free to just skim this document and get straight into crushing issues. @@ -41,7 +43,7 @@ We recommend the following work-flow for contributors: 1. **Find an issue** to work on, either because it's interesting or suitable to your skill-set. Use comments to communicate your intentions and ask -questions. + questions. 2. **Work in a feature branch** of your personal fork (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). @@ -49,13 +51,13 @@ questions. `unstable` as the base branch to merge your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on -[discord](https://discord.gg/cyAszAh). + [discord](https://discord.gg/cyAszAh). 5. If the issue is addressed the repository maintainers will **merge your pull-request** and you'll be an official contributor! Generally, you find an issue you'd like to work on and announce your intentions to start work in a comment on the issue. Then, do your work on a separate -branch (a "feature branch") in your own fork of the main repository. Once +branch (a "feature branch") in your own fork of the main repository. Once you're happy and you think the issue has been addressed, create a pull request into the main repository. @@ -66,18 +68,20 @@ steps: 1. [Create a fork](https://help.github.com/articles/fork-a-repo/#fork-an-example-repository) -and [clone -it](https://help.github.com/articles/fork-a-repo/#step-2-create-a-local-clone-of-your-fork) -to your local machine. + and [clone + it](https://help.github.com/articles/fork-a-repo/#step-2-create-a-local-clone-of-your-fork) + to your local machine. 2. 
[Add an _"upstream"_ branch](https://help.github.com/articles/fork-a-repo/#step-3-configure-git-to-sync-your-fork-with-the-original-spoon-knife-repository) -that tracks github.com/sigp/lighthouse using `$ git remote add upstream -https://github.com/sigp/lighthouse.git` (pro-tip: [use SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) instead of HTTPS). + that tracks github.com/sigp/lighthouse using `$ git remote add upstream + https://github.com/sigp/lighthouse.git` ( + pro-tip: [use SSH](https://help.github.com/articles/connecting-to-github-with-ssh/) instead of HTTPS). 3. Create a new feature branch with `$ git checkout -b your_feature_name`. The name of your branch isn't critical but it should be short and instructive. -E.g., if you're fixing a bug with serialization, you could name your branch -`fix_serialization_bug`. -4. Make sure you sign your commits. See [relevant doc](https://help.github.com/en/github/authenticating-to-github/about-commit-signature-verification). + E.g., if you're fixing a bug with serialization, you could name your branch + `fix_serialization_bug`. +4. Make sure you sign your commits. + See [relevant doc](https://help.github.com/en/github/authenticating-to-github/about-commit-signature-verification). 5. Commit your changes and push them to your fork with `$ git push origin your_feature_name`. 6. Go to your fork on github.com and use the web interface to create a pull @@ -92,22 +96,28 @@ by Rob Allen that provides much more detail on each of these steps, if you're having trouble. As always, jump on [discord](https://discord.gg/cyAszAh) if you get stuck. +Additionally, +the ["Contributing to Lighthouse" section](https://lighthouse-book.sigmaprime.io/contributing.html#contributing-to-lighthouse) +of the Lighthouse Book provides more details on the setup. ## FAQs ### I don't think I have anything to add There's lots to be done and there's all sorts of tasks. You can do anything -from correcting typos through to writing core consensus code. If you reach out, +from enhancing documentation through to writing core consensus code. If you reach out, we'll include you. +Please note, to maintain project quality, we may not accept PRs for small typos or changes +with minimal impact. + ### I'm not sure my Rust is good enough We're open to developers of all levels. If you create a PR and your code doesn't meet our standards, we'll help you fix it and we'll share the reasoning with you. Contributing to open-source is a great way to learn. -### I'm not sure I know enough about Ethereum 2.0 +### I'm not sure I know enough about Ethereum No problems, there's plenty of tasks that don't require extensive Ethereum knowledge. You can learn about Ethereum as you go. 
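The contribution steps in this guide boil down to a short sequence of git commands. A condensed sketch of that workflow, reusing the guide's own placeholders (`YOUR_NAME`, `your_feature_name`); the `-S` flag is one common way to sign commits, per the signature-verification doc linked above:

```sh
# One-time setup: clone your fork and track the upstream repository.
git clone git@github.com:YOUR_NAME/lighthouse.git
cd lighthouse
git remote add upstream https://github.com/sigp/lighthouse.git

# Per contribution: create a feature branch, commit signed changes, push to your fork,
# then open a pull request against `unstable` from the GitHub web interface.
git checkout -b your_feature_name
git commit -S -m "Fix serialization bug"
git push origin your_feature_name
```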
diff --git a/Cargo.lock b/Cargo.lock index cc38877fe76..28f1284068e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,10 +30,7 @@ dependencies = [ "filesystem", "safe_arith", "sensitive_url", - "serde", - "serde_json", "slashing_protection", - "slog", "slot_clock", "tempfile", "tokio", @@ -62,9 +59,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -248,9 +245,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -259,13 +256,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -284,19 +281,65 @@ dependencies = [ ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstream" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ - "winapi", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" + +[[package]] +name = "anstyle-parse" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "arbitrary" @@ -466,9 +509,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "asn1-rs" 
-version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ "asn1-rs-derive", "asn1-rs-impl", @@ -482,25 +525,25 @@ dependencies = [ [[package]] name = "asn1-rs-derive" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.66", "synstructure", ] [[package]] name = "asn1-rs-impl" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.66", ] [[package]] @@ -533,7 +576,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.33", + "rustix 0.38.34", "slab", "tracing", "windows-sys 0.52.0", @@ -558,7 +601,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -628,14 +671,14 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" @@ -694,9 +737,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", @@ -739,9 +782,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -781,7 +824,7 @@ dependencies = [ "merkle_proof", "oneshot_broadcast", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "proto_array", "rand", "rayon", @@ -812,7 +855,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "5.1.3" +version = "5.2.1" dependencies = [ "beacon_chain", "clap", @@ -822,19 +865,15 @@ dependencies = [ "dirs", "environment", "eth2_config", - "eth2_network_config", "execution_layer", - "futures", "genesis", "hex", "http_api", "hyper 1.3.1", "lighthouse_network", - "lighthouse_version", "monitoring_api", "node_test_rig", "sensitive_url", - "serde", "serde_json", "slasher", "slog", @@ -849,25 +888,22 @@ dependencies = [ name = "beacon_processor" version = "0.1.0" dependencies = [ - "derivative", - "ethereum_ssz", "fnv", "futures", - "hex", 
"itertools", "lazy_static", "lighthouse_metrics", "lighthouse_network", "logging", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde", "slog", "slot_clock", "strum", "task_executor", "tokio", - "tokio-util 0.6.10", + "tokio-util", "types", ] @@ -882,21 +918,22 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "cexpr", "clang-sys", + "itertools", "lazy_static", "lazycell", - "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "syn 2.0.66", ] [[package]] @@ -1025,7 +1062,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.1.3" +version = "5.2.1" dependencies = [ "beacon_node", "clap", @@ -1037,12 +1074,9 @@ dependencies = [ "log", "logging", "serde", - "serde_json", - "serde_yaml", "slog", "slog-async", "slog-scope", - "slog-stdlog", "slog-term", "tokio", "types", @@ -1072,7 +1106,6 @@ dependencies = [ "reqwest", "sensitive_url", "serde", - "serde_json", ] [[package]] @@ -1125,9 +1158,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3130f3d8717cc02e668a896af24984d5d5d4e8bf12e278e982e0f1bd88a0f9af" +checksum = "cdf100c4cea8f207e883ff91ca886d621d8a166cb04971dfaa9bb8fd99ed95df" dependencies = [ "blst", "cc", @@ -1136,26 +1169,11 @@ dependencies = [ "libc", ] -[[package]] -name = "cached_tree_hash" -version = "0.1.0" -dependencies = [ - "ethereum-types 0.14.1", - "ethereum_hashing", - "ethereum_ssz", - "ethereum_ssz_derive", - "quickcheck", - "quickcheck_macros", - "smallvec", - "ssz_types", - "tree_hash", -] - [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -1177,7 +1195,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -1191,9 +1209,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.95" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", @@ -1257,6 +1275,33 @@ dependencies = [ "windows-targets 0.52.5", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.3.0" @@ -1279,9 +1324,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "f803f94ecf597339c7a34eed2036ef83f86aaba937f001f7c5b5e251f043f1f9" dependencies = [ "glob", "libc", @@ -1290,19 +1335,45 @@ dependencies = [ [[package]] name = "clap" -version = "2.34.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.1", + "terminal_size", +] + +[[package]] +name = "clap_derive" +version = "4.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.66", ] +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + [[package]] name = "clap_utils" version = "0.1.0" @@ -1341,12 +1412,9 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lighthouse_network", - "logging", "monitoring_api", "network", - "num_cpus", "operation_pool", - "parking_lot 0.12.1", "sensitive_url", "serde", "serde_yaml", @@ -1360,7 +1428,6 @@ dependencies = [ "time", "timer", "tokio", - "tree_hash", "types", ] @@ -1373,6 +1440,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" + [[package]] name = "compare_fields" version = "0.2.0" @@ -1391,18 +1464,18 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-hex" -version = "1.11.3" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" dependencies = [ "cfg-if", "cpufeatures", @@ -1465,33 +1538,33 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ 
"cfg-if", ] [[package]] name = "criterion" -version = "0.3.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ - "atty", + "anes", "cast", + "ciborium", "clap", "criterion-plot", - "csv", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -1500,9 +1573,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", "itertools", @@ -1510,9 +1583,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -1538,9 +1611,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1603,27 +1676,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "csv" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" -dependencies = [ - "memchr", -] - [[package]] name = "ctr" version = "0.7.0" @@ -1663,16 +1715,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms 3.4.0", "rustc_version 0.4.0", "subtle", "zeroize", @@ -1686,7 +1737,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1752,15 +1803,15 @@ checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca" [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1768,9 +1819,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1786,12 +1837,10 @@ dependencies = [ "clap_utils", "environment", "hex", - "logging", + "serde", "slog", - "sloggers", "store", "strum", - "tempfile", "types", ] @@ -1808,7 +1857,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.10", + "tokio-util", ] [[package]] @@ -1848,9 +1897,9 @@ dependencies = [ [[package]] name = "der-parser" -version = "8.2.0" +version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ "asn1-rs", "displaydoc", @@ -1888,7 +1937,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1927,7 +1976,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -1947,7 +1996,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -2034,7 +2083,7 @@ dependencies = [ "enr", "fnv", "futures", - "hashlink", + "hashlink 0.8.4", "hex", "hkdf", "lazy_static", @@ -2060,7 +2109,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -2132,13 +2181,11 @@ version = "0.2.0" dependencies = [ "beacon_chain", "bls", - "cached_tree_hash", "compare_fields", "compare_fields_derive", "derivative", "eth2_network_config", "ethereum-types 0.14.1", - "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", @@ -2154,7 +2201,6 @@ dependencies = [ "serde_yaml", "snap", "state_processing", - "store", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", @@ -2163,9 +2209,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -2240,10 +2286,10 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -2307,9 +2353,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2336,15 +2382,12 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "hex", "lazy_static", "lighthouse_metrics", "merkle_proof", - "parking_lot 0.12.1", - "reqwest", + "parking_lot 0.12.3", "sensitive_url", "serde", - "serde_json", "serde_yaml", "slog", "sloggers", @@ -2386,7 +2429,6 @@ dependencies = [ "libsecp256k1", "lighthouse_network", "mediatype", - "mime", "pretty_reqwest_error", "procfs", "proto_array", @@ -2400,7 +2442,6 @@ dependencies = [ "ssz_types", "store", "tokio", - "tree_hash", "types", ] @@ -2472,7 +2513,6 @@ dependencies = [ "pretty_reqwest_error", "reqwest", "sensitive_url", - "serde_json", "serde_yaml", "sha2 0.9.9", "slog", @@ -2802,7 +2842,6 @@ version = "0.1.0" dependencies = [ "async-channel", "deposit_contract", - "environment", "ethers-core", "ethers-providers", "execution_layer", @@ -2827,7 +2866,6 @@ dependencies = [ "alloy-consensus", "alloy-rlp", "arc-swap", - "async-trait", "builder_client", "bytes", "environment", @@ -2837,7 +2875,6 @@ dependencies = [ "ethereum_ssz", "ethers-core", "fork_choice", - "futures", "hash-db", "hash256-std-hasher", "hex", @@ -2848,7 +2885,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pretty_reqwest_error", "rand", "reqwest", @@ -2897,9 +2934,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -2912,6 +2949,16 @@ dependencies = [ "bytes", ] +[[package]] +name = "fdlimit" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +dependencies = [ + "libc", + "thiserror", +] + [[package]] name = "ff" version = "0.12.1" @@ -2940,9 +2987,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "field-offset" @@ -2962,12 +3009,6 @@ dependencies = [ "windows-acl", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "fixed-hash" version = "0.7.0" @@ -2995,9 +3036,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "libz-sys", @@ -3148,17 +3189,18 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "futures-rustls" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.21.11", + "rustls 0.23.8", + "rustls-pki-types", ] [[package]] @@ -3251,9 +3293,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -3284,9 +3326,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "git-version" @@ -3305,7 +3347,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -3329,8 +3371,8 @@ dependencies = [ "futures-ticker", "futures-timer", "getrandom", + "hashlink 0.9.0", "hex_fmt", - "instant", "libp2p", "prometheus-client", "quick-protobuf", @@ -3340,9 +3382,9 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "smallvec", "tracing", "void", + "web-time", ] [[package]] @@ -3382,15 +3424,19 @@ dependencies = [ "indexmap 2.2.6", "slab", "tokio", - "tokio-util 0.7.10", + "tokio-util", "tracing", ] [[package]] name = "half" -version = "1.8.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hash-db" @@ -3415,18 +3461,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3447,7 +3484,16 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", +] + +[[package]] +name = "hashlink" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" +dependencies = [ + "hashbrown 0.14.5", ] [[package]] @@ -3480,6 +3526,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] 
+name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -3533,7 +3585,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "socket2 0.5.6", + "socket2 0.5.7", "thiserror", "tinyvec", "tokio", @@ -3553,7 +3605,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "resolv-conf", "smallvec", @@ -3704,7 +3756,7 @@ dependencies = [ "lru", "network", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "proto_array", "safe_arith", "sensitive_url", @@ -3781,7 +3833,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3816,7 +3868,7 @@ dependencies = [ "futures-util", "http 0.2.12", "hyper 0.14.28", - "rustls 0.21.11", + "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] @@ -3836,9 +3888,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-util", @@ -3846,7 +3898,6 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.6", "tokio", ] @@ -3962,7 +4013,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.6.9", + "parity-scale-codec 3.6.12", ] [[package]] @@ -4026,7 +4077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4040,9 +4091,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", "js-sys", @@ -4085,7 +4136,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring 1.1.0", "windows-sys 0.48.0", "winreg", @@ -4108,6 +4159,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -4224,9 +4281,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444" +checksum = "47a3633291834c4fbebf8673acbc1b04ec9d151418ff9b8e26dcd79129928758" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4275,7 +4332,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.1.3" 
+version = "5.2.1" dependencies = [ "account_utils", "beacon_chain", @@ -4283,25 +4340,20 @@ dependencies = [ "clap", "clap_utils", "deposit_contract", - "directory", "env_logger 0.9.3", "environment", - "eth1_test_rig", "eth2", "eth2_network_config", "eth2_wallet", "ethereum_hashing", "ethereum_ssz", "execution_layer", - "genesis", "hex", - "int_to_bytes", "lighthouse_network", "lighthouse_version", "log", "malloc_utils", "rayon", - "sensitive_url", "serde", "serde_json", "serde_yaml", @@ -4338,15 +4390,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libflate" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7d5654ae1795afc7ff76f4365c2c8791b0feb18e8996a96adad8ffd7c3b2bf" +checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e" dependencies = [ "adler32", "core2", @@ -4357,12 +4409,12 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5f52fb8c451576ec6b79d3f4deb327398bc05bbdbd99021a6e77a4c855d524" +checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d" dependencies = [ "core2", - "hashbrown 0.13.2", + "hashbrown 0.14.5", "rle-decode-fast", ] @@ -4385,7 +4437,7 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmdbx" version = "0.1.4" -source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" +source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a#e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a" dependencies = [ "bitflags 1.3.2", "byteorder", @@ -4393,7 +4445,7 @@ dependencies = [ "indexmap 1.9.3", "libc", "mdbx-sys", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "thiserror", ] @@ -4470,7 +4522,7 @@ dependencies = [ "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "quick-protobuf", "rand", @@ -4493,16 +4545,16 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "smallvec", "tracing", ] [[package]] name = "libp2p-identify" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20499a945d2f0221fdc6269b3848892c0f370d2ee3e19c7f65a29d8f860f6126" +checksum = "b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" dependencies = [ "asynchronous-codec 0.7.0", "either", @@ -4559,7 +4611,7 @@ dependencies = [ "libp2p-swarm", "rand", "smallvec", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tracing", "void", @@ -4593,7 +4645,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "smallvec", "tracing", @@ -4644,9 +4696,9 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0375cdfee57b47b313ef1f0fdb625b78aed770d33a40cf1c294a371ff5e6666" +checksum = "c67296ad4e092e23f92aea3d2bdb6f24eab79c0929ed816dfb460ea2f4567d2b" dependencies = [ "bytes", "futures", @@ -4655,12 
+4707,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "quinn", "rand", - "ring 0.16.20", - "rustls 0.21.11", - "socket2 0.5.6", + "ring 0.17.8", + "rustls 0.23.8", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -4668,9 +4720,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ "either", "fnv", @@ -4680,6 +4732,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", + "lru", "multistream-select", "once_cell", "rand", @@ -4691,14 +4744,14 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.34.1" +version = "0.34.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b644268b4acfdaa6a6100b31226ee7a36d96ab4c43287d113bfd2308607d8b6f" +checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -4713,24 +4766,24 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tracing", ] [[package]] name = "libp2p-tls" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ce7e3c2e7569d685d08ec795157981722ff96e9e9f9eae75df3c29d02b07a5" +checksum = "251b17aebdd29df7e8f80e4d94b782fae42e934c49086e1a81ba23b60a8314f2" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", "rcgen", - "ring 0.16.20", - "rustls 0.21.11", + "ring 0.17.8", + "rustls 0.23.8", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -4739,9 +4792,9 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49cc89949bf0e06869297cd4fe2c132358c23fe93e76ad43950453df4da3d35" +checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" dependencies = [ "futures", "futures-timer", @@ -4765,7 +4818,7 @@ dependencies = [ "thiserror", "tracing", "yamux 0.12.1", - "yamux 0.13.1", + "yamux 0.13.2", ] [[package]] @@ -4839,9 +4892,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "pkg-config", @@ -4850,7 +4903,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.1.3" +version = "5.2.1" dependencies = [ "account_manager", "account_utils", @@ -4881,10 +4934,8 @@ dependencies = [ "slasher", "slashing_protection", "slog", - "sloggers", "task_executor", "tempfile", - "tracing-subscriber", "types", "unused_port", "validator_client", @@ -4896,7 +4947,6 @@ dependencies = [ name = "lighthouse_metrics" version = "0.2.0" dependencies = [ - "lazy_static", "prometheus", ] @@ -4905,8 +4955,6 @@ name = "lighthouse_network" version = "0.2.0" dependencies = [ "async-channel", - "base64 0.21.7", - "byteorder", "bytes", "delay_map", "directory", @@ -4918,20 +4966,17 @@ dependencies = [ "ethereum_ssz_derive", "fnv", "futures", - "futures-ticker", 
- "getrandom", "gossipsub", "hex", - "hex_fmt", - "instant", "lazy_static", "libp2p", "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", + "logging", "lru", "lru_cache", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "prometheus-client", "quickcheck", "quickcheck_macros", @@ -4952,12 +4997,9 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-io-timeout", - "tokio-util 0.6.10", - "tracing", - "tree_hash", - "tree_hash_derive", + "tokio-util", "types", - "unsigned-varint 0.6.0", + "unsigned-varint 0.8.0", "unused_port", "void", ] @@ -4985,9 +5027,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lmdb-rkv" @@ -5012,9 +5054,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -5041,11 +5083,10 @@ dependencies = [ "chrono", "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde", "serde_json", "slog", - "slog-async", "slog-term", "sloggers", "take_mut", @@ -5063,7 +5104,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -5101,7 +5142,7 @@ dependencies = [ "lazy_static", "libc", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -5150,7 +5191,7 @@ dependencies = [ [[package]] name = "mdbx-sys" version = "0.11.6-4" -source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" +source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a#e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a" dependencies = [ "bindgen", "cc", @@ -5248,7 +5289,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rayon", "serde", "smallvec", @@ -5282,9 +5323,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -5387,11 +5428,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -5479,10 +5519,8 @@ dependencies = [ "beacon_processor", "delay_map", "derivative", - "environment", "error-chain", "eth2", - "ethereum-types 0.14.1", "ethereum_ssz", "execution_layer", "fnv", @@ -5496,12 +5534,10 @@ dependencies 
= [ "lighthouse_metrics", "lighthouse_network", "logging", - "lru", "lru_cache", "matches", - "num_cpus", "operation_pool", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "rlp", "slog", @@ -5516,7 +5552,6 @@ dependencies = [ "task_executor", "tokio", "tokio-stream", - "tokio-util 0.6.10", "types", ] @@ -5596,11 +5631,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -5640,9 +5674,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5651,9 +5685,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5671,18 +5705,18 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" dependencies = [ "memchr", ] [[package]] name = "oid-registry" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" dependencies = [ "asn1-rs", ] @@ -5697,7 +5731,7 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" name = "oneshot_broadcast" version = "0.1.0" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -5760,7 +5794,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -5771,9 +5805,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.3+3.2.1" +version = "300.3.0+3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" +checksum = "eba8804a1c5765b18c4b3f907e6897ebabeedebc9830e1a0046c4a4cf44663e1" dependencies = [ "cc", ] @@ -5804,7 +5838,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "maplit", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "rayon", "serde", @@ -5848,15 +5882,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", 
"impl-trait-for-tuples", - "parity-scale-codec-derive 3.6.9", + "parity-scale-codec-derive 3.6.12", "serde", ] @@ -5874,11 +5908,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5903,12 +5937,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -5927,15 +5961,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5951,9 +5985,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -5976,12 +6010,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -5997,7 +6025,7 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "serde", ] @@ -6018,9 +6046,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -6072,7 +6100,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6119,17 +6147,11 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" -[[package]] -name = "platforms" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" - [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -6140,30 +6162,30 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi 0.3.9", "pin-project-lite", - "rustix 0.38.33", + "rustix 0.38.34", "tracing", "windows-sys 0.52.0", ] @@ -6309,18 +6331,18 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_edit 0.20.7", + "toml_edit 0.21.1", ] [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" dependencies = [ "unicode-ident", ] @@ -6342,15 +6364,15 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "protobuf", "thiserror", ] @@ -6363,7 +6385,7 @@ checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "prometheus-client-derive-encode", ] @@ -6375,7 +6397,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -6431,7 +6453,7 @@ dependencies = [ "nix 0.24.3", "num_cpus", "once_cell", - "platforms 2.0.0", + "platforms", "thiserror", "unescape", ] @@ -6501,9 +6523,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "904e3d3ba178131798c6d9375db2b13b34337d489b089fc5ba0825a2ff1bee73" dependencies = [ "bytes", "futures-io", @@ -6511,7 +6533,7 @@ dependencies = [ 
"quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.11", + "rustls 0.23.8", "thiserror", "tokio", "tracing", @@ -6519,15 +6541,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.6" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +checksum = "e974563a4b1c2206bbc61191ca4da9c22e4308b4c455e8906751cc7828393f08" dependencies = [ "bytes", "rand", - "ring 0.16.20", + "ring 0.17.8", "rustc-hash", - "rustls 0.21.11", + "rustls 0.23.8", "slab", "thiserror", "tinyvec", @@ -6536,15 +6558,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +checksum = "e4f0def2590301f4f667db5a77f9694fb004f82796dc1a8b1508fafa3d0e8b72" dependencies = [ - "bytes", "libc", - "socket2 0.5.6", + "once_cell", + "socket2 0.5.7", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6563,7 +6585,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "scheduled-thread-pool", ] @@ -6660,6 +6682,15 @@ dependencies = [ "yasna", ] +[[package]] +name = "redb" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed7508e692a49b6b2290b56540384ccae9b1fb4d77065640b165835b56ffe3bb" +dependencies = [ + "libc", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6678,6 +6709,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "redox_users" version = "0.4.5" @@ -6758,7 +6798,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-pemfile 1.0.4", "serde", "serde_json", @@ -6768,7 +6808,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", - "tokio-util 0.7.10", + "tokio-util", "tower-service", "url", "wasm-bindgen", @@ -6914,7 +6954,7 @@ dependencies = [ "fastrlp", "num-bigint", "num-traits", - "parity-scale-codec 3.6.9", + "parity-scale-codec 3.6.12", "primitive-types 0.12.2", "proptest", "rand", @@ -6940,16 +6980,16 @@ dependencies = [ "bitflags 1.3.2", "fallible-iterator", "fallible-streaming-iterator", - "hashlink", + "hashlink 0.8.4", "libsqlite3-sys", "smallvec", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -6978,7 +7018,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -7006,22 +7046,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.33" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3cc72858054fcff6d7dea32df2aeaee6a7c24227366d7ea429aada2f26b16ad" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -7038,7 +7078,21 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls" +version = "0.23.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79adb16721f56eb2d843e67676896a61ce7a0fa622dc18d3e372477a029d2740" +dependencies = [ + "once_cell", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -7058,15 +7112,15 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -7080,9 +7134,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7091,9 +7145,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-fork" @@ -7120,9 +7174,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arith" @@ -7148,23 +7202,23 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.6.9", + "parity-scale-codec 3.6.12", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" +checksum = 
"2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -7185,7 +7239,7 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -7252,11 +7306,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -7265,9 +7319,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -7284,9 +7338,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -7316,9 +7370,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.198" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -7333,32 +7387,22 @@ dependencies = [ "serde_urlencoded", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -7383,14 +7427,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -7501,9 +7545,9 @@ dependencies 
= [ [[package]] name = "sha3-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +checksum = "a9b57fd861253bff08bb1919e995f90ba8f4889de2726091c8876f3a4e823b40" dependencies = [ "cc", "cfg-if", @@ -7571,17 +7615,14 @@ version = "0.2.0" dependencies = [ "clap", "env_logger 0.9.3", - "eth1", "eth2_network_config", - "ethereum-types 0.14.1", "execution_layer", "futures", "node_test_rig", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rayon", "sensitive_url", "serde_json", - "ssz_types", "tokio", "types", ] @@ -7607,6 +7648,7 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", + "derivative", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", @@ -7619,13 +7661,14 @@ dependencies = [ "logging", "lru", "maplit", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "rayon", + "redb", "safe_arith", "serde", "slog", - "sloggers", + "ssz_types", "strum", "tempfile", "tree_hash", @@ -7776,7 +7819,7 @@ version = "0.2.0" dependencies = [ "lazy_static", "lighthouse_metrics", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "types", ] @@ -7785,6 +7828,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "arbitrary", +] [[package]] name = "snap" @@ -7821,9 +7867,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -7903,10 +7949,12 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "merkle_proof", + "rand", "rayon", "safe_arith", "smallvec", "ssz_types", + "test_random_derive", "tokio", "tree_hash", "types", @@ -7935,7 +7983,6 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", - "bls", "db-key", "directory", "ethereum_ssz", @@ -7944,14 +7991,11 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "logging", "lru", - "parking_lot 0.12.1", - "safe_arith", + "parking_lot 0.12.3", "serde", "slog", "sloggers", - "smallvec", "state_processing", "strum", "tempfile", @@ -7960,26 +8004,26 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] name = "strsim" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -7996,7 +8040,7 @@ version = "0.24.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", @@ -8011,9 +8055,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "superstruct" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201" +checksum = "bf0f31f730ad9e579364950e10d6172b4a9bd04b447edf5988b066a860cc340e" dependencies = [ "darling", "itertools", @@ -8045,9 +8089,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -8068,14 +8112,13 @@ checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "synstructure" -version = "0.12.6" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", - "unicode-xid", + "syn 2.0.66", ] [[package]] @@ -8119,9 +8162,8 @@ name = "system_health" version = "0.1.0" dependencies = [ "lighthouse_network", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "serde", - "serde_json", "sysinfo", "types", ] @@ -8173,7 +8215,7 @@ checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "rustix 0.38.33", + "rustix 0.38.34", "windows-sys 0.52.0", ] @@ -8197,6 +8239,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.34", + "windows-sys 0.48.0", +] + [[package]] name = "test-test_logger" version = "0.1.0" @@ -8230,33 +8282,24 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -8386,7 +8429,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -8409,7 +8452,7 @@ checksum = 
"5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -8435,16 +8478,16 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", - "tokio-util 0.7.10", + "tokio-util", "whoami", ] @@ -8454,7 +8497,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.12", "tokio", ] @@ -8478,38 +8521,22 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.10", + "tokio-util", ] [[package]] name = "tokio-util" -version = "0.6.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "log", - "pin-project-lite", - "slab", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", "pin-project-lite", "slab", "tokio", - "tracing", ] [[package]] @@ -8535,9 +8562,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -8557,9 +8584,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.20.7" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ "indexmap 2.2.6", "toml_datetime", @@ -8626,7 +8653,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] @@ -8731,9 +8758,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +checksum = "1b2cb4fbb9995eeb36ac86fadf24031ccd58f99d6b4b2d7b911db70bddb80d90" dependencies = [ "serde", "stable_deref_trait", @@ -8758,7 +8785,6 @@ dependencies = [ "arbitrary", "beacon_chain", "bls", - "cached_tree_hash", "compare_fields", "compare_fields_derive", "criterion", @@ -8779,7 +8805,7 @@ dependencies = [ "merkle_proof", "metastruct", "milhouse", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "paste", "rand", "rand_xorshift", @@ -8795,7 +8821,6 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", - "strum", "superstruct", "swap_or_not_shuffle", "tempfile", @@ -8867,10 +8892,10 @@ dependencies = [ ] [[package]] -name = "unicode-width" -version = "0.1.11" +name = "unicode-properties" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" [[package]] name = "unicode-xid" @@ -8904,16 +8929,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" -[[package]] -name = "unsigned-varint" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" -dependencies = [ - "bytes", - "tokio-util 0.6.10", -] - [[package]] name = "unsigned-varint" version = "0.7.2" @@ -8929,6 +8944,10 @@ name = "unsigned-varint" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +dependencies = [ + "bytes", + "tokio-util", +] [[package]] name = "untrusted" @@ -8948,7 +8967,7 @@ version = "0.1.0" dependencies = [ "lazy_static", "lru_cache", - "parking_lot 0.12.1", + "parking_lot 0.12.3", ] [[package]] @@ -8962,6 +8981,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -8988,6 +9013,7 @@ dependencies = [ "eth2", "eth2_keystore", "ethereum_serde_utils", + "fdlimit", "filesystem", "futures", "hex", @@ -9001,7 +9027,7 @@ dependencies = [ "logging", "malloc_utils", "monitoring_api", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "rand", "reqwest", "ring 0.16.20", @@ -9050,12 +9076,10 @@ name = "validator_manager" version = "0.1.0" dependencies = [ "account_utils", - "bls", "clap", "clap_utils", "environment", "eth2", - "eth2_keystore", "eth2_network_config", "eth2_wallet", "ethereum_serde_utils", @@ -9152,7 +9176,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-rustls 0.25.0", - "tokio-util 0.7.10", + "tokio-util", "tower-service", "tracing", ] @@ -9210,7 +9234,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -9244,7 +9268,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9291,13 +9315,12 @@ dependencies = [ "beacon_chain", "beacon_node", "bls", - "byteorder", "clap", + "clap_utils", "diesel", "diesel_migrations", "env_logger 0.9.3", "eth2", - "hex", "http_api", "hyper 1.3.1", "log", @@ -9328,6 +9351,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "web3signer_tests" version = "0.1.0" @@ -9339,7 +9372,7 @@ dependencies = [ "eth2_network_config", "futures", "lazy_static", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "reqwest", "serde", "serde_json", @@ -9401,11 +9434,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -9726,9 +9759,9 @@ dependencies = [ [[package]] name = "x509-parser" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ "asn1-rs", "data-encoding", @@ -9764,7 +9797,7 @@ checksum = "498f4d102a79ea1c9d4dd27573c0fc96ad74c023e8da38484e47883076da25fb" dependencies = [ "arraydeque", "encoding_rs", - "hashlink", + "hashlink 0.8.4", ] [[package]] @@ -9776,7 +9809,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "rand", "static_assertions", @@ -9784,14 +9817,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.1" -source = "git+https://github.com/sigp/rust-yamux.git#12a23aa0e34b7807c0c5f87f06b3438f7d6c2ed0" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f97202f6b125031b95d83e01dc57292b529384f80bfae4677e4bbc10178cf72" dependencies = [ "futures", "instant", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.3", "pin-project", "rand", "static_assertions", @@ -9808,29 +9842,29 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -9843,7 +9877,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.66", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index be2011ba286..fad5fbead14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,6 @@ members = [ "database_manager", - "consensus/cached_tree_hash", "consensus/int_to_bytes", "consensus/fork_choice", "consensus/proto_array", @@ -102,12 +101,12 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" +clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. 
c-kzg = { version = "1", default-features = false } -clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } -criterion = "0.3" +criterion = "0.5" delay_map = "0.3" derivative = "2" dirs = "3" @@ -127,6 +126,7 @@ fnv = "1" fs2 = "0.4" futures = "0.3" hex = "0.4" +hashlink = "0.9.0" hyper = "1" itertools = "0.10" lazy_static = "1" @@ -154,21 +154,21 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] } +slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } -smallvec = "1.11.2" +smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" ssz_types = "0.6" strum = { version = "0.24", features = ["derive"] } -superstruct = "0.7" +superstruct = "0.8" syn = "1" sysinfo = "0.26" tempfile = "3" tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } -tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } +tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" tracing-appender = "0.2" tracing-core = "0.1" @@ -188,7 +188,6 @@ beacon_chain = { path = "beacon_node/beacon_chain" } beacon_node = { path = "beacon_node" } beacon_processor = { path = "beacon_node/beacon_processor" } bls = { path = "crypto/bls" } -cached_tree_hash = { path = "consensus/cached_tree_hash" } clap_utils = { path = "common/clap_utils" } compare_fields = { path = "common/compare_fields" } deposit_contract = { path = "common/deposit_contract" } @@ -238,9 +237,6 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } -[patch.crates-io] -yamux = { git = "https://github.com/sigp/rust-yamux.git" } - [profile.maxperf] inherits = "release" lto = "fat" diff --git a/Dockerfile b/Dockerfile index 901c1b83d63..ff7f14d534e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ -FROM rust:1.75.0-bullseye AS builder +FROM rust:1.78.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES ARG PROFILE=release ARG CARGO_USE_GIT_CLI=true -ENV FEATURES $FEATURES -ENV PROFILE $PROFILE +ENV FEATURES=$FEATURES +ENV PROFILE=$PROFILE ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI RUN cd lighthouse && make diff --git a/FUNDING.json b/FUNDING.json new file mode 100644 index 00000000000..5001999927c --- /dev/null +++ b/FUNDING.json @@ -0,0 +1,7 @@ +{ + "drips": { + "ethereum": { + "ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b" + } + } +} \ No newline at end of file diff --git a/Makefile b/Makefile index 12d33cc3a8b..d18a6738803 100644 --- a/Makefile +++ b/Makefile @@ -14,16 +14,8 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 -# List of features to use when building natively. Can be overridden via the environment. -# No jemalloc on Windows -ifeq ($(OS),Windows_NT) - FEATURES?= -else - FEATURES?=jemalloc -endif - # List of features to use when cross-compiling. Can be overridden via the environment. -CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,jemalloc +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc # Cargo profile for Cross builds. 
Default is for local builds, CI uses an override. CROSS_PROFILE ?= release @@ -182,8 +174,9 @@ test-network-%: # Run the tests in the `slasher` crate for all supported database backends. test-slasher: cargo nextest run --release -p slasher --features "lmdb,$(TEST_FEATURES)" + cargo nextest run --release -p slasher --no-default-features --features "redb,$(TEST_FEATURES)" cargo nextest run --release -p slasher --no-default-features --features "mdbx,$(TEST_FEATURES)" - cargo nextest run --release -p slasher --features "lmdb,mdbx,$(TEST_FEATURES)" # both backends enabled + cargo nextest run --release -p slasher --features "lmdb,mdbx,redb,$(TEST_FEATURES)" # all backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -214,6 +207,10 @@ cli: cli-local: make && ./scripts/cli.sh +# Check for markdown files +mdlint: + ./scripts/mdlint.sh + # Runs the entire test suite, downloading test vectors if required. test-full: cargo-fmt test-release test-debug test-ef test-exec-engine @@ -225,7 +222,6 @@ lint: -D clippy::manual_let_else \ -D warnings \ -A clippy::derive_partial_eq_without_eq \ - -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ -A clippy::question-mark \ diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 0fab7b31fe3..7f2fa05a888 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -27,9 +27,6 @@ safe_arith = { workspace = true } slot_clock = { workspace = true } filesystem = { workspace = true } sensitive_url = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -slog = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index ce7e8a42c24..534939cf6bd 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -2,8 +2,11 @@ mod common; pub mod validator; pub mod wallet; -use clap::App; +use clap::Arg; +use clap::ArgAction; use clap::ArgMatches; +use clap::Command; +use clap_utils::FLAG_HEADER; use environment::Environment; use types::EthSpec; @@ -13,25 +16,36 @@ pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; pub const VALIDATOR_DIR_FLAG_ALIAS: &str = "validators-dir"; pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["a", "am", "account", CMD]) +pub fn cli_app() -> Command { + Command::new(CMD) + .visible_aliases(["a", "am", "account"]) .about("Utilities for generating and managing Ethereum 2.0 accounts.") + .display_order(0) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) .subcommand(wallet::cli_app()) .subcommand(validator::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. -pub fn run(matches: &ArgMatches<'_>, env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), String> { match matches.subcommand() { - (wallet::CMD, Some(matches)) => wallet::cli_run(matches)?, - (validator::CMD, Some(matches)) => validator::cli_run(matches, env)?, - (unknown, _) => { + Some((wallet::CMD, matches)) => wallet::cli_run(matches)?, + Some((validator::CMD, matches)) => validator::cli_run(matches, env)?, + Some((unknown, _)) => { return Err(format!( "{} is not a valid {} command. 
See --help.", unknown, CMD )); } + _ => return Err("No subcommand provided, see --help for options".to_string()), } Ok(()) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 93b041c61c4..cfe4d8e94ad 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -4,7 +4,8 @@ use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, }; @@ -26,73 +27,83 @@ pub const COUNT_FLAG: &str = "count"; pub const AT_MOST_FLAG: &str = "at-most"; pub const WALLET_PASSWORD_PROMPT: &str = "Enter your wallet's password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key \ derivation scheme.", ) .arg( - Arg::with_name(WALLET_NAME_FLAG) + Arg::new(WALLET_NAME_FLAG) .long(WALLET_NAME_FLAG) .value_name("WALLET_NAME") .help("Use the wallet identified by this name") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLET_PASSWORD_FLAG) + Arg::new(WALLET_PASSWORD_FLAG) .long(WALLET_PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help("A path to a file containing the password which will unlock the wallet.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name(WALLETS_DIR_FLAG) .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) - .conflicts_with("datadir"), + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .conflicts_with("datadir") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( "The GWEI value of the deposit amount. Defaults to the minimum amount \ required for an active validator (MAX_EFFECTIVE_BALANCE)", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(AT_MOST_FLAG) + Arg::new(AT_MOST_FLAG) .long(AT_MOST_FLAG) .value_name("AT_MOST_VALIDATORS") .help( @@ -100,14 +111,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { reach the given count. Never deletes an existing validator.", ) .conflicts_with("count") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .action(ArgAction::SetTrue) ) } @@ -119,15 +134,15 @@ pub fn cli_run( let spec = env.core_context().eth2_config.spec; let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? }; - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -144,7 +159,7 @@ pub fn cli_run( return Err(format!( "No wallet directory at {:?}. 
Use the `lighthouse --network {} {} {} {}` command to create a wallet", wallet_base_dir, - matches.value_of("network").unwrap_or(""), + matches.get_one::("network").unwrap_or(&String::from("")), crate::CMD, crate::wallet::CMD, crate::wallet::create::CMD @@ -245,7 +260,7 @@ pub fn cli_run( .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .create_eth1_tx_data(deposit_gwei, &spec) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index bc9e0ee1dd6..277d2ae8eca 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -1,6 +1,7 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use bls::{Keypair, PublicKey}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use environment::Environment; use eth2::{ types::{GenesisData, StateId, ValidatorData, ValidatorId, ValidatorStatus}, @@ -28,48 +29,59 @@ pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("exit") +pub fn cli_app() -> Command { + Command::new("exit") .about("Submits a VoluntaryExit to the beacon chain for a given validator keystore.") .arg( - Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("The path to the EIP-2335 voting keystore for the validator") - .takes_value(true) - .required(true), + .action(ArgAction::Set) + .required(true) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FILE_FLAG) + Arg::new(PASSWORD_FILE_FLAG) .long(PASSWORD_FILE_FLAG) .value_name("PASSWORD_FILE_PATH") .help("The path to the password file which unlocks the validator voting keystore") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(BEACON_SERVER_FLAG) + Arg::new(BEACON_SERVER_FLAG) .long(BEACON_SERVER_FLAG) .value_name("NETWORK_ADDRESS") .help("Address to a beacon node HTTP API") .default_value(DEFAULT_BEACON_NODE) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(NO_WAIT) + Arg::new(NO_WAIT) .long(NO_WAIT) .help("Exits after publishing the voluntary exit without waiting for confirmation that the exit was included in the beacon chain") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(NO_CONFIRMATION) + Arg::new(NO_CONFIRMATION) .long(NO_CONFIRMATION) .help("Exits without prompting for confirmation that you understand the implications of a voluntary exit. 
This should be used with caution") + .display_order(0) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } @@ -78,9 +90,9 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let password_file_path: Option = clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let no_wait = matches.is_present(NO_WAIT); - let no_confirmation = matches.is_present(NO_CONFIRMATION); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let no_wait = matches.get_flag(NO_WAIT); + let no_confirmation = matches.get_flag(NO_CONFIRMATION); let spec = env.eth2_config().spec.clone(); let server_url: String = clap_utils::parse_required(matches, BEACON_SERVER_FLAG)?; diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index bf000385f3a..a7c72679f74 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -9,7 +9,8 @@ use account_utils::{ }, ZeroizeString, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::fs; use std::path::PathBuf; @@ -25,8 +26,8 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password, or press enter t pub const KEYSTORE_REUSE_WARNING: &str = "DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH \ ANOTHER CLIENT, OR YOU WILL GET SLASHED."; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Imports one or more EIP-2335 passwords into a Lighthouse VC directory, \ requesting passwords interactively. 
The directory flag provides a convenient \ @@ -34,16 +35,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Python utility.", ) .arg( - Arg::with_name(KEYSTORE_FLAG) + Arg::new(KEYSTORE_FLAG) .long(KEYSTORE_FLAG) .value_name("KEYSTORE_PATH") .help("Path to a single keystore to be imported.") .conflicts_with(DIR_FLAG) - .required_unless(DIR_FLAG) - .takes_value(true), + .required_unless_present(DIR_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DIR_FLAG) + Arg::new(DIR_FLAG) .long(DIR_FLAG) .value_name("KEYSTORES_DIRECTORY") .help( @@ -53,23 +55,29 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { has the '.json' extension will be attempted to be imported.", ) .conflicts_with(KEYSTORE_FLAG) - .required_unless(KEYSTORE_FLAG) - .takes_value(true), + .required_unless_present(KEYSTORE_FLAG) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(REUSE_PASSWORD_FLAG) + Arg::new(REUSE_PASSWORD_FLAG) .long(REUSE_PASSWORD_FLAG) - .help("If present, the same password will be used for all imported keystores."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, the same password will be used for all imported keystores.") + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("KEYSTORE_PASSWORD_PATH") .requires(REUSE_PASSWORD_FLAG) @@ -79,15 +87,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { The password will be copied to the `validator_definitions.yml` file, so after \ import we strongly recommend you delete the file at KEYSTORE_PASSWORD_PATH.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); - let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); + let reuse_password = matches.get_flag(REUSE_PASSWORD_FLAG); let keystore_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; diff --git a/account_manager/src/validator/list.rs b/account_manager/src/validator/list.rs index 33857283695..d082a49590b 100644 --- a/account_manager/src/validator/list.rs +++ b/account_manager/src/validator/list.rs @@ -1,11 +1,11 @@ use account_utils::validator_definitions::ValidatorDefinitions; -use clap::App; +use clap::Command; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the public keys of all validators.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the public keys of all validators.") } pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index af977dcf034..6616bb0c45c 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -7,7 +7,8 
@@ pub mod recover; pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; use std::path::PathBuf; @@ -15,11 +16,21 @@ use types::EthSpec; pub const CMD: &str = "validator"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) + .display_order(0) .about("Provides commands for managing Eth2 validators.") .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) .alias(VALIDATOR_DIR_FLAG_ALIAS) .value_name("VALIDATOR_DIRECTORY") @@ -27,7 +38,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The path to search for validator directories. \ Defaults to ~/.lighthouse/{network}/validators", ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -40,7 +51,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { - let validator_base_dir = if matches.value_of("datadir").is_some() { + let validator_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_VALIDATOR_DIR) } else { @@ -49,18 +60,19 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< eprintln!("validator-dir path: {:?}", validator_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run::(matches, env, validator_base_dir), - (modify::CMD, Some(matches)) => modify::cli_run(matches, validator_base_dir), - (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir), - (list::CMD, Some(_)) => list::cli_run(validator_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir), - (slashing_protection::CMD, Some(matches)) => { + Some((create::CMD, matches)) => create::cli_run::(matches, env, validator_base_dir), + Some((modify::CMD, matches)) => modify::cli_run(matches, validator_base_dir), + Some((import::CMD, matches)) => import::cli_run(matches, validator_base_dir), + Some((list::CMD, _)) => list::cli_run(validator_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, validator_base_dir), + Some((slashing_protection::CMD, matches)) => { slashing_protection::cli_run(matches, env, validator_base_dir) } - (exit::CMD, Some(matches)) => exit::cli_run(matches, env), - (unknown, _) => Err(format!( + Some((exit::CMD, matches)) => exit::cli_run(matches, env), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown )), + _ => Err(format!("No command provided for {}. 
See --help", CMD)), } } diff --git a/account_manager/src/validator/modify.rs b/account_manager/src/validator/modify.rs index bd4ae4d8f49..571cd28bf5e 100644 --- a/account_manager/src/validator/modify.rs +++ b/account_manager/src/validator/modify.rs @@ -1,6 +1,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use bls::PublicKey; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use std::{collections::HashSet, path::PathBuf}; pub const CMD: &str = "modify"; @@ -10,43 +11,50 @@ pub const DISABLE: &str = "disable"; pub const PUBKEY_FLAG: &str = "pubkey"; pub const ALL: &str = "all"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Modify validator status in validator_definitions.yml.") + .display_order(0) .subcommand( - App::new(ENABLE) + Command::new(ENABLE) .about("Enable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to enable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Enable all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) .subcommand( - App::new(DISABLE) + Command::new(DISABLE) .about("Disable validator(s) in validator_definitions.yml.") .arg( - Arg::with_name(PUBKEY_FLAG) + Arg::new(PUBKEY_FLAG) .long(PUBKEY_FLAG) .value_name("PUBKEY") .help("Validator pubkey to disable") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(ALL) + Arg::new(ALL) .long(ALL) .help("Disable all validators in the validator directory") - .takes_value(false) - .conflicts_with(PUBKEY_FLAG), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with(PUBKEY_FLAG) + .display_order(0), ), ) } @@ -55,14 +63,15 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin // `true` implies we are setting `validator_definition.enabled = true` and // vice versa. let (enabled, sub_matches) = match matches.subcommand() { - (ENABLE, Some(sub_matches)) => (true, sub_matches), - (DISABLE, Some(sub_matches)) => (false, sub_matches), - (unknown, _) => { + Some((ENABLE, sub_matches)) => (true, sub_matches), + Some((DISABLE, sub_matches)) => (false, sub_matches), + Some((unknown, _)) => { return Err(format!( "{} does not have a {} command. See --help", CMD, unknown )) } + _ => return Err(format!("No command provided for {}. 
See --help", CMD)), }; let mut defs = ValidatorDefinitions::open(&validator_dir).map_err(|e| { format!( @@ -70,7 +79,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin validator_dir, e ) })?; - let pubkeys_to_modify = if sub_matches.is_present(ALL) { + let pubkeys_to_modify = if sub_matches.get_flag(ALL) { defs.as_slice() .iter() .map(|def| def.voting_public_key.clone()) diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 33d3b189266..4677db18df3 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -4,7 +4,8 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::{random_password, read_mnemonic_from_cli}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; @@ -15,70 +16,79 @@ pub const CMD: &str = "recover"; pub const FIRST_INDEX_FLAG: &str = "first-index"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Recovers validator private keys given a BIP-39 mnemonic phrase. \ If you did not specify a `--first-index` or count `--count`, by default this will \ only recover the keys associated with the validator at index 0 for an HD wallet \ in accordance with the EIP-2333 spec.") .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to recover.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0) ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("COUNT") .help("The number of validator keys you wish to recover. Counted consecutively from the provided `--first_index`.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("1"), + .default_value("1") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be read in from this file.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(SECRETS_DIR_FLAG) + Arg::new(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ Defaults to ~/.lighthouse/{network}/secrets", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STORE_WITHDRAW_FLAG) + Arg::new(STORE_WITHDRAW_FLAG) .long(STORE_WITHDRAW_FLAG) .help( "If present, the withdrawal keystore will be stored alongside the voting \ keypair. 
It is generally recommended to *not* store the withdrawal key and \ instead generate them from the wallet seed when required.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) } pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { - let secrets_dir = if matches.value_of("datadir").is_some() { + let secrets_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_SECRET_DIR) } else { @@ -87,7 +97,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!("secrets-dir path: {:?}", secrets_dir); @@ -131,7 +141,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .password_dir(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) - .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) + .store_withdrawal_keystore(matches.get_flag(STORE_WITHDRAW_FLAG)) .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index ff2eeb9cbfe..bcd860a4847 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,4 +1,4 @@ -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ interchange::Interchange, InterchangeError, InterchangeImportOutcome, SlashingDatabase, @@ -18,43 +18,47 @@ pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; pub const PUBKEYS_FLAG: &str = "pubkeys"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Import or export slashing protection data to or from another client") + .display_order(0) .subcommand( - App::new(IMPORT_CMD) + Command::new(IMPORT_CMD) .about("Import an interchange file") .arg( - Arg::with_name(IMPORT_FILE_ARG) - .takes_value(true) + Arg::new(IMPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") + .display_order(0) .help("The slashing protection interchange file to import (.json)"), ) ) .subcommand( - App::new(EXPORT_CMD) + Command::new(EXPORT_CMD) .about("Export an interchange file") .arg( - Arg::with_name(EXPORT_FILE_ARG) - .takes_value(true) + Arg::new(EXPORT_FILE_ARG) + .action(ArgAction::Set) .value_name("FILE") - .help("The filename to export the interchange file to"), + .help("The filename to export the interchange file to") + .display_order(0) ) .arg( - Arg::with_name(PUBKEYS_FLAG) + 
Arg::new(PUBKEYS_FLAG) .long(PUBKEYS_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("PUBKEYS") .help( "List of public keys to export history for. Keys should be 0x-prefixed, \ comma-separated. All known keys will be exported if omitted", - ), + ) + .display_order(0) ) ) } pub fn cli_run( - matches: &ArgMatches<'_>, + matches: &ArgMatches, env: Environment, validator_base_dir: PathBuf, ) -> Result<(), String> { @@ -68,7 +72,7 @@ pub fn cli_run( .ok_or_else(|| "Unable to get genesis state, has genesis occurred?".to_string())?; match matches.subcommand() { - (IMPORT_CMD, Some(matches)) => { + Some((IMPORT_CMD, matches)) => { let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( @@ -168,7 +172,7 @@ pub fn cli_run( Ok(()) } - (EXPORT_CMD, Some(matches)) => { + Some((EXPORT_CMD, matches)) => { let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; let selected_pubkeys = if let Some(pubkeys) = @@ -215,7 +219,7 @@ pub fn cli_run( Ok(()) } - ("", _) => Err("No subcommand provided, see --help for options".to_string()), - (command, _) => Err(format!("No such subcommand `{}`", command)), + Some((command, _)) => Err(format!("No such subcommand `{}`", command)), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index accee11b5a2..12aa5d3801a 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -3,7 +3,7 @@ use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, }; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2_wallet::{ bip39::{Language, Mnemonic, MnemonicType}, PlainText, @@ -33,21 +33,22 @@ pub const NEW_WALLET_PASSWORD_PROMPT: &str = "Enter a password for your new wallet that is at least 12 characters long:"; pub const RETYPE_PASSWORD_PROMPT: &str = "Please re-enter your wallet's new password:"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Creates a new HD (hierarchical-deterministic) EIP-2386 wallet.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("WALLET_PASSWORD_PATH") .help( @@ -56,49 +57,65 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. 
Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help( "If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC.", ) - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) ) .arg( - Arg::with_name(MNEMONIC_LENGTH_FLAG) + Arg::new(MNEMONIC_LENGTH_FLAG) .long(MNEMONIC_LENGTH_FLAG) .value_name("MNEMONIC_LENGTH") .help("The number of words to use for the mnemonic phrase.") - .takes_value(true) - .validator(|len| { - match len.parse::().ok().and_then(|words| MnemonicType::for_word_count(words).ok()) { - Some(_) => Ok(()), - None => Err(format!("Mnemonic length must be one of {}", MNEMONIC_TYPES.iter().map(|t| t.word_count().to_string()).collect::>().join(", "))), - } + .action(ArgAction::Set) + .value_parser(|len: &str| { + match len + .parse::() + .ok() + .and_then(|words| MnemonicType::for_word_count(words).ok()) + { + Some(_) => Ok(len.to_string()), + None => Err(format!( + "Mnemonic length must be one of {}", + MNEMONIC_TYPES + .iter() + .map(|t| t.word_count().to_string()) + .collect::>() + .join(", ") + )), + } }) - .default_value("24"), + .default_value("24") + .display_order(0) ) } @@ -153,7 +170,7 @@ pub fn create_wallet_from_mnemonic( let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; let wallet_password_path: Option = clap_utils::parse_optional(matches, PASSWORD_FLAG)?; let type_field: String = clap_utils::parse_required(matches, TYPE_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); let wallet_type = match type_field.as_ref() { HD_TYPE => WalletType::Hd, unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 9190de3915d..a551ffae128 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -1,12 +1,12 @@ use crate::WALLETS_DIR_FLAG; -use clap::App; +use clap::Command; use eth2_wallet_manager::WalletManager; use std::path::PathBuf; pub const CMD: &str = "list"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD).about("Lists the names of all wallets.") +pub fn cli_app() -> Command { + Command::new(CMD).about("Lists the names of all wallets.") } pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index dfadebf57f3..59f5f362529 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -3,21 +3,32 @@ pub mod list; pub mod recover; use crate::WALLETS_DIR_FLAG; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; use 
std::path::PathBuf; pub const CMD: &str = "wallet"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Manage wallets, from which validator keys can be derived.") + .display_order(0) .arg( - Arg::with_name(WALLETS_DIR_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) .value_name("WALLETS_DIRECTORY") .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir"), ) .subcommand(create::cli_app()) @@ -26,7 +37,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let wallet_base_dir = if matches.value_of("datadir").is_some() { + let wallet_base_dir = if matches.get_one::("datadir").is_some() { let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; path.join(DEFAULT_WALLET_DIR) } else { @@ -37,12 +48,13 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { eprintln!("wallet-dir path: {:?}", wallet_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir), - (list::CMD, Some(_)) => list::cli_run(wallet_base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir), - (unknown, _) => Err(format!( + Some((create::CMD, matches)) => create::cli_run(matches, wallet_base_dir), + Some((list::CMD, _)) => list::cli_run(wallet_base_dir), + Some((recover::CMD, matches)) => recover::cli_run(matches, wallet_base_dir), + Some((unknown, _)) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown )), + _ => Err("No subcommand provided, see --help for options".to_string()), } } diff --git a/account_manager/src/wallet/recover.rs b/account_manager/src/wallet/recover.rs index 6e047aca8d2..b9641f11521 100644 --- a/account_manager/src/wallet/recover.rs +++ b/account_manager/src/wallet/recover.rs @@ -1,27 +1,28 @@ use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG}; use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG}; use account_utils::read_mnemonic_from_cli; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; use std::path::PathBuf; pub const CMD: &str = "recover"; pub const MNEMONIC_FLAG: &str = "mnemonic-path"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about("Recovers an EIP-2386 wallet from a given a BIP-39 mnemonic phrase.") .arg( - Arg::with_name(NAME_FLAG) + Arg::new(NAME_FLAG) .long(NAME_FLAG) .value_name("WALLET_NAME") .help( "The wallet will be created with this name. It is not allowed to \ create two wallets with the same name for the same --base-dir.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(PASSWORD_FLAG) + Arg::new(PASSWORD_FLAG) .long(PASSWORD_FLAG) .value_name("PASSWORD_FILE_PATH") .help( @@ -31,39 +32,43 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { saved at that path. 
To avoid confusion, if the file does not already \ exist it must include a '.pass' suffix.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(TYPE_FLAG) + Arg::new(TYPE_FLAG) .long(TYPE_FLAG) .value_name("WALLET_TYPE") .help( "The type of wallet to create. Only HD (hierarchical-deterministic) \ wallets are supported presently..", ) - .takes_value(true) - .possible_values(&[HD_TYPE]) - .default_value(HD_TYPE), + .action(ArgAction::Set) + .value_parser([HD_TYPE]) + .default_value(HD_TYPE) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) } pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; - let stdin_inputs = cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG); + let stdin_inputs = cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG); eprintln!(); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 7cc6e2b6ae8..a5fd29c971f 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.1.3" +version = "5.2.1" authors = [ "Paul Hauner ", "Age Manning ( // Store the unaggregated attestation in the validator monitor for later processing match chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) { Ok(unaggregated_attestation) => { - let data = &unaggregated_attestation.data; + let data = unaggregated_attestation.data(); debug!( chain.log, diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 471c43d94f8..06fba937d84 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -35,17 +35,23 @@ mod batch; use crate::{ - beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics, - observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError, + beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + metrics, + observed_aggregates::{ObserveOutcome, ObservedAttestationKey}, + observed_attesters::Error as ObservedAttestersError, BeaconChain, BeaconChainError, BeaconChainTypes, }; use bls::verify_signature_sets; +use itertools::Itertools; use proto_array::Block as ProtoBlock; use slog::debug; use slot_clock::SlotClock; use state_processing::{ - common::get_indexed_attestation, - per_block_processing::errors::AttestationValidationError, + common::{ + attesting_indices_base, + attesting_indices_electra::{self, get_committee_indices}, + }, + per_block_processing::errors::{AttestationValidationError, BlockOperationError}, signature_sets::{ indexed_attestation_signature_set_from_pubkeys, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, @@ -55,8 +61,9 @@ use 
std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, + Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec, + CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof, + SignedAggregateAndProof, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -137,6 +144,12 @@ pub enum Error { /// /// The peer has sent an invalid message. ValidatorIndexTooHigh(usize), + /// The validator index is not set to zero after Electra. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + CommitteeIndexNonZero(usize), /// The `attestation.data.beacon_block_root` block is unknown. /// /// ## Peer scoring @@ -185,6 +198,12 @@ pub enum Error { /// /// The peer has sent an invalid message. NotExactlyOneAggregationBitSet(usize), + /// The attestation doesn't have only one aggregation bit set. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + NotExactlyOneCommitteeBitSet(usize), /// We have already observed an attestation for the `validator_index` and refuse to process /// another. /// @@ -248,7 +267,7 @@ pub enum Error { impl From for Error { fn from(e: BeaconChainError) -> Self { - Error::BeaconChainError(e) + Self::BeaconChainError(e) } } @@ -263,10 +282,11 @@ enum CheckAttestationSignature { /// `IndexedAttestation` can be derived. /// /// These attestations have *not* undergone signature verification. +/// The `observed_attestation_key_root` is the hashed value of an `ObservedAttestationKey`. struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { signed_aggregate: &'a SignedAggregateAndProof, indexed_attestation: IndexedAttestation, - attestation_data_root: Hash256, + observed_attestation_key_root: Hash256, } /// Wraps a `Attestation` that has been verified up until the point that an `IndexedAttestation` can @@ -274,7 +294,7 @@ struct IndexedAggregatedAttestation<'a, T: BeaconChainTypes> { /// /// These attestations have *not* undergone signature verification. struct IndexedUnaggregatedAttestation<'a, T: BeaconChainTypes> { - attestation: &'a Attestation, + attestation: AttestationRef<'a, T::EthSpec>, indexed_attestation: IndexedAttestation, subnet_id: SubnetId, validator_index: u64, @@ -295,7 +315,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { /// Wraps an `Attestation` that has been fully verified for propagation on the gossip network. pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> { - attestation: &'a Attestation, + attestation: AttestationRef<'a, T::EthSpec>, indexed_attestation: IndexedAttestation, subnet_id: SubnetId, } @@ -322,20 +342,20 @@ impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> { /// A helper trait implemented on wrapper types that can be progressed to a state where they can be /// verified for application to fork choice. pub trait VerifiedAttestation: Sized { - fn attestation(&self) -> &Attestation; + fn attestation(&self) -> AttestationRef; fn indexed_attestation(&self) -> &IndexedAttestation; // Inefficient default implementation. This is overridden for gossip verified attestations. 
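The account_manager hunks above all follow the same clap 2 → clap 4 migration pattern: `App` becomes `Command`, `Arg::with_name`/`takes_value` become `Arg::new` with an explicit `ArgAction`, boolean lookups move from `is_present` to `get_flag`, and `subcommand()` now returns an `Option`. A minimal, self-contained sketch of the new style (the command, flag, and subcommand names here are illustrative, not taken from Lighthouse):

use clap::{Arg, ArgAction, ArgMatches, Command};

fn cli() -> Command {
    Command::new("example").subcommand(
        Command::new("list").about("Lists items.").arg(
            Arg::new("all")
                .long("all")
                .help("List every item")
                .action(ArgAction::SetTrue), // clap 2: .takes_value(false)
        ),
    )
}

fn run(matches: &ArgMatches) -> Result<(), String> {
    // clap 4 returns Option<(&str, &ArgMatches)>; clap 2 returned ("", _) for "no subcommand".
    match matches.subcommand() {
        Some(("list", sub)) => {
            let all = sub.get_flag("all"); // clap 2: sub.is_present("all")
            println!("list, all = {all}");
            Ok(())
        }
        Some((unknown, _)) => Err(format!("unknown subcommand {unknown}")),
        None => Err("No subcommand provided, see --help for options".to_string()),
    }
}

fn main() {
    let matches = cli().get_matches();
    if let Err(e) = run(&matches) {
        eprintln!("{e}");
    }
}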
fn into_attestation_and_indices(self) -> (Attestation, Vec) { - let attestation = self.attestation().clone(); - let attesting_indices = self.indexed_attestation().attesting_indices.clone().into(); + let attestation = self.attestation().clone_as_attestation(); + let attesting_indices = self.indexed_attestation().attesting_indices_to_vec(); (attestation, attesting_indices) } } impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttestation<'a, T> { - fn attestation(&self) -> &Attestation { + fn attestation(&self) -> AttestationRef { self.attestation() } @@ -345,7 +365,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttes } impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregatedAttestation<'a, T> { - fn attestation(&self) -> &Attestation { + fn attestation(&self) -> AttestationRef { self.attestation } @@ -357,7 +377,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregatedAtt /// Information about invalid attestations which might still be slashable despite being invalid. pub enum AttestationSlashInfo<'a, T: BeaconChainTypes, TErr> { /// The attestation is invalid, but its signature wasn't checked. - SignatureNotChecked(&'a Attestation, TErr), + SignatureNotChecked(AttestationRef<'a, T::EthSpec>, TErr), /// As for `SignatureNotChecked`, but we know the `IndexedAttestation`. SignatureNotCheckedIndexed(IndexedAttestation, TErr), /// The attestation's signature is invalid, so it will never be slashable. @@ -381,6 +401,11 @@ fn process_slash_info( if let Some(slasher) = chain.slasher.as_ref() { let (indexed_attestation, check_signature, err) = match slash_info { SignatureNotChecked(attestation, err) => { + if let Error::UnknownHeadBlock { .. } = err { + if attestation.data().beacon_block_root == attestation.data().target.root { + return err; + } + } match obtain_indexed_attestation_and_committees_per_slot(chain, attestation) { Ok((indexed, _)) => (indexed, true, err), Err(e) => { @@ -446,7 +471,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { signed_aggregate: &SignedAggregateAndProof, chain: &BeaconChain, ) -> Result { - let attestation = &signed_aggregate.message.aggregate; + let attestation = signed_aggregate.message().aggregate(); // Ensure attestation is within the last ATTESTATION_PROPAGATION_SLOT_RANGE slots (within a // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). @@ -455,30 +480,39 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; // Check the attestation's epoch matches its target. - if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) - != attestation.data.target.epoch + if attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()) + != attestation.data().target.epoch { return Err(Error::InvalidTargetEpoch { - slot: attestation.data.slot, - epoch: attestation.data.target.epoch, + slot: attestation.data().slot, + epoch: attestation.data().target.epoch, }); } - // Ensure the valid aggregated attestation has not already been seen locally. 
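The early checks above reject any aggregate whose slot does not fall inside its claimed target epoch. A rough standalone sketch of that arithmetic, assuming the mainnet value of 32 slots per epoch (constant and function names are illustrative only):

const SLOTS_PER_EPOCH: u64 = 32; // mainnet preset

fn check_target_epoch(attestation_slot: u64, target_epoch: u64) -> Result<(), String> {
    // An attestation for slot S must target the epoch that contains S.
    let attestation_epoch = attestation_slot / SLOTS_PER_EPOCH;
    if attestation_epoch != target_epoch {
        return Err(format!(
            "invalid target epoch: slot {attestation_slot} is in epoch {attestation_epoch}, target claims {target_epoch}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_target_epoch(65, 2).is_ok());  // slot 65 -> epoch 2
    assert!(check_target_epoch(65, 3).is_err()); // wrong target epoch
}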
- let attestation_data = &attestation.data; - let attestation_data_root = attestation_data.tree_hash_root(); + let observed_attestation_key_root = ObservedAttestationKey { + committee_index: attestation + .committee_index() + .ok_or(Error::NotExactlyOneCommitteeBitSet(0))?, + attestation_data: attestation.data().clone(), + } + .tree_hash_root(); + + // [New in Electra:EIP7549] + verify_committee_index(attestation)?; if chain .observed_attestations .write() - .is_known_subset(attestation, attestation_data_root) + .is_known_subset(attestation, observed_attestation_key_root) .map_err(|e| Error::BeaconChainError(e.into()))? { metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); - return Err(Error::AttestationSupersetKnown(attestation_data_root)); + return Err(Error::AttestationSupersetKnown( + observed_attestation_key_root, + )); } - let aggregator_index = signed_aggregate.message.aggregator_index; + let aggregator_index = signed_aggregate.message().aggregator_index(); // Ensure there has been no other observed aggregate for the given `aggregator_index`. // @@ -486,7 +520,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { match chain .observed_aggregators .read() - .validator_has_been_observed(attestation.data.target.epoch, aggregator_index as usize) + .validator_has_been_observed(attestation.data().target.epoch, aggregator_index as usize) { Ok(true) => Err(Error::AggregatorAlreadyKnown(aggregator_index)), Ok(false) => Ok(()), @@ -518,10 +552,10 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { verify_attestation_target_root::(&head_block, attestation)?; // Ensure that the attestation has participants. - if attestation.aggregation_bits.is_zero() { + if attestation.is_aggregation_bits_zero() { Err(Error::EmptyAggregationBitfield) } else { - Ok(attestation_data_root) + Ok(observed_attestation_key_root) } } @@ -531,23 +565,47 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result> { use AttestationSlashInfo::*; - - let attestation = &signed_aggregate.message.aggregate; - let aggregator_index = signed_aggregate.message.aggregator_index; - let attestation_data_root = match Self::verify_early_checks(signed_aggregate, chain) { + let observed_attestation_key_root = match Self::verify_early_checks(signed_aggregate, chain) + { Ok(root) => root, - Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), + Err(e) => { + return Err(SignatureNotChecked( + signed_aggregate.message().aggregate(), + e, + )) + } }; + // Committees must be sorted by ascending index order 0..committees_per_slot let get_indexed_attestation_with_committee = - |(committee, _): (BeaconCommittee, CommitteesPerSlot)| { - // Note: this clones the signature which is known to be a relatively slow operation. - // - // Future optimizations should remove this clone. - let selection_proof = - SelectionProof::from(signed_aggregate.message.selection_proof.clone()); - - if !selection_proof + |(committees, _): (Vec, CommitteesPerSlot)| { + let (index, aggregator_index, selection_proof, data) = match signed_aggregate { + SignedAggregateAndProof::Base(signed_aggregate) => ( + signed_aggregate.message.aggregate.data.index, + signed_aggregate.message.aggregator_index, + // Note: this clones the signature which is known to be a relatively slow operation. + // Future optimizations should remove this clone. 
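With EIP-7549 the aggregate dedup cache can no longer be keyed on `AttestationData` alone, since the committee index moves out of the data post-Electra; the hunk above instead keys observations on a hash of the committee index plus the data (`ObservedAttestationKey`). A loose sketch of that keying idea with plain std types; the real cache also tracks aggregation-bit subsets and supersets, which this deliberately omits, and the struct below is a simplified stand-in:

use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

// Simplified stand-in for AttestationData, for illustration only.
#[derive(Hash)]
struct AttestationData {
    slot: u64,
    beacon_block_root: [u8; 32],
    target_epoch: u64,
}

// Key observations on (committee_index, data), mirroring the ObservedAttestationKey idea.
fn observation_key(committee_index: u64, data: &AttestationData) -> u64 {
    let mut hasher = DefaultHasher::new();
    committee_index.hash(&mut hasher);
    data.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let mut observed = HashSet::new();
    let data = AttestationData { slot: 100, beacon_block_root: [0; 32], target_epoch: 3 };
    // The same data under two committee indices yields two distinct keys.
    assert!(observed.insert(observation_key(0, &data)));
    assert!(observed.insert(observation_key(1, &data)));
    assert!(!observed.insert(observation_key(0, &data))); // already seen
}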
+ signed_aggregate.message.selection_proof.clone(), + signed_aggregate.message.aggregate.data.clone(), + ), + SignedAggregateAndProof::Electra(signed_aggregate) => ( + signed_aggregate + .message + .aggregate + .committee_index() + .ok_or(Error::NotExactlyOneCommitteeBitSet(0))?, + signed_aggregate.message.aggregator_index, + signed_aggregate.message.selection_proof.clone(), + signed_aggregate.message.aggregate.data.clone(), + ), + }; + let slot = data.slot; + + let committee = committees + .get(index as usize) + .ok_or(Error::NoCommitteeForSlotAndIndex { slot, index })?; + + if !SelectionProof::from(selection_proof) .is_aggregator(committee.committee.len(), &chain.spec) .map_err(|e| Error::BeaconChainError(e.into()))? { @@ -559,23 +617,44 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { return Err(Error::AggregatorNotInCommittee { aggregator_index }); } - get_indexed_attestation(committee.committee, attestation) - .map_err(|e| BeaconChainError::from(e).into()) + // p2p aggregates have a single committee, we can assert that aggregation_bits is always + // less then MaxValidatorsPerCommittee + match signed_aggregate { + SignedAggregateAndProof::Base(signed_aggregate) => { + attesting_indices_base::get_indexed_attestation( + committee.committee, + &signed_aggregate.message.aggregate, + ) + .map_err(|e| BeaconChainError::from(e).into()) + } + SignedAggregateAndProof::Electra(signed_aggregate) => { + attesting_indices_electra::get_indexed_attestation( + &committees, + &signed_aggregate.message.aggregate, + ) + .map_err(|e| BeaconChainError::from(e).into()) + } + } }; - let indexed_attestation = match map_attestation_committee( + let attestation = signed_aggregate.message().aggregate(); + let indexed_attestation = match map_attestation_committees( chain, attestation, get_indexed_attestation_with_committee, ) { Ok(indexed_attestation) => indexed_attestation, - Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)), + Err(e) => { + return Err(SignatureNotChecked( + signed_aggregate.message().aggregate(), + e, + )) + } }; - Ok(IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_data_root, + observed_attestation_key_root, }) } } @@ -584,11 +663,11 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { /// Run the checks that happen after the indexed attestation and signature have been checked. fn verify_late_checks( signed_aggregate: &SignedAggregateAndProof, - attestation_data_root: Hash256, + observed_attestation_key_root: Hash256, chain: &BeaconChain, ) -> Result<(), Error> { - let attestation = &signed_aggregate.message.aggregate; - let aggregator_index = signed_aggregate.message.aggregator_index; + let attestation = signed_aggregate.message().aggregate(); + let aggregator_index = signed_aggregate.message().aggregator_index(); // Observe the valid attestation so we do not re-process it. // @@ -597,11 +676,13 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { if let ObserveOutcome::Subset = chain .observed_attestations .write() - .observe_item(attestation, Some(attestation_data_root)) + .observe_item(attestation, Some(observed_attestation_key_root)) .map_err(|e| Error::BeaconChainError(e.into()))? 
{ metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_SUBSETS); - return Err(Error::AttestationSupersetKnown(attestation_data_root)); + return Err(Error::AttestationSupersetKnown( + observed_attestation_key_root, + )); } // Observe the aggregator so we don't process another aggregate from them. @@ -611,12 +692,12 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { if chain .observed_aggregators .write() - .observe_validator(attestation.data.target.epoch, aggregator_index as usize) + .observe_validator(attestation.data().target.epoch, aggregator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { validator_index: aggregator_index, - epoch: attestation.data.target.epoch, + epoch: attestation.data().target.epoch, }); } @@ -661,7 +742,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { let IndexedAggregatedAttestation { signed_aggregate, indexed_attestation, - attestation_data_root, + observed_attestation_key_root, } = signed_aggregate; match check_signature { @@ -685,7 +766,9 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { CheckAttestationSignature::No => (), }; - if let Err(e) = Self::verify_late_checks(signed_aggregate, attestation_data_root, chain) { + if let Err(e) = + Self::verify_late_checks(signed_aggregate, observed_attestation_key_root, chain) + { return Err(SignatureValid(indexed_attestation, e)); } @@ -696,8 +779,8 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { } /// Returns the underlying `attestation` for the `signed_aggregate`. - pub fn attestation(&self) -> &Attestation { - &self.signed_aggregate.message.aggregate + pub fn attestation(&self) -> AttestationRef<'a, T::EthSpec> { + self.signed_aggregate.message().aggregate() } /// Returns the underlying `signed_aggregate`. @@ -709,16 +792,16 @@ impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> { impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// Run the checks that happen before an indexed attestation is constructed. pub fn verify_early_checks( - attestation: &Attestation, + attestation: AttestationRef, chain: &BeaconChain, ) -> Result<(), Error> { - let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); + let attestation_epoch = attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()); // Check the attestation's epoch matches its target. - if attestation_epoch != attestation.data.target.epoch { + if attestation_epoch != attestation.data().target.epoch { return Err(Error::InvalidTargetEpoch { - slot: attestation.data.slot, - epoch: attestation.data.target.epoch, + slot: attestation.data().slot, + epoch: attestation.data().target.epoch, }); } @@ -730,11 +813,14 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. - let num_aggregation_bits = attestation.aggregation_bits.num_set_bits(); + let num_aggregation_bits = attestation.num_set_aggregation_bits(); if num_aggregation_bits != 1 { return Err(Error::NotExactlyOneAggregationBitSet(num_aggregation_bits)); } + // [New in Electra:EIP7549] + verify_committee_index(attestation)?; + // Attestations must be for a known block. If the block is unknown, we simply drop the // attestation and do not delay consideration for later. 
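The unaggregated path above enforces that a gossip attestation carries exactly one set aggregation bit (`num_set_aggregation_bits() == 1`), and later pulls out the single attesting index. A tiny sketch of the same rule over a plain bool slice (names and types are illustrative):

fn check_unaggregated(aggregation_bits: &[bool]) -> Result<usize, String> {
    let set: Vec<usize> = aggregation_bits
        .iter()
        .enumerate()
        .filter_map(|(i, b)| b.then_some(i))
        .collect();
    match set.as_slice() {
        // Exactly one bit set: return the lone attester's position in the committee.
        [single] => Ok(*single),
        other => Err(format!("expected exactly one aggregation bit, found {}", other.len())),
    }
}

fn main() {
    assert_eq!(check_unaggregated(&[false, true, false]), Ok(1));
    assert!(check_unaggregated(&[true, true, false]).is_err());
    assert!(check_unaggregated(&[false, false]).is_err());
}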
// @@ -750,14 +836,14 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// Run the checks that apply to the indexed attestation before the signature is checked. pub fn verify_middle_checks( - attestation: &Attestation, + attestation: AttestationRef, indexed_attestation: &IndexedAttestation, committees_per_slot: u64, subnet_id: Option, chain: &BeaconChain, ) -> Result<(u64, SubnetId), Error> { - let expected_subnet_id = SubnetId::compute_subnet_for_attestation_data::( - &indexed_attestation.data, + let expected_subnet_id = SubnetId::compute_subnet_for_attestation::( + attestation, committees_per_slot, &chain.spec, ) @@ -774,8 +860,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { }; let validator_index = *indexed_attestation - .attesting_indices - .first() + .attesting_indices_first() .ok_or(Error::NotExactlyOneAggregationBitSet(0))?; /* @@ -785,12 +870,12 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { if chain .observed_gossip_attesters .read() - .validator_has_been_observed(attestation.data.target.epoch, validator_index as usize) + .validator_has_been_observed(attestation.data().target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { validator_index, - epoch: attestation.data.target.epoch, + epoch: attestation.data().target.epoch, }); } @@ -807,7 +892,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { subnet_id: Option, chain: &BeaconChain, ) -> Result { - Self::verify_slashable(attestation, subnet_id, chain) + Self::verify_slashable(attestation.to_ref(), subnet_id, chain) .map(|verified_unaggregated| { if let Some(slasher) = chain.slasher.as_ref() { slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); @@ -819,7 +904,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { /// Verify the attestation, producing extra information about whether it might be slashable. pub fn verify_slashable( - attestation: &'a Attestation, + attestation: AttestationRef<'a, T::EthSpec>, subnet_id: Option, chain: &BeaconChain, ) -> Result> { @@ -868,7 +953,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// Run the checks that apply after the signature has been checked. fn verify_late_checks( - attestation: &Attestation, + attestation: AttestationRef, validator_index: u64, chain: &BeaconChain, ) -> Result<(), Error> { @@ -881,12 +966,12 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { if chain .observed_gossip_attesters .write() - .observe_validator(attestation.data.target.epoch, validator_index as usize) + .observe_validator(attestation.data().target.epoch, validator_index as usize) .map_err(BeaconChainError::from)? { return Err(Error::PriorAttestationKnown { validator_index, - epoch: attestation.data.target.epoch, + epoch: attestation.data().target.epoch, }); } Ok(()) @@ -962,7 +1047,7 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { } /// Returns the wrapped `attestation`. - pub fn attestation(&self) -> &Attestation { + pub fn attestation(&self) -> AttestationRef { self.attestation } @@ -992,34 +1077,34 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { /// already finalized. 
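The middle checks also refuse a second gossip attestation from the same validator within one epoch (`observed_gossip_attesters`). A rough first-seen tracker conveying the same idea; the struct below is a bare `HashSet` sketch, whereas the real structure is epoch-pruned and far more compact:

use std::collections::HashSet;

#[derive(Default)]
struct ObservedAttesters {
    seen: HashSet<(u64, u64)>, // (epoch, validator_index)
}

impl ObservedAttesters {
    /// Returns true if this (epoch, validator) pair was already observed.
    fn observe(&mut self, epoch: u64, validator_index: u64) -> bool {
        !self.seen.insert((epoch, validator_index))
    }
}

fn main() {
    let mut attesters = ObservedAttesters::default();
    assert!(!attesters.observe(10, 42)); // first attestation this epoch: accepted
    assert!(attesters.observe(10, 42));  // same epoch again: "prior attestation known"
    assert!(!attesters.observe(11, 42)); // new epoch: accepted again
}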
fn verify_head_block_is_known( chain: &BeaconChain, - attestation: &Attestation, + attestation: AttestationRef, max_skip_slots: Option, ) -> Result { let block_opt = chain .canonical_head .fork_choice_read_lock() - .get_block(&attestation.data.beacon_block_root) + .get_block(&attestation.data().beacon_block_root) .or_else(|| { chain .early_attester_cache - .get_proto_block(attestation.data.beacon_block_root) + .get_proto_block(attestation.data().beacon_block_root) }); if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. if let Some(max_skip_slots) = max_skip_slots { - if attestation.data.slot > block.slot + max_skip_slots { + if attestation.data().slot > block.slot + max_skip_slots { return Err(Error::TooManySkippedSlots { head_block_slot: block.slot, - attestation_slot: attestation.data.slot, + attestation_slot: attestation.data().slot, }); } } Ok(block) - } else if chain.is_pre_finalization_block(attestation.data.beacon_block_root)? { + } else if chain.is_pre_finalization_block(attestation.data().beacon_block_root)? { Err(Error::HeadBlockFinalized { - beacon_block_root: attestation.data.beacon_block_root, + beacon_block_root: attestation.data().beacon_block_root, }) } else { // The block is either: @@ -1029,7 +1114,7 @@ fn verify_head_block_is_known( // 2) A post-finalization block that we don't know about yet. We'll queue // the attestation until the block becomes available (or we time out). Err(Error::UnknownHeadBlock { - beacon_block_root: attestation.data.beacon_block_root, + beacon_block_root: attestation.data().beacon_block_root, }) } } @@ -1040,10 +1125,10 @@ fn verify_head_block_is_known( /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. pub fn verify_propagation_slot_range( slot_clock: &S, - attestation: &Attestation, + attestation: AttestationRef, spec: &ChainSpec, ) -> Result<(), Error> { - let attestation_slot = attestation.data.slot; + let attestation_slot = attestation.data().slot; let latest_permissible_slot = slot_clock .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; @@ -1062,14 +1147,13 @@ pub fn verify_propagation_slot_range( let current_fork = spec.fork_name_at_slot::(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?); - let earliest_permissible_slot = match current_fork { - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => { - one_epoch_prior - } - // EIP-7045 - ForkName::Deneb | ForkName::Electra => one_epoch_prior + let earliest_permissible_slot = if !current_fork.deneb_enabled() { + one_epoch_prior + // EIP-7045 + } else { + one_epoch_prior .epoch(E::slots_per_epoch()) - .start_slot(E::slots_per_epoch()), + .start_slot(E::slots_per_epoch()) }; if attestation_slot < earliest_permissible_slot { @@ -1097,18 +1181,17 @@ pub fn verify_attestation_signature( let fork = chain .spec - .fork_at_epoch(indexed_attestation.data.target.epoch); + .fork_at_epoch(indexed_attestation.data().target.epoch); let signature_set = indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &indexed_attestation.signature, + indexed_attestation.signature(), indexed_attestation, &fork, chain.genesis_validators_root, &chain.spec, ) .map_err(BeaconChainError::SignatureSetError)?; - metrics::stop_timer(signature_setup_timer); let _signature_verification_timer = @@ -1125,11 +1208,11 @@ pub fn verify_attestation_signature( /// `attestation.data.beacon_block_root`. 
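// A simplified sketch of the earliest-permissible-slot rule rewritten above in
// `verify_propagation_slot_range`: before Deneb an attestation may propagate
// for roughly one epoch after its slot, while from Deneb onwards (EIP-7045)
// the window extends back to the start of that prior epoch. The derivation of
// `one_epoch_prior` and the constant are simplifying assumptions here.
fn earliest_permissible_slot(current_slot: u64, deneb_enabled: bool) -> u64 {
    const SLOTS_PER_EPOCH: u64 = 32;
    let one_epoch_prior = current_slot.saturating_sub(SLOTS_PER_EPOCH);
    if !deneb_enabled {
        one_epoch_prior
    } else {
        // EIP-7045: round down to the first slot of `one_epoch_prior`'s epoch.
        (one_epoch_prior / SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH
    }
}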
pub fn verify_attestation_target_root( head_block: &ProtoBlock, - attestation: &Attestation, + attestation: AttestationRef, ) -> Result<(), Error> { // Check the attestation target root. let head_block_epoch = head_block.slot.epoch(E::slots_per_epoch()); - let attestation_epoch = attestation.data.slot.epoch(E::slots_per_epoch()); + let attestation_epoch = attestation.data().slot.epoch(E::slots_per_epoch()); if head_block_epoch > attestation_epoch { // The epoch references an invalid head block from a future epoch. // @@ -1142,7 +1225,7 @@ pub fn verify_attestation_target_root( // Reference: // https://github.com/ethereum/eth2.0-specs/pull/2001#issuecomment-699246659 return Err(Error::InvalidTargetRoot { - attestation: attestation.data.target.root, + attestation: attestation.data().target.root, // It is not clear what root we should expect in this case, since the attestation is // fundamentally invalid. expected: None, @@ -1161,9 +1244,9 @@ pub fn verify_attestation_target_root( }; // Reject any attestation with an invalid target root. - if target_root != attestation.data.target.root { + if target_root != attestation.data().target.root { return Err(Error::InvalidTargetRoot { - attestation: attestation.data.target.root, + attestation: attestation.data().target.root, expected: Some(target_root), }); } @@ -1194,14 +1277,14 @@ pub fn verify_signed_aggregate_signatures( .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let aggregator_index = signed_aggregate.message.aggregator_index; + let aggregator_index = signed_aggregate.message().aggregator_index(); if aggregator_index >= pubkey_cache.len() as u64 { return Err(Error::AggregatorPubkeyUnknown(aggregator_index)); } let fork = chain .spec - .fork_at_epoch(indexed_attestation.data.target.epoch); + .fork_at_epoch(indexed_attestation.data().target.epoch); let signature_sets = vec![ signed_aggregate_selection_proof_signature_set( @@ -1222,7 +1305,7 @@ pub fn verify_signed_aggregate_signatures( .map_err(BeaconChainError::SignatureSetError)?, indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &indexed_attestation.signature, + indexed_attestation.signature(), indexed_attestation, &fork, chain.genesis_validators_root, @@ -1234,6 +1317,28 @@ pub fn verify_signed_aggregate_signatures( Ok(verify_signature_sets(signature_sets.iter())) } +/// Verify that the `attestation` committee index is properly set for the attestation's fork. +/// This function will only apply verification post-Electra. +pub fn verify_committee_index(attestation: AttestationRef) -> Result<(), Error> { + if let Ok(committee_bits) = attestation.committee_bits() { + // Check to ensure that the attestation is for a single committee. + let num_committee_bits = get_committee_indices::(committee_bits); + if num_committee_bits.len() != 1 { + return Err(Error::NotExactlyOneCommitteeBitSet( + num_committee_bits.len(), + )); + } + + // Ensure the attestation index is set to zero post Electra. + if attestation.data().index != 0 { + return Err(Error::CommitteeIndexNonZero( + attestation.data().index as usize, + )); + } + } + Ok(()) +} + /// Assists in readability. type CommitteesPerSlot = u64; @@ -1241,35 +1346,71 @@ type CommitteesPerSlot = u64; /// public keys cached in the `chain`. 
pub fn obtain_indexed_attestation_and_committees_per_slot( chain: &BeaconChain, - attestation: &Attestation, + attestation: AttestationRef, ) -> Result<(IndexedAttestation, CommitteesPerSlot), Error> { - map_attestation_committee(chain, attestation, |(committee, committees_per_slot)| { - get_indexed_attestation(committee.committee, attestation) - .map(|attestation| (attestation, committees_per_slot)) - .map_err(Error::Invalid) + map_attestation_committees(chain, attestation, |(committees, committees_per_slot)| { + match attestation { + AttestationRef::Base(att) => { + let committee = committees + .iter() + .filter(|&committee| committee.index == att.data.index) + .at_most_one() + .map_err(|_| Error::NoCommitteeForSlotAndIndex { + slot: att.data.slot, + index: att.data.index, + })?; + + if let Some(committee) = committee { + attesting_indices_base::get_indexed_attestation(committee.committee, att) + .map(|attestation| (attestation, committees_per_slot)) + .map_err(Error::Invalid) + } else { + Err(Error::NoCommitteeForSlotAndIndex { + slot: att.data.slot, + index: att.data.index, + }) + } + } + AttestationRef::Electra(att) => { + attesting_indices_electra::get_indexed_attestation(&committees, att) + .map(|attestation| (attestation, committees_per_slot)) + .map_err(|e| { + if let BlockOperationError::BeaconStateError(NoCommitteeFound(index)) = e { + Error::NoCommitteeForSlotAndIndex { + slot: att.data.slot, + index, + } + } else { + Error::Invalid(e) + } + }) + } + } }) } /// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`. /// -/// This function exists in this odd "map" pattern because efficiently obtaining the committee for -/// an attestation can be complex. It might involve reading straight from the +/// This function exists in this odd "map" pattern because efficiently obtaining the committees for +/// an attestation's slot can be complex. It might involve reading straight from the /// `beacon_chain.shuffling_cache` or it might involve reading it from a state from the DB. Due to /// the complexities of `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here. /// -/// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state +/// If the committees for an `attestation`'s slot aren't found in the `shuffling_cache`, we will read a state /// from disk and then update the `shuffling_cache`. -fn map_attestation_committee( +/// +/// Committees are sorted by ascending index order 0..committees_per_slot +fn map_attestation_committees( chain: &BeaconChain, - attestation: &Attestation, + attestation: AttestationRef, map_fn: F, ) -> Result where T: BeaconChainTypes, - F: Fn((BeaconCommittee, CommitteesPerSlot)) -> Result, + F: Fn((Vec, CommitteesPerSlot)) -> Result, { - let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); - let target = &attestation.data.target; + let attestation_epoch = attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()); + let target = &attestation.data().target; // Attestation target must be for a known block. 
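// A self-contained sketch of the committee selection performed above for base
// (pre-Electra) attestations: among all committees at the attestation's slot,
// exactly one must match `data.index`, otherwise the attestation maps to
// `NoCommitteeForSlotAndIndex`. `Committee` and the error type are stand-ins.
struct Committee {
    index: u64,
    validators: Vec<u64>,
}

fn committee_for_index(committees: &[Committee], index: u64) -> Result<&Committee, String> {
    let mut matching = committees.iter().filter(|c| c.index == index);
    match (matching.next(), matching.next()) {
        (Some(committee), None) => Ok(committee),
        // Zero matches, or more than one, is treated as "no committee for slot/index".
        _ => Err(format!("no unique committee for index {index}")),
    }
}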
// @@ -1292,12 +1433,12 @@ where let committees_per_slot = committee_cache.committees_per_slot(); Ok(committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { + .get_beacon_committees_at_slot(attestation.data().slot) + .map(|committees| map_fn((committees, committees_per_slot))) + .unwrap_or_else(|_| { Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, + slot: attestation.data().slot, + index: attestation.committee_index().unwrap_or(0), }) })) }) diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 6aec2bef68a..07fad1bd4a8 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -66,14 +66,13 @@ where .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; let mut signature_sets = Vec::with_capacity(num_indexed * 3); - // Iterate, flattening to get only the `Ok` values. for indexed in indexing_results.iter().flatten() { let signed_aggregate = &indexed.signed_aggregate; let indexed_attestation = &indexed.indexed_attestation; let fork = chain .spec - .fork_at_epoch(indexed_attestation.data.target.epoch); + .fork_at_epoch(indexed_attestation.data().target.epoch); signature_sets.push( signed_aggregate_selection_proof_signature_set( @@ -98,7 +97,7 @@ where signature_sets.push( indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &indexed_attestation.signature, + indexed_attestation.signature(), indexed_attestation, &fork, chain.genesis_validators_root, @@ -182,11 +181,11 @@ where let indexed_attestation = &partially_verified.indexed_attestation; let fork = chain .spec - .fork_at_epoch(indexed_attestation.data.target.epoch); + .fork_at_epoch(indexed_attestation.data().target.epoch); let signature_set = indexed_attestation_signature_set_from_pubkeys( |validator_index| pubkey_cache.get(validator_index).map(Cow::Borrowed), - &indexed_attestation.signature, + indexed_attestation.signature(), indexed_attestation, &fork, chain.genesis_validators_root, diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 2e07cd32ed9..b5012e8e4e4 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -15,6 +15,7 @@ use state_processing::state_advance::{partial_state_advance, Error as StateAdvan use std::collections::HashMap; use std::ops::Range; use types::{ + attestation::Error as AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, @@ -59,6 +60,7 @@ pub enum Error { InverseRange { range: Range, }, + AttestationError(AttestationError), } impl From for Error { diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 5b70215d225..33567001e3c 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -202,7 +202,7 @@ impl BeaconChain { let mut previous_epoch_participation = state.previous_epoch_participation()?.clone(); for attestation in block.body().attestations() { - let data = &attestation.data; + let data = attestation.data(); let inclusion_delay = 
state.slot().safe_sub(data.slot)?.as_u64(); // [Modified in Deneb:EIP7045] let participation_flag_indices = get_attestation_participation_flag_indices( diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 0c92b7c1f62..f0a68b6be55 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -1,4 +1,4 @@ -use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BlockProcessStatus}; use execution_layer::{ExecutionLayer, ExecutionPayloadBodyV1}; use slog::{crit, debug, error, Logger}; use std::collections::HashMap; @@ -410,15 +410,14 @@ impl BeaconBlockStreamer { fn check_caches(&self, root: Hash256) -> Option>> { if self.check_caches == CheckCaches::Yes { - self.beacon_chain - .reqresp_pre_import_cache - .read() - .get(&root) - .map(|block| { + match self.beacon_chain.get_block_process_status(&root) { + BlockProcessStatus::Unknown => None, + BlockProcessStatus::NotValidated(block) + | BlockProcessStatus::ExecutionValidated(block) => { metrics::inc_counter(&metrics::BEACON_REQRESP_PRE_IMPORT_CACHE_HITS); - block.clone() - }) - .or(self.beacon_chain.early_attester_cache.get_block(root)) + Some(block) + } + } } else { None } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9c7ded313b6..19ee3d116c1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -85,7 +85,9 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use kzg::Kzg; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; +use operation_pool::{ + CompactAttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella, +}; use parking_lot::{Mutex, RwLock}; use proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -112,7 +114,7 @@ use std::collections::HashSet; use std::io::prelude::*; use std::marker::PhantomData; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, @@ -337,6 +339,20 @@ struct PartialBeaconBlock { bls_to_execution_changes: Vec, } +pub enum BlockProcessStatus { + /// Block is not in any pre-import cache. Block may be in the data-base or in the fork-choice. + Unknown, + /// Block is currently processing but not yet validated. + NotValidated(Arc>), + /// Block is fully valid, but not yet imported. It's cached in the da_checker while awaiting + /// missing block components. + ExecutionValidated(Arc>), +} + +pub struct BeaconChainMetrics { + pub reqresp_pre_import_cache_len: usize, +} + pub type LightClientProducerEvent = (Hash256, Slot, SyncAggregate); pub type BeaconForkChoice = ForkChoice< @@ -707,13 +723,6 @@ impl BeaconChain { Ok(()) } - pub fn persist_data_availability_checker(&self) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::PERSIST_DATA_AVAILABILITY_CHECKER); - self.data_availability_checker.persist_all()?; - - Ok(()) - } - /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is /// unavailable. 
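// A minimal, self-contained sketch of how a consumer (e.g. block sync) might
// act on the `BlockProcessStatus` introduced above. `SimpleBlockStatus` is a
// simplified stand-in so the example stands alone; the real enum wraps an
// `Arc<SignedBeaconBlock<E>>` in its non-`Unknown` variants.
enum SimpleBlockStatus {
    Unknown,
    NotValidated,
    ExecutionValidated,
}

fn needs_download(status: &SimpleBlockStatus) -> bool {
    match status {
        // Not in any pre-import cache: the block may have to be fetched again.
        SimpleBlockStatus::Unknown => true,
        // Either actively processing, or execution-valid and awaiting missing
        // components: a processing result is expected, so do not re-fetch.
        SimpleBlockStatus::NotValidated | SimpleBlockStatus::ExecutionValidated => false,
    }
}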
/// @@ -1237,6 +1246,27 @@ impl BeaconChain { Ok(self.store.get_blinded_block(block_root)?) } + /// Return the status of a block as it progresses through the various caches of the beacon + /// chain. Used by sync to learn the status of a block and prevent repeated downloads / + /// processing attempts. + pub fn get_block_process_status(&self, block_root: &Hash256) -> BlockProcessStatus { + if let Some(block) = self + .data_availability_checker + .get_execution_valid_block(block_root) + { + return BlockProcessStatus::ExecutionValidated(block); + } + + if let Some(block) = self.reqresp_pre_import_cache.read().get(block_root) { + // A block is on the `reqresp_pre_import_cache` but NOT in the + // `data_availability_checker` only if it is actively processing. We can expect a future + // event with the result of processing + return BlockProcessStatus::NotValidated(block.clone()); + } + + BlockProcessStatus::Unknown + } + /// Returns the state at the given root, if any. /// /// ## Errors @@ -1373,10 +1403,6 @@ impl BeaconChain { ) } - let start_slot = head_state.slot(); - let task_start = Instant::now(); - let max_task_runtime = Duration::from_secs(self.spec.seconds_per_slot); - let head_state_slot = head_state.slot(); let mut state = head_state; @@ -1386,18 +1412,6 @@ impl BeaconChain { }; while state.slot() < slot { - // Do not allow and forward state skip that takes longer than the maximum task duration. - // - // This is a protection against nodes doing too much work when they're not synced - // to a chain. - if task_start + max_task_runtime < Instant::now() { - return Err(Error::StateSkipTooLarge { - start_slot, - requested_slot: slot, - max_task_runtime, - }); - } - // Note: supplying some `state_root` when it is known would be a cheap and easy // optimization. match per_slot_processing(&mut state, skip_state_root, &self.spec) { @@ -1608,14 +1622,49 @@ impl BeaconChain { Ok((duties, dependent_root, execution_status)) } + pub fn get_aggregated_attestation( + &self, + attestation: AttestationRef, + ) -> Result>, Error> { + match attestation { + AttestationRef::Base(att) => self.get_aggregated_attestation_base(&att.data), + AttestationRef::Electra(att) => self.get_aggregated_attestation_electra( + att.data.slot, + &att.data.tree_hash_root(), + att.committee_index() + .ok_or(Error::AttestationCommitteeIndexNotSet)?, + ), + } + } + /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. /// /// The attestation will be obtained from `self.naive_aggregation_pool`. 
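// A simplified sketch of the two key shapes the naive aggregation pool is
// queried with after this change. `PoolKey` is a stand-in for
// `naive_aggregation_pool::AttestationKey`; reducing the attestation data to a
// 32-byte root is an assumption made for illustration.
#[derive(Clone, PartialEq, Eq, Hash)]
enum PoolKey {
    // Pre-Electra: slot plus attestation data (here, its hash root) identify
    // the aggregate.
    Base { slot: u64, data_root: [u8; 32] },
    // Electra: the committee index also participates in the key, because one
    // `AttestationData` may now be aggregated separately per committee.
    Electra {
        slot: u64,
        data_root: [u8; 32],
        committee_index: u64,
    },
}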
- pub fn get_aggregated_attestation( + pub fn get_aggregated_attestation_base( &self, data: &AttestationData, ) -> Result>, Error> { - if let Some(attestation) = self.naive_aggregation_pool.read().get(data) { + let attestation_key = crate::naive_aggregation_pool::AttestationKey::new_base(data); + if let Some(attestation) = self.naive_aggregation_pool.read().get(&attestation_key) { + self.filter_optimistic_attestation(attestation) + .map(Option::Some) + } else { + Ok(None) + } + } + + pub fn get_aggregated_attestation_electra( + &self, + slot: Slot, + attestation_data_root: &Hash256, + committee_index: CommitteeIndex, + ) -> Result>, Error> { + let attestation_key = crate::naive_aggregation_pool::AttestationKey::new_electra( + slot, + *attestation_data_root, + committee_index, + ); + if let Some(attestation) = self.naive_aggregation_pool.read().get(&attestation_key) { self.filter_optimistic_attestation(attestation) .map(Option::Some) } else { @@ -1627,16 +1676,21 @@ impl BeaconChain { /// `attestation.data.tree_hash_root()`. /// /// The attestation will be obtained from `self.naive_aggregation_pool`. - pub fn get_aggregated_attestation_by_slot_and_root( + /// + /// NOTE: This function will *only* work with pre-electra attestations and it only + /// exists to support the pre-electra validator API method. + pub fn get_pre_electra_aggregated_attestation_by_slot_and_root( &self, slot: Slot, attestation_data_root: &Hash256, ) -> Result>, Error> { - if let Some(attestation) = self - .naive_aggregation_pool - .read() - .get_by_slot_and_root(slot, attestation_data_root) - { + let attestation_key = + crate::naive_aggregation_pool::AttestationKey::new_base_from_slot_and_root( + slot, + *attestation_data_root, + ); + + if let Some(attestation) = self.naive_aggregation_pool.read().get(&attestation_key) { self.filter_optimistic_attestation(attestation) .map(Option::Some) } else { @@ -1650,7 +1704,7 @@ impl BeaconChain { &self, attestation: Attestation, ) -> Result, Error> { - let beacon_block_root = attestation.data.beacon_block_root; + let beacon_block_root = attestation.data().beacon_block_root; match self .canonical_head .fork_choice_read_lock() @@ -1916,17 +1970,15 @@ impl BeaconChain { }; drop(cache_timer); - Ok(Attestation { - aggregation_bits: BitList::with_capacity(committee_len)?, - data: AttestationData { - slot: request_slot, - index: request_index, - beacon_block_root, - source: justified_checkpoint, - target, - }, - signature: AggregateSignature::empty(), - }) + Ok(Attestation::::empty_for_signing( + request_index, + committee_len, + request_slot, + beacon_block_root, + justified_checkpoint, + target, + &self.spec, + )?) 
} /// Performs the same validation as `Self::verify_unaggregated_attestation_for_gossip`, but for @@ -1964,8 +2016,9 @@ impl BeaconChain { // This method is called for API and gossip attestations, so this covers all unaggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_attestation_subscribers() { - event_handler - .register(EventKind::Attestation(Box::new(v.attestation().clone()))); + event_handler.register(EventKind::Attestation(Box::new( + v.attestation().clone_as_attestation(), + ))); } } metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); @@ -2001,8 +2054,9 @@ impl BeaconChain { // This method is called for API and gossip attestations, so this covers all aggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_attestation_subscribers() { - event_handler - .register(EventKind::Attestation(Box::new(v.attestation().clone()))); + event_handler.register(EventKind::Attestation(Box::new( + v.attestation().clone_as_attestation(), + ))); } } metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); @@ -2111,7 +2165,7 @@ impl BeaconChain { .fork_choice_write_lock() .on_attestation( self.slot()?, - verified.indexed_attestation(), + verified.indexed_attestation().to_ref(), AttestationFromBlock::False, ) .map_err(Into::into) @@ -2138,8 +2192,8 @@ impl BeaconChain { self.log, "Stored unaggregated attestation"; "outcome" => ?outcome, - "index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), + "index" => attestation.committee_index(), + "slot" => attestation.data().slot.as_u64(), ), Err(NaiveAggregationError::SlotTooLow { slot, @@ -2157,8 +2211,8 @@ impl BeaconChain { self.log, "Failed to store unaggregated attestation"; "error" => ?e, - "index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), + "index" => attestation.committee_index(), + "slot" => attestation.data().slot.as_u64(), ); return Err(Error::from(e).into()); } @@ -2282,7 +2336,7 @@ impl BeaconChain { pub fn filter_op_pool_attestation( &self, filter_cache: &mut HashMap<(Hash256, Epoch), bool>, - att: &AttestationRef, + att: &CompactAttestationRef, state: &BeaconState, ) -> bool { *filter_cache @@ -2422,6 +2476,7 @@ impl BeaconChain { proposer_slashing: ProposerSlashing, ) -> Result, Error> { let wall_clock_state = self.wall_clock_state()?; + Ok(self.observed_proposer_slashings.lock().verify_and_observe( proposer_slashing, &wall_clock_state, @@ -2434,6 +2489,14 @@ impl BeaconChain { &self, proposer_slashing: SigVerifiedOp, ) { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_proposer_slashing_subscribers() { + event_handler.register(EventKind::ProposerSlashing(Box::new( + proposer_slashing.clone().into_inner(), + ))); + } + } + if self.eth1_chain.is_some() { self.op_pool.insert_proposer_slashing(proposer_slashing) } @@ -2445,6 +2508,7 @@ impl BeaconChain { attester_slashing: AttesterSlashing, ) -> Result, T::EthSpec>, Error> { let wall_clock_state = self.wall_clock_state()?; + Ok(self.observed_attester_slashings.lock().verify_and_observe( attester_slashing, &wall_clock_state, @@ -2463,7 +2527,15 @@ impl BeaconChain { // Add to fork choice. 
self.canonical_head .fork_choice_write_lock() - .on_attester_slashing(attester_slashing.as_inner()); + .on_attester_slashing(attester_slashing.as_inner().to_ref()); + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_attester_slashing_subscribers() { + event_handler.register(EventKind::AttesterSlashing(Box::new( + attester_slashing.clone().into_inner(), + ))); + } + } // Add to the op pool (if we have the ability to propose blocks). if self.eth1_chain.is_some() { @@ -2536,6 +2608,14 @@ impl BeaconChain { bls_to_execution_change: SigVerifiedOp, received_pre_capella: ReceivedPreCapella, ) -> bool { + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_bls_to_execution_change_subscribers() { + event_handler.register(EventKind::BlsToExecutionChange(Box::new( + bls_to_execution_change.clone().into_inner(), + ))); + } + } + if self.eth1_chain.is_some() { self.op_pool .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) @@ -2756,6 +2836,7 @@ impl BeaconChain { signature_verified_block.block_root(), signature_verified_block, notify_execution_layer, + BlockImportSource::RangeSync, || Ok(()), ) .await @@ -2938,6 +3019,7 @@ impl BeaconChain { self: &Arc, block_root: Hash256, unverified_block: B, + block_source: BlockImportSource, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { self.reqresp_pre_import_cache @@ -2945,9 +3027,13 @@ impl BeaconChain { .insert(block_root, unverified_block.block_cloned()); let r = self - .process_block(block_root, unverified_block, notify_execution_layer, || { - Ok(()) - }) + .process_block( + block_root, + unverified_block, + notify_execution_layer, + block_source, + || Ok(()), + ) .await; self.remove_notified(&block_root, r) } @@ -2970,6 +3056,7 @@ impl BeaconChain { block_root: Hash256, unverified_block: B, notify_execution_layer: NotifyExecutionLayer, + block_source: BlockImportSource, publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static, ) -> Result> { // Start the Prometheus timer. @@ -3001,14 +3088,21 @@ impl BeaconChain { notify_execution_layer, )?; publish_fn()?; + + // Record the time it took to complete consensus verification. + if let Some(timestamp) = self.slot_clock.now_duration() { + self.block_times_cache + .write() + .set_time_consensus_verified(block_root, block_slot, timestamp) + } + let executed_block = chain.into_executed_block(execution_pending).await?; - // Record the time it took to ask the execution layer. - if let Some(seen_timestamp) = self.slot_clock.now_duration() { - self.block_times_cache.write().set_execution_time( - block_root, - block_slot, - seen_timestamp, - ) + + // Record the *additional* time it took to wait for execution layer verification. + if let Some(timestamp) = self.slot_clock.now_duration() { + self.block_times_cache + .write() + .set_time_executed(block_root, block_slot, timestamp) } match executed_block { @@ -3030,6 +3124,7 @@ impl BeaconChain { "Beacon block imported"; "block_root" => ?block_root, "block_slot" => block_slot, + "source" => %block_source, ); // Increment the Prometheus counter for block processing successes. @@ -3267,6 +3362,20 @@ impl BeaconChain { "payload_verification_handle", ) .await??; + + // Remove block components from da_checker AFTER completing block import. Then we can assert + // the following invariant: + // > A valid unfinalized block is either in fork-choice or da_checker. 
+ // + // If we remove the block when it becomes available, there's some time window during + // `import_block` where the block is nowhere. Consumers of the da_checker can handle the + // extend time a block may exist in the da_checker. + // + // If `import_block` errors (only errors with internal errors), the pending components will + // be pruned on data_availability_checker maintenance as finality advances. + self.data_availability_checker + .remove_pending_components(block_root); + Ok(AvailabilityProcessingStatus::Imported(block_root)) } @@ -3703,7 +3812,7 @@ impl BeaconChain { self.log, "Failed to get indexed attestation"; "purpose" => "validator monitor", - "attestation_slot" => attestation.data.slot, + "attestation_slot" => attestation.data().slot, "error" => ?e, ); continue; @@ -3759,7 +3868,7 @@ impl BeaconChain { self.log, "Failed to register observed attestation"; "error" => ?e, - "epoch" => a.data.target.epoch + "epoch" => a.data().target.epoch ); } } @@ -3771,7 +3880,7 @@ impl BeaconChain { self.log, "Failed to get indexed attestation"; "purpose" => "observation", - "attestation_slot" => a.data.slot, + "attestation_slot" => a.data().slot, "error" => ?e, ); continue; @@ -3780,15 +3889,15 @@ impl BeaconChain { let mut observed_block_attesters = self.observed_block_attesters.write(); - for &validator_index in &indexed_attestation.attesting_indices { + for &validator_index in indexed_attestation.attesting_indices_iter() { if let Err(e) = observed_block_attesters - .observe_validator(a.data.target.epoch, validator_index as usize) + .observe_validator(a.data().target.epoch, validator_index as usize) { debug!( self.log, "Failed to register observed block attester"; "error" => ?e, - "epoch" => a.data.target.epoch, + "epoch" => a.data().target.epoch, "validator_index" => validator_index, ) } @@ -3812,13 +3921,13 @@ impl BeaconChain { self.log, "Failed to get indexed attestation"; "purpose" => "slasher", - "attestation_slot" => attestation.data.slot, + "attestation_slot" => attestation.data().slot, "error" => ?e, ); continue; } }; - slasher.accept_attestation(indexed_attestation.clone()); + slasher.accept_attestation(indexed_attestation.clone_as_indexed_attestation()); } } } @@ -3837,7 +3946,7 @@ impl BeaconChain { if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot { metrics::observe( &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, - block.body().attestations().len() as f64, + block.body().attestations_len() as f64, ); if let Ok(sync_aggregate) = block.body().sync_aggregate() { @@ -4397,12 +4506,6 @@ impl BeaconChain { if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) } else { - info!( - self.log, - "Missed snapshot cache during withdrawals calculation"; - "slot" => proposal_slot, - "parent_block_root" => ?parent_block_root - ); let block = self .get_blinded_block(&parent_block_root)? 
.ok_or(Error::MissingBeaconBlock(parent_block_root))?; @@ -4419,6 +4522,7 @@ impl BeaconChain { let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); if head_state.current_epoch() == proposal_epoch { return get_expected_withdrawals(&unadvanced_state, &self.spec) + .map(|(withdrawals, _)| withdrawals) .map_err(Error::PrepareProposerFailed); } @@ -4436,7 +4540,9 @@ impl BeaconChain { proposal_epoch.start_slot(T::EthSpec::slots_per_epoch()), &self.spec, )?; - get_expected_withdrawals(&advanced_state, &self.spec).map_err(Error::PrepareProposerFailed) + get_expected_withdrawals(&advanced_state, &self.spec) + .map(|(withdrawals, _)| withdrawals) + .map_err(Error::PrepareProposerFailed) } /// Determine whether a fork choice update to the execution layer should be overridden. @@ -4857,7 +4963,8 @@ impl BeaconChain { metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES); for attestation in self.naive_aggregation_pool.read().iter() { let import = |attestation: &Attestation| { - let attesting_indices = get_attesting_indices_from_state(&state, attestation)?; + let attesting_indices = + get_attesting_indices_from_state(&state, attestation.to_ref())?; self.op_pool .insert_attestation(attestation.clone(), attesting_indices) }; @@ -4880,11 +4987,11 @@ impl BeaconChain { initialize_epoch_cache(&mut state, &self.spec)?; let mut prev_filter_cache = HashMap::new(); - let prev_attestation_filter = |att: &AttestationRef| { + let prev_attestation_filter = |att: &CompactAttestationRef| { self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) }; let mut curr_filter_cache = HashMap::new(); - let curr_attestation_filter = |att: &AttestationRef| { + let curr_attestation_filter = |att: &CompactAttestationRef| { self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) }; @@ -4907,7 +5014,7 @@ impl BeaconChain { attestations.retain(|att| { verify_attestation_for_block_inclusion( &state, - att, + att.to_ref(), &mut tmp_ctxt, VerifySignatures::True, &self.spec, @@ -5038,6 +5145,28 @@ impl BeaconChain { bls_to_execution_changes, } = partial_beacon_block; + let (attester_slashings_base, attester_slashings_electra) = + attester_slashings.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut base, mut electra), slashing| { + match slashing { + AttesterSlashing::Base(slashing) => base.push(slashing), + AttesterSlashing::Electra(slashing) => electra.push(slashing), + } + (base, electra) + }, + ); + let (attestations_base, attestations_electra) = attestations.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut base, mut electra), attestation| { + match attestation { + Attestation::Base(attestation) => base.push(attestation), + Attestation::Electra(attestation) => electra.push(attestation), + } + (base, electra) + }, + ); + let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state { BeaconState::Base(_) => ( BeaconBlock::Base(BeaconBlockBase { @@ -5050,8 +5179,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: attester_slashings_base.into(), + attestations: attestations_base.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), _phantom: PhantomData, @@ -5071,8 +5200,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: 
attester_slashings_base.into(), + attestations: attestations_base.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate @@ -5098,8 +5227,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: attester_slashings_base.into(), + attestations: attestations_base.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate @@ -5130,8 +5259,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: attester_slashings_base.into(), + attestations: attestations_base.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate @@ -5164,8 +5293,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: attester_slashings_base.into(), + attestations: attestations_base.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate @@ -5202,8 +5331,8 @@ impl BeaconChain { eth1_data, graffiti, proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations: attestations.into(), + attester_slashings: attester_slashings_electra.into(), + attestations: attestations_electra.into(), deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), sync_aggregate: sync_aggregate @@ -5214,6 +5343,8 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes.into(), blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, + // TODO(electra): finish consolidations when they're more spec'd out + consolidations: Vec::new().into(), }, }), maybe_blobs_and_proofs, @@ -5319,7 +5450,7 @@ impl BeaconChain { self.log, "Produced beacon block"; "parent" => ?block.parent_root(), - "attestations" => block.body().attestations().len(), + "attestations" => block.body().attestations_len(), "slot" => block.slot() ); @@ -6365,9 +6496,8 @@ impl BeaconChain { /// account the current slot when accounting for skips. pub fn is_healthy(&self, parent_root: &Hash256) -> Result { let cached_head = self.canonical_head.cached_head(); - // Check if the merge has been finalized. 
- if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash { - if ExecutionBlockHash::zero() == finalized_hash { + if let Some(head_hash) = cached_head.forkchoice_update_parameters().head_hash { + if ExecutionBlockHash::zero() == head_hash { return Ok(ChainHealth::PreMerge); } } else { @@ -6610,6 +6740,12 @@ impl BeaconChain { ForkName::Base => Err(Error::UnsupportedFork), } } + + pub fn metrics(&self) -> BeaconChainMetrics { + BeaconChainMetrics { + reqresp_pre_import_cache_len: self.reqresp_pre_import_cache.read().len(), + } + } } impl Drop for BeaconChain { @@ -6617,7 +6753,6 @@ impl Drop for BeaconChain { let drop = || -> Result<(), Error> { self.persist_head_and_fork_choice()?; self.persist_op_pool()?; - self.persist_data_availability_checker()?; self.persist_eth1_cache() }; diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 2a42b49b422..f746b68996f 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -20,26 +20,12 @@ use types::{ Hash256, Slot, }; -/// Ensure this justified checkpoint has an epoch of 0 so that it is never -/// greater than the justified checkpoint and enshrined as the actual justified -/// checkpoint. -const JUNK_BEST_JUSTIFIED_CHECKPOINT: Checkpoint = Checkpoint { - epoch: Epoch::new(0), - root: Hash256::repeat_byte(0), -}; - #[derive(Debug)] pub enum Error { - UnableToReadSlot, - UnableToReadTime, - InvalidGenesisSnapshot(Slot), - AncestorUnknown { ancestor_slot: Slot }, - UninitializedBestJustifiedBalances, FailedToReadBlock(StoreError), MissingBlock(Hash256), FailedToReadState(StoreError), MissingState(Hash256), - InvalidPersistedBytes(ssz::DecodeError), BeaconStateError(BeaconStateError), Arith(ArithError), } @@ -66,7 +52,6 @@ const MAX_BALANCE_CACHE_SIZE: usize = 4; )] pub(crate) struct CacheItem { pub(crate) block_root: Hash256, - #[superstruct(only(V8))] pub(crate) epoch: Epoch, pub(crate) balances: Vec, } @@ -79,7 +64,6 @@ pub(crate) type CacheItem = CacheItemV8; no_enum )] pub struct BalancesCache { - #[superstruct(only(V8))] pub(crate) items: Vec, } @@ -365,59 +349,15 @@ where pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV17; /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. 
-#[superstruct( - variants(V11, V17), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V17), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V11, V17))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, - #[superstruct(only(V11))] - pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V11, V17))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V11, V17))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V11, V17))] pub proposer_boost_root: Hash256, - #[superstruct(only(V11, V17))] pub equivocating_indices: BTreeSet, } - -impl Into for PersistedForkChoiceStoreV11 { - fn into(self) -> PersistedForkChoiceStore { - PersistedForkChoiceStore { - balances_cache: self.balances_cache, - time: self.time, - finalized_checkpoint: self.finalized_checkpoint, - justified_checkpoint: self.justified_checkpoint, - justified_balances: self.justified_balances, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - proposer_boost_root: self.proposer_boost_root, - equivocating_indices: self.equivocating_indices, - } - } -} - -impl Into for PersistedForkChoiceStore { - fn into(self) -> PersistedForkChoiceStoreV11 { - PersistedForkChoiceStoreV11 { - balances_cache: self.balances_cache, - time: self.time, - finalized_checkpoint: self.finalized_checkpoint, - justified_checkpoint: self.justified_checkpoint, - justified_balances: self.justified_balances, - best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - proposer_boost_root: self.proposer_boost_root, - equivocating_indices: self.equivocating_indices, - } - } -} diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index bf9e8481261..60b1abaf098 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -244,8 +244,8 @@ impl BeaconChain { }); } - if let Some(&expected) = expected_withdrawals_root { - if let Some(&got) = got_withdrawals_root { + if let Some(expected) = expected_withdrawals_root { + if let Some(got) = got_withdrawals_root { if got != expected { return Ok(GenesisExecutionPayloadStatus::WithdrawalsRootMismatch { got, diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 263b9f9e013..2b62a83194b 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -10,7 +10,6 @@ use crate::block_verification::{ use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; use kzg::{Error as KzgError, Kzg, KzgCommitment}; -use merkle_proof::MerkleTreeError; use slog::debug; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -128,13 +127,6 @@ pub enum GossipBlobError { /// The blob sidecar is invalid and the peer is faulty. KzgError(kzg::Error), - /// The kzg commitment inclusion proof failed. - /// - /// ## Peer scoring - /// - /// The blob sidecar is invalid - InclusionProof(MerkleTreeError), - /// The pubkey cache timed out. 
/// /// ## Peer scoring @@ -459,10 +451,7 @@ pub fn validate_blob_sidecar_for_gossip( // Verify the inclusion proof in the sidecar let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION); - if !blob_sidecar - .verify_blob_sidecar_inclusion_proof() - .map_err(GossipBlobError::InclusionProof)? - { + if !blob_sidecar.verify_blob_sidecar_inclusion_proof() { return Err(GossipBlobError::InvalidInclusionProof); } drop(_timer); @@ -571,6 +560,14 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Kzg verification for gossip blob sidecar + let kzg = chain + .kzg + .as_ref() + .ok_or(GossipBlobError::KzgNotInitialized)?; + let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp) + .map_err(GossipBlobError::KzgError)?; + chain .observed_slashable .write() @@ -605,14 +602,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Kzg verification for gossip blob sidecar - let kzg = chain - .kzg - .as_ref() - .ok_or(GossipBlobError::KzgNotInitialized)?; - let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp) - .map_err(GossipBlobError::KzgError)?; - Ok(GossipVerifiedBlob { block_root, blob: kzg_verified_blob, diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index fd0cfc7e9bd..69eecc89b8a 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -27,10 +27,12 @@ impl BeaconChain { let split_attestations = block .body() .attestations() - .iter() .map(|att| { let attesting_indices = get_attesting_indices_from_state(state, att)?; - Ok(SplitAttestation::new(att.clone(), attesting_indices)) + Ok(SplitAttestation::new( + att.clone_as_attestation(), + attesting_indices, + )) }) .collect::, BeaconChainError>>()?; @@ -86,8 +88,7 @@ impl BeaconChain { block .body() .attestations() - .iter() - .map(|a| a.data.clone()) + .map(|a| a.data().clone()) .collect() } else { vec![] diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index db547a1186c..3b75046f3a4 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -19,7 +19,9 @@ type BlockRoot = Hash256; pub struct Timestamps { pub observed: Option, pub all_blobs_observed: Option, - pub execution_time: Option, + pub consensus_verified: Option, + pub started_execution: Option, + pub executed: Option, pub attestable: Option, pub imported: Option, pub set_as_head: Option, @@ -32,7 +34,9 @@ pub struct BlockDelays { pub observed: Option, /// The time after the start of the slot we saw all blobs. pub all_blobs_observed: Option, - /// The time it took to get verification from the EL for the block. + /// The time it took to complete consensus verification of the block. + pub consensus_verification_time: Option, + /// The time it took to complete execution verification of the block. 
pub execution_time: Option, /// The delay from the start of the slot before the block became available /// @@ -58,13 +62,16 @@ impl BlockDelays { let all_blobs_observed = times .all_blobs_observed .and_then(|all_blobs_observed| all_blobs_observed.checked_sub(slot_start_time)); + let consensus_verification_time = times + .consensus_verified + .and_then(|consensus_verified| consensus_verified.checked_sub(times.observed?)); let execution_time = times - .execution_time - .and_then(|execution_time| execution_time.checked_sub(times.observed?)); + .executed + .and_then(|executed| executed.checked_sub(times.started_execution?)); // Duration since UNIX epoch at which block became available. - let available_time = times.execution_time.map(|execution_time| { - std::cmp::max(execution_time, times.all_blobs_observed.unwrap_or_default()) - }); + let available_time = times + .executed + .map(|executed| std::cmp::max(executed, times.all_blobs_observed.unwrap_or_default())); // Duration from the start of the slot until the block became available. let available_delay = available_time.and_then(|available_time| available_time.checked_sub(slot_start_time)); @@ -80,6 +87,7 @@ impl BlockDelays { BlockDelays { observed, all_blobs_observed, + consensus_verification_time, execution_time, available: available_delay, attestable, @@ -155,6 +163,9 @@ impl BlockTimesCache { slot: Slot, timestamp: Duration, ) { + // Unlike other functions in this file, we update the blob observed time only if it is + // *greater* than existing blob observation times. This allows us to know the observation + // time of the last blob to arrive. let block_times = self .cache .entry(block_root) @@ -168,48 +179,89 @@ impl BlockTimesCache { } } - pub fn set_execution_time(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + /// Set the timestamp for `field` if that timestamp is less than any previously known value. + /// + /// If no previous value is known for the field, then the supplied timestamp will always be + /// stored. 
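// A self-contained sketch of the closure-based field selector used by the new
// `set_time_if_less` below: a single generic setter receives `&mut Option<Duration>`
// through a closure and only stores the new timestamp if it is earlier than the
// existing one. `MiniTimestamps` is a simplified stand-in for `Timestamps`.
use std::time::Duration;

#[derive(Default)]
struct MiniTimestamps {
    executed: Option<Duration>,
}

fn set_if_less(
    timestamps: &mut MiniTimestamps,
    field: impl Fn(&mut MiniTimestamps) -> &mut Option<Duration>,
    timestamp: Duration,
) {
    let entry = field(timestamps);
    // Keep the earliest timestamp seen for this field.
    if entry.map_or(true, |prev| timestamp < prev) {
        *entry = Some(timestamp);
    }
}

fn main() {
    let mut times = MiniTimestamps::default();
    set_if_less(&mut times, |t| &mut t.executed, Duration::from_millis(900));
    // A later, larger value is ignored; the earliest observation wins.
    set_if_less(&mut times, |t| &mut t.executed, Duration::from_millis(1200));
    assert_eq!(times.executed, Some(Duration::from_millis(900)));
}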
+ pub fn set_time_if_less( + &mut self, + block_root: BlockRoot, + slot: Slot, + field: impl Fn(&mut Timestamps) -> &mut Option, + timestamp: Duration, + ) { let block_times = self .cache .entry(block_root) .or_insert_with(|| BlockTimesCacheValue::new(slot)); - if block_times - .timestamps - .execution_time - .map_or(true, |prev| timestamp < prev) - { - block_times.timestamps.execution_time = Some(timestamp); + let existing_timestamp = field(&mut block_times.timestamps); + if existing_timestamp.map_or(true, |prev| timestamp < prev) { + *existing_timestamp = Some(timestamp); } } + pub fn set_time_consensus_verified( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.consensus_verified, + timestamp, + ) + } + + pub fn set_time_executed(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.executed, + timestamp, + ) + } + + pub fn set_time_started_execution( + &mut self, + block_root: BlockRoot, + slot: Slot, + timestamp: Duration, + ) { + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.started_execution, + timestamp, + ) + } + pub fn set_time_attestable(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { - let block_times = self - .cache - .entry(block_root) - .or_insert_with(|| BlockTimesCacheValue::new(slot)); - if block_times - .timestamps - .attestable - .map_or(true, |prev| timestamp < prev) - { - block_times.timestamps.attestable = Some(timestamp); - } + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.attestable, + timestamp, + ) } pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { - let block_times = self - .cache - .entry(block_root) - .or_insert_with(|| BlockTimesCacheValue::new(slot)); - block_times.timestamps.imported = Some(timestamp); + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.imported, + timestamp, + ) } pub fn set_time_set_as_head(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { - let block_times = self - .cache - .entry(block_root) - .or_insert_with(|| BlockTimesCacheValue::new(slot)); - block_times.timestamps.set_as_head = Some(timestamp); + self.set_time_if_less( + block_root, + slot, + |timestamps| &mut timestamps.set_as_head, + timestamp, + ) } pub fn get_block_delays( diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 866dde5a763..d906518ff5a 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -67,7 +67,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::{EventKind, PublishBlockRequest}; +use eth2::types::{BlockGossip, EventKind, PublishBlockRequest}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; @@ -93,7 +93,6 @@ use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; -use tree_hash::TreeHash; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, @@ -975,6 +974,16 @@ impl 
GossipVerifiedBlock { // Validate the block's execution_payload (if any). validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + // Beacon API block_gossip events + if let Some(event_handler) = chain.event_handler.as_ref() { + if event_handler.has_block_gossip_subscribers() { + event_handler.register(EventKind::BlockGossip(Box::new(BlockGossip { + slot: block.slot(), + block: block_root, + }))); + } + } + // Having checked the proposer index and the block root we can cache them. let consensus_context = ConsensusContext::new(block.slot()) .set_current_block_root(block_root) @@ -1335,6 +1344,13 @@ impl ExecutionPendingBlock { // The specification declares that this should be run *inside* `per_block_processing`, // however we run it here to keep `per_block_processing` pure (i.e., no calls to external // servers). + if let Some(started_execution) = chain.slot_clock.now_duration() { + chain.block_times_cache.write().set_time_started_execution( + block_root, + block.slot(), + started_execution, + ); + } let payload_verification_status = payload_notifier.notify_new_payload().await?; // If the payload did not validate or invalidate the block, check to see if this block is @@ -1382,18 +1398,20 @@ impl ExecutionPendingBlock { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Stage a batch of operations to be completed atomically if this block is imported - // successfully. We include the state root of the pre-state, which may be an advanced state - // that was stored in the DB with a `temporary` flag. + // successfully. If there is a skipped slot, we include the state root of the pre-state, + // which may be an advanced state that was stored in the DB with a `temporary` flag. let mut state = parent.pre_state; - let mut confirmed_state_roots = if state.slot() > parent.beacon_block.slot() { - // Advanced pre-state. Delete its temporary flag. - let pre_state_root = state.update_tree_hash_cache()?; - vec![pre_state_root] - } else { - // Pre state is parent state. It is already stored in the DB without temporary status. - vec![] - }; + let mut confirmed_state_roots = + if block.slot() > state.slot() && state.slot() > parent.beacon_block.slot() { + // Advanced pre-state. Delete its temporary flag. + let pre_state_root = state.update_tree_hash_cache()?; + vec![pre_state_root] + } else { + // Pre state is either unadvanced, or should not be stored long-term because there + // is no skipped slot between `parent` and `block`. + vec![] + }; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1623,7 +1641,7 @@ impl ExecutionPendingBlock { } // Register each attestation in the block with fork choice. 
- for (i, attestation) in block.message().body().attestations().iter().enumerate() { + for (i, attestation) in block.message().body().attestations().enumerate() { let _fork_choice_attestation_timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); @@ -2105,7 +2123,14 @@ pub fn verify_header_signature( fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { if WRITE_BLOCK_PROCESSING_SSZ { - let root = state.tree_hash_root(); + let mut state = state.clone(); + let Ok(root) = state.canonical_root() else { + error!( + log, + "Unable to hash state for writing"; + ); + return; + }; let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot(), root); let mut path = std::env::temp_dir().join("lighthouse"); let _ = fs::create_dir_all(path.clone()); diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index fb0e0c965f1..70f1e99ef74 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -96,13 +96,18 @@ impl RpcBlock { } /// Constructs a new `BlockAndBlobs` variant after making consistency - /// checks between the provided blocks and blobs. + /// checks between the provided blocks and blobs. This struct makes no + /// guarantees about whether blobs should be present, only that they are + /// consistent with the block. An empty list passed in for `blobs` is + /// viewed the same as `None` passed in. pub fn new( block_root: Option, block: Arc>, blobs: Option>, ) -> Result { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + // Treat empty blob lists as if they are missing. + let blobs = blobs.filter(|b| !b.is_empty()); if let (Some(blobs), Ok(block_commitments)) = ( blobs.as_ref(), @@ -309,6 +314,26 @@ pub struct BlockImportData { pub consensus_context: ConsensusContext, } +impl BlockImportData { + pub fn __new_for_test( + block_root: Hash256, + state: BeaconState, + parent_block: SignedBeaconBlock>, + ) -> Self { + Self { + block_root, + state, + parent_block, + parent_eth1_finalization_data: Eth1FinalizationData { + eth1_data: <_>::default(), + eth1_deposit_index: 0, + }, + confirmed_state_roots: vec![], + consensus_context: ConsensusContext::new(Slot::new(0)), + } + } +} + pub type GossipVerifiedBlockContents = (GossipVerifiedBlock, Option>); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 376bc16c035..7217f2c640f 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -708,8 +708,8 @@ where .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); - let beacon_proposer_cache: Arc> = <_>::default(); + let mut validator_monitor = ValidatorMonitor::new( validator_monitor_config, beacon_proposer_cache.clone(), @@ -1195,7 +1195,7 @@ mod test { let head = chain.head_snapshot(); - let state = &head.beacon_state; + let mut state = head.beacon_state.clone(); let block = &head.beacon_block; assert_eq!(state.slot(), Slot::new(0), "should start from genesis"); @@ -1206,7 +1206,7 @@ mod test { ); assert_eq!( block.state_root(), - state.canonical_root(), + state.canonical_root().unwrap(), "block should have correct state root" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 
a84cfab298d..84e1544451c 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1385,6 +1385,15 @@ fn observe_head_block_delays( .as_millis() as i64, ); + // The time it took to check the validity within Lighthouse + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME, + block_delays + .consensus_verification_time + .unwrap_or_else(|| Duration::from_secs(0)) + .as_millis() as i64, + ); + // The time it took to check the validity with the EL metrics::set_gauge( &metrics::BEACON_BLOCK_DELAY_EXECUTION_TIME, @@ -1447,6 +1456,7 @@ fn observe_head_block_delays( "total_delay_ms" => block_delay_total.as_millis(), "observed_delay_ms" => format_delay(&block_delays.observed), "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time), "execution_time_ms" => format_delay(&block_delays.execution_time), "available_delay_ms" => format_delay(&block_delays.available), "attestable_delay_ms" => format_delay(&block_delays.attestable), @@ -1463,6 +1473,7 @@ fn observe_head_block_delays( "total_delay_ms" => block_delay_total.as_millis(), "observed_delay_ms" => format_delay(&block_delays.observed), "blob_delay_ms" => format_delay(&block_delays.all_blobs_observed), + "consensus_time_ms" => format_delay(&block_delays.consensus_verification_time), "execution_time_ms" => format_delay(&block_delays.execution_time), "available_delay_ms" => format_delay(&block_delays.available), "attestable_delay_ms" => format_delay(&block_delays.attestable), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index dd0d97b1dae..2431769ddb0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,14 +2,11 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -pub use crate::data_availability_checker::child_components::ChildComponents; -use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache; +use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slasher::test_utils::E; use slog::{debug, error, Logger}; use slot_clock::SlotClock; -use ssz_types::FixedVector; use std::fmt; use std::fmt::Debug; use std::num::NonZeroUsize; @@ -19,7 +16,6 @@ use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; -mod child_components; mod error; mod overflow_lru_cache; mod state_lru_cache; @@ -37,12 +33,32 @@ pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024); pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2); pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); -/// This includes a cache for any blocks or blobs that have been received over gossip or RPC -/// and are awaiting more components before they can be imported. Additionally the -/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as -/// checking whether a "availability check" is required at all. 
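The new `consensus_verification_time` gauge follows the same pattern as the other block-delay metrics in `observe_head_block_delays`: an optional per-stage duration is defaulted to zero and reported in milliseconds. A small illustrative sketch of that conversion, with `set_gauge` standing in for the real metrics sink:

```rust
use std::time::Duration;

// Sketch only: how an optional per-stage block delay can be turned into a
// millisecond gauge value, defaulting to zero when the stage was never timed.
// `set_gauge` here is a stand-in for the real metrics sink.
fn set_gauge(name: &str, value: i64) {
    println!("{name} = {value} ms");
}

fn observe_delay(name: &str, delay: Option<Duration>) {
    let millis = delay.unwrap_or_else(|| Duration::from_secs(0)).as_millis() as i64;
    set_gauge(name, millis);
}

fn main() {
    observe_delay(
        "beacon_block_delay_consensus_verification_time",
        Some(Duration::from_millis(42)),
    );
    observe_delay("beacon_block_delay_execution_time", None); // reported as 0
}
```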
+/// Cache to hold fully valid data that can't be imported to fork-choice yet. After the Dencun hard-fork, +/// blocks have a sidecar of data that is received separately from the network. We call the concept +/// of a block "becoming available" when all of its import dependencies are inserted into this +/// cache. +/// +/// Usually a block becomes available on its slot within a second of receiving its first component +/// over gossip. However, a block may never become available if a malicious proposer does not +/// publish its data, or there are network issues that prevent us from receiving it. If the block +/// does not become available after some time we can safely forget about it. Consider these two +/// cases: +/// +/// - Global unavailability: If nobody has received the block components it's likely that the +/// proposer never made the block available. So we can safely forget about the block as it will +/// never become available. +/// - Local unavailability: Some fraction of the network has received all block components, but not us. +/// Some of our peers will eventually attest to a descendant of that block and lookup sync will +/// fetch its components. Therefore it's not strictly necessary to hold on to the partially available +/// block for too long as we can recover from other peers. +/// +/// Even in periods of non-finality, the proposer is expected to publish the block's data +/// immediately. Because this cache only holds fully valid data, its capacity is bound to 1 block +/// per slot and fork: before inserting into this cache we check the proposer signature and correct +/// proposer. Having a capacity > 1 is an optimization to prevent sync lookup from having to re-fetch +/// data during moments of unstable network conditions. pub struct DataAvailabilityChecker { - availability_cache: Arc>, + availability_cache: Arc>, slot_clock: T::SlotClock, kzg: Option>, log: Logger, @@ -78,7 +94,8 @@ impl DataAvailabilityChecker { log: &Logger, spec: ChainSpec, ) -> Result { - let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; + let overflow_cache = + DataAvailabilityCheckerInner::new(OVERFLOW_LRU_CAPACITY, store, spec.clone())?; Ok(Self { availability_cache: Arc::new(overflow_cache), slot_clock, @@ -88,74 +105,29 @@ impl DataAvailabilityChecker { }) } - /// Checks if the block root is currenlty in the availability cache awaiting processing because + /// Checks if the block root is currently in the availability cache awaiting import because /// of missing components. - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.availability_cache.has_block(block_root) - } - - pub fn get_missing_blob_ids_with(&self, block_root: Hash256) -> MissingBlobs { + pub fn get_execution_valid_block( + &self, + block_root: &Hash256, + ) -> Option>> { self.availability_cache - .with_pending_components(&block_root, |pending_components| match pending_components { - Some(pending_components) => self.get_missing_blob_ids( - block_root, - pending_components - .get_cached_block() - .as_ref() - .map(|b| b.as_block()), - &pending_components.verified_blobs, - ), - None => MissingBlobs::new_without_block(block_root, self.is_deneb()), - }) + .get_execution_valid_block(block_root) } - /// If there's no block, all possible ids will be returned that don't exist in the given blobs. - /// If there no blobs, all possible ids will be returned.
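The new doc comment above describes components accumulating per block root until the block "becomes available". The following is a deliberately simplified, std-only model of that idea (hypothetical names, not the Lighthouse types), including the new rule that an entry is only removed once the block has actually been imported:

```rust
use std::collections::HashMap;

// Hypothetical, simplified model of the availability cache described above:
// a block root maps to the components seen so far, and the entry "becomes
// available" once the block and every expected blob index are present.
// All names here are illustrative, not the real Lighthouse types.
#[derive(Default)]
struct PendingComponents {
    block: Option<String>,         // stand-in for an execution-valid block
    blobs: Vec<Option<String>>,    // stand-in for KZG-verified blobs by index
    expected_blobs: Option<usize>, // known once the block arrives
}

impl PendingComponents {
    fn is_available(&self) -> bool {
        match (self.block.as_ref(), self.expected_blobs) {
            (Some(_), Some(n)) => self.blobs.iter().take(n).filter(|b| b.is_some()).count() == n,
            _ => false,
        }
    }
}

#[derive(Default)]
struct AvailabilityCache {
    pending: HashMap<u64, PendingComponents>, // keyed by a stand-in block root
}

impl AvailabilityCache {
    fn put_block(&mut self, root: u64, block: String, expected_blobs: usize) -> bool {
        let entry = self.pending.entry(root).or_default();
        entry.block = Some(block);
        entry.expected_blobs = Some(expected_blobs);
        entry.blobs.resize(expected_blobs.max(entry.blobs.len()), None);
        entry.is_available()
    }

    fn put_blob(&mut self, root: u64, index: usize, blob: String) -> bool {
        let entry = self.pending.entry(root).or_default();
        if entry.blobs.len() <= index {
            entry.blobs.resize(index + 1, None);
        }
        entry.blobs[index] = Some(blob);
        entry.is_available()
    }

    // Mirrors `remove_pending_components`: entries are only dropped once the
    // block has actually been imported into fork choice.
    fn remove(&mut self, root: u64) {
        self.pending.remove(&root);
    }
}

fn main() {
    let mut cache = AvailabilityCache::default();
    assert!(!cache.put_blob(1, 0, "blob 0".into()));
    assert!(!cache.put_block(1, "block".into(), 2));
    assert!(cache.put_blob(1, 1, "blob 1".into())); // now available
    cache.remove(1); // simulate successful import
    assert!(cache.pending.is_empty());
}
```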
- pub fn get_missing_blob_ids( - &self, - block_root: Hash256, - block: Option<&SignedBeaconBlock>, - blobs: &FixedVector, ::MaxBlobsPerBlock>, - ) -> MissingBlobs { - let Some(current_slot) = self.slot_clock.now_or_genesis() else { - error!( - self.log, - "Failed to read slot clock when checking for missing blob ids" - ); - return MissingBlobs::BlobsNotRequired; - }; - - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - if self.da_check_required_for_epoch(current_epoch) { - match block { - Some(cached_block) => { - let block_commitments_len = cached_block - .message() - .body() - .blob_kzg_commitments() - .map(|v| v.len()) - .unwrap_or(0); - let blob_ids = blobs + /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// component for `block_root`. + pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + self.availability_cache + .peek_pending_components(block_root, |components| { + components.map(|components| { + components + .get_cached_blobs() .iter() - .take(block_commitments_len) - .enumerate() - .filter_map(|(index, blob_commitment_opt)| { - blob_commitment_opt.is_none().then_some(BlobIdentifier { - block_root, - index: index as u64, - }) - }) - .collect(); - MissingBlobs::KnownMissing(blob_ids) - } - None => { - MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) - } - } - } else { - MissingBlobs::BlobsNotRequired - } + .filter_map(|blob| blob.as_ref().map(|blob| blob.blob_index())) + .collect::>() + }) + }) } /// Get a blob from the availability cache. @@ -213,6 +185,11 @@ impl DataAvailabilityChecker { .put_pending_executed_block(executed_block) } + pub fn remove_pending_components(&self, block_root: Hash256) { + self.availability_cache + .remove_pending_components(block_root) + } + /// Verifies kzg commitments for an RpcBlock, returns a `MaybeAvailableBlock` that may /// include the fully available block. /// @@ -351,6 +328,18 @@ impl DataAvailabilityChecker { .map_or(false, |da_epoch| block_epoch >= da_epoch) } + pub fn da_check_required_for_current_epoch(&self) -> bool { + let Some(current_slot) = self.slot_clock.now_or_genesis() else { + error!( + self.log, + "Failed to read slot clock when checking for missing blob ids" + ); + return false; + }; + + self.da_check_required_for_epoch(current_slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. pub fn is_deneb(&self) -> bool { self.slot_clock.now().map_or(false, |slot| { @@ -361,15 +350,9 @@ impl DataAvailabilityChecker { }) } - /// Persist all in memory components to disk - pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> { - self.availability_cache.write_all_to_disk() - } - /// Collects metrics from the data availability checker. pub fn metrics(&self) -> DataAvailabilityCheckerMetrics { DataAvailabilityCheckerMetrics { - num_store_entries: self.availability_cache.num_store_entries(), state_cache_size: self.availability_cache.state_cache_size(), block_cache_size: self.availability_cache.block_cache_size(), } @@ -378,7 +361,6 @@ impl DataAvailabilityChecker { /// Helper struct to group data availability checker metrics. 
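`da_check_required_for_current_epoch` is essentially an epoch gate. A simplified sketch of that gate, assuming a hypothetical Deneb fork epoch and ignoring the data-availability boundary window the real code consults:

```rust
// Illustrative sketch of the epoch gate behind `da_check_required_for_current_epoch`:
// data-availability checks only apply once a (hypothetical) Deneb fork epoch has
// been reached. Constants and the slot/epoch math are simplified stand-ins.
const SLOTS_PER_EPOCH: u64 = 32;

fn epoch_of(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

fn da_check_required_for_epoch(block_epoch: u64, deneb_fork_epoch: Option<u64>) -> bool {
    // No Deneb epoch scheduled means there are no blobs to check.
    deneb_fork_epoch.map_or(false, |da_epoch| block_epoch >= da_epoch)
}

fn da_check_required_for_current_epoch(
    current_slot: Option<u64>,
    deneb_fork_epoch: Option<u64>,
) -> bool {
    // Mirrors the defensive handling of a missing slot clock: default to `false`.
    let Some(current_slot) = current_slot else {
        eprintln!("Failed to read slot clock");
        return false;
    };
    da_check_required_for_epoch(epoch_of(current_slot), deneb_fork_epoch)
}

fn main() {
    assert!(!da_check_required_for_current_epoch(Some(10 * SLOTS_PER_EPOCH), None));
    assert!(!da_check_required_for_current_epoch(Some(10 * SLOTS_PER_EPOCH), Some(11)));
    assert!(da_check_required_for_current_epoch(Some(12 * SLOTS_PER_EPOCH), Some(11)));
    assert!(!da_check_required_for_current_epoch(None, Some(11)));
}
```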
pub struct DataAvailabilityCheckerMetrics { - pub num_store_entries: usize, pub state_cache_size: usize, pub block_cache_size: usize, } @@ -404,7 +386,7 @@ pub fn start_availability_cache_maintenance_service( async fn availability_cache_maintenance_service( chain: Arc>, - overflow_cache: Arc>, + overflow_cache: Arc>, ) { let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; loop { @@ -544,61 +526,3 @@ impl MaybeAvailableBlock { } } } - -#[derive(Debug, Clone)] -pub enum MissingBlobs { - /// We know for certain these blobs are missing. - KnownMissing(Vec), - /// We think these blobs might be missing. - PossibleMissing(Vec), - /// Blobs are not required. - BlobsNotRequired, -} - -impl MissingBlobs { - pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self { - if is_deneb { - MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::(block_root)) - } else { - MissingBlobs::BlobsNotRequired - } - } - pub fn is_empty(&self) -> bool { - match self { - MissingBlobs::KnownMissing(v) => v.is_empty(), - MissingBlobs::PossibleMissing(v) => v.is_empty(), - MissingBlobs::BlobsNotRequired => true, - } - } - pub fn contains(&self, blob_id: &BlobIdentifier) -> bool { - match self { - MissingBlobs::KnownMissing(v) => v.contains(blob_id), - MissingBlobs::PossibleMissing(v) => v.contains(blob_id), - MissingBlobs::BlobsNotRequired => false, - } - } - pub fn remove(&mut self, blob_id: &BlobIdentifier) { - match self { - MissingBlobs::KnownMissing(v) => v.retain(|id| id != blob_id), - MissingBlobs::PossibleMissing(v) => v.retain(|id| id != blob_id), - MissingBlobs::BlobsNotRequired => {} - } - } - pub fn indices(&self) -> Vec { - match self { - MissingBlobs::KnownMissing(v) => v.iter().map(|id| id.index).collect(), - MissingBlobs::PossibleMissing(v) => v.iter().map(|id| id.index).collect(), - MissingBlobs::BlobsNotRequired => vec![], - } - } -} - -impl Into> for MissingBlobs { - fn into(self) -> Vec { - match self { - MissingBlobs::KnownMissing(v) => v, - MissingBlobs::PossibleMissing(v) => v, - MissingBlobs::BlobsNotRequired => vec![], - } - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs b/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs deleted file mode 100644 index 184dfc45001..00000000000 --- a/beacon_node/beacon_chain/src/data_availability_checker/child_components.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::block_verification_types::RpcBlock; -use bls::Hash256; -use std::sync::Arc; -use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; - -/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct -/// is used to cache components as they are sent to the network service. We can't use the -/// data availability cache currently because any blocks or blobs without parents -/// won't pass validation and therefore won't make it into the cache. 
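The deleted `child_components` module cached a downloaded block and its blobs, keyed by blob index, for unknown-parent lookups. A compact stand-in illustration of that merge-by-index pattern (not the real types; maximum blob count is an assumed constant):

```rust
// Minimal sketch of the merge-by-index pattern the deleted `ChildComponents`
// struct used: blobs land in a fixed-size slot keyed by their index, and a
// later duplicate simply overwrites the earlier copy. Types are stand-ins.
const MAX_BLOBS_PER_BLOCK: usize = 6;

#[derive(Debug)]
struct Blob {
    index: usize,
    data: &'static str,
}

#[derive(Default)]
struct ChildComponents {
    downloaded_block: Option<&'static str>,
    downloaded_blobs: [Option<Blob>; MAX_BLOBS_PER_BLOCK],
}

impl ChildComponents {
    fn merge_block(&mut self, block: &'static str) {
        self.downloaded_block = Some(block);
    }

    fn merge_blob(&mut self, blob: Blob) {
        // Out-of-range indices are silently ignored, as in the original helper.
        if let Some(slot) = self.downloaded_blobs.get_mut(blob.index) {
            *slot = Some(blob);
        }
    }
}

fn main() {
    let mut components = ChildComponents::default();
    components.merge_block("child block with unknown parent");
    components.merge_blob(Blob { index: 1, data: "blob 1" });
    components.merge_blob(Blob { index: 99, data: "ignored" }); // out of range
    assert!(components.downloaded_blobs[1].is_some());
    assert!(components.downloaded_blobs[0].is_none());
    println!("{:?}", components.downloaded_blobs[1]);
}
```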
-pub struct ChildComponents { - pub block_root: Hash256, - pub downloaded_block: Option>>, - pub downloaded_blobs: FixedBlobSidecarList, -} - -impl From> for ChildComponents { - fn from(value: RpcBlock) -> Self { - let (block_root, block, blobs) = value.deconstruct(); - let fixed_blobs = blobs.map(|blobs| { - FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()) - }); - Self::new(block_root, Some(block), fixed_blobs) - } -} - -impl ChildComponents { - pub fn empty(block_root: Hash256) -> Self { - Self { - block_root, - downloaded_block: None, - downloaded_blobs: <_>::default(), - } - } - pub fn new( - block_root: Hash256, - block: Option>>, - blobs: Option>, - ) -> Self { - let mut cache = Self::empty(block_root); - if let Some(block) = block { - cache.merge_block(block); - } - if let Some(blobs) = blobs { - cache.merge_blobs(blobs); - } - cache - } - - pub fn merge_block(&mut self, block: Arc>) { - self.downloaded_block = Some(block); - } - - pub fn merge_blob(&mut self, blob: Arc>) { - if let Some(blob_ref) = self.downloaded_blobs.get_mut(blob.index as usize) { - *blob_ref = Some(blob); - } - } - - pub fn merge_blobs(&mut self, blobs: FixedBlobSidecarList) { - for blob in blobs.iter().flatten() { - self.merge_blob(blob.clone()); - } - } - - pub fn clear_blobs(&mut self) { - self.downloaded_blobs = FixedBlobSidecarList::default(); - } -} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 6c524786bfa..d22f6b2cc9f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -22,6 +22,7 @@ pub enum Error { SlotClockError, } +#[derive(PartialEq, Eq)] pub enum ErrorCategory { /// Internal Errors (not caused by peers) Internal, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 91c776adc10..5e0513c8d30 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1,32 +1,3 @@ -//! This module implements a LRU cache for storing partially available blocks and blobs. -//! When the cache overflows, the least recently used items are persisted to the database. -//! This prevents lighthouse from using too much memory storing unfinalized blocks and blobs -//! if the chain were to lose finality. -//! -//! ## Deadlock safety -//! -//! The main object in this module is the `OverflowLruCache`. It contains two locks: -//! -//! - `self.critical` is an `RwLock` that protects content stored in memory. -//! - `self.maintenance_lock` is held when moving data between memory and disk. -//! -//! You mostly need to ensure that you don't try to hold the critical lock more than once -//! -//! ## Basic Algorithm -//! -//! As blocks and blobs come in from the network, their components are stored in memory in -//! this cache. When a block becomes fully available, it is removed from the cache and -//! imported into fork-choice. Blocks/blobs that remain unavailable will linger in the -//! cache until they are older than the finalized epoch or older than the data availability -//! cutoff. In the event the chain is not finalizing, the cache will eventually overflow and -//! the least recently used items will be persisted to disk. When this happens, we will still -//! 
store the hash of the block in memory so we always know we have data for that block -//! without needing to check the database. -//! -//! When the client is shut down, all pending components are persisted in the database. -//! On startup, the keys of these components are stored in memory and will be loaded in -//! the cache when they are accessed. - use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; @@ -34,17 +5,15 @@ use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; -use crate::store::{DBColumn, KeyValueStore}; use crate::BeaconChainTypes; use lru::LruCache; -use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; -use ssz::{Decode, Encode}; +use parking_lot::RwLock; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; use std::num::NonZeroUsize; -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; -use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; +use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This represents the components of a partially available block /// @@ -243,314 +212,44 @@ impl PendingComponents { AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), ))) } - - /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. - pub fn epoch(&self) -> Option { - self.executed_block - .as_ref() - .map(|pending_block| pending_block.as_block().epoch()) - .or_else(|| { - for maybe_blob in self.verified_blobs.iter() { - if maybe_blob.is_some() { - return maybe_blob.as_ref().map(|kzg_verified_blob| { - kzg_verified_blob - .as_blob() - .slot() - .epoch(E::slots_per_epoch()) - }); - } - } - None - }) - } -} - -/// Blocks and blobs are stored in the database sequentially so that it's -/// fast to iterate over all the data for a particular block. -#[derive(Debug, PartialEq)] -enum OverflowKey { - Block(Hash256), - Blob(Hash256, u8), -} - -impl OverflowKey { - pub fn from_block_root(block_root: Hash256) -> Self { - Self::Block(block_root) - } - - pub fn from_blob_id( - blob_id: BlobIdentifier, - ) -> Result { - if blob_id.index > E::max_blobs_per_block() as u64 || blob_id.index > u8::MAX as u64 { - return Err(AvailabilityCheckError::BlobIndexInvalid(blob_id.index)); - } - Ok(Self::Blob(blob_id.block_root, blob_id.index as u8)) - } - - pub fn root(&self) -> &Hash256 { - match self { - Self::Block(root) => root, - Self::Blob(root, _) => root, - } - } -} - -/// A wrapper around BeaconStore that implements various -/// methods used for saving and retrieving blocks / blobs -/// from the store (for organization) -struct OverflowStore(BeaconStore); - -impl OverflowStore { - /// Store pending components in the database - pub fn persist_pending_components( - &self, - block_root: Hash256, - mut pending_components: PendingComponents, - ) -> Result<(), AvailabilityCheckError> { - let col = DBColumn::OverflowLRUCache; - - if let Some(block) = pending_components.executed_block.take() { - let key = OverflowKey::from_block_root(block_root); - self.0 - .hot_db - .put_bytes(col.as_str(), &key.as_ssz_bytes(), &block.as_ssz_bytes())? 
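The removed module doc above describes the overflow behaviour being deleted in this PR: keep pending components in memory and, when the cache is full, persist the least recently used entry to disk while remembering its root. A toy, std-only model of that flow (a `HashMap` stands in for the on-disk `OverflowLRUCache` column):

```rust
use std::collections::{HashMap, HashSet, VecDeque};

// Toy model (std-only, illustrative names) of the overflow behaviour that this
// PR removes: an in-memory cache with LRU order that, when full, writes the
// least recently used entry to a stand-in "disk" map and remembers its key in
// `store_keys`, so lookups know data exists without hitting the database.
struct OverflowCache {
    capacity: usize,
    lru_order: VecDeque<u64>, // front = least recently used
    in_memory: HashMap<u64, String>,
    disk: HashMap<u64, String>, // stands in for the on-disk column
    store_keys: HashSet<u64>,
}

impl OverflowCache {
    fn new(capacity: usize) -> Self {
        Self {
            capacity,
            lru_order: VecDeque::new(),
            in_memory: HashMap::new(),
            disk: HashMap::new(),
            store_keys: HashSet::new(),
        }
    }

    fn put(&mut self, root: u64, value: String) {
        if self.in_memory.len() == self.capacity && !self.in_memory.contains_key(&root) {
            // Cache would overflow: persist the LRU entry before inserting.
            if let Some(lru_root) = self.lru_order.pop_front() {
                if let Some(lru_value) = self.in_memory.remove(&lru_root) {
                    self.disk.insert(lru_root, lru_value);
                    self.store_keys.insert(lru_root);
                }
            }
        }
        self.lru_order.retain(|r| *r != root);
        self.lru_order.push_back(root);
        self.in_memory.insert(root, value);
    }

    fn has_block(&self, root: &u64) -> bool {
        self.in_memory.contains_key(root) || self.store_keys.contains(root)
    }
}

fn main() {
    let mut cache = OverflowCache::new(2);
    cache.put(1, "components for block 1".into());
    cache.put(2, "components for block 2".into());
    cache.put(3, "components for block 3".into()); // evicts root 1 to "disk"
    assert!(cache.has_block(&1)); // still known, just not in memory
    assert!(cache.disk.contains_key(&1));
    assert_eq!(cache.in_memory.len(), 2);
}
```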
- } - - for blob in Vec::from(pending_components.verified_blobs) - .into_iter() - .flatten() - { - let key = OverflowKey::from_blob_id::(BlobIdentifier { - block_root, - index: blob.blob_index(), - })?; - - self.0 - .hot_db - .put_bytes(col.as_str(), &key.as_ssz_bytes(), &blob.as_ssz_bytes())? - } - - Ok(()) - } - - /// Load the pending components that we have in the database for a given block root - pub fn load_pending_components( - &self, - block_root: Hash256, - ) -> Result>, AvailabilityCheckError> { - // read everything from disk and reconstruct - let mut maybe_pending_components = None; - for res in self - .0 - .hot_db - .iter_raw_entries(DBColumn::OverflowLRUCache, block_root.as_bytes()) - { - let (key_bytes, value_bytes) = res?; - match OverflowKey::from_ssz_bytes(&key_bytes)? { - OverflowKey::Block(_) => { - maybe_pending_components - .get_or_insert_with(|| PendingComponents::empty(block_root)) - .executed_block = - Some(DietAvailabilityPendingExecutedBlock::from_ssz_bytes( - value_bytes.as_slice(), - )?); - } - OverflowKey::Blob(_, index) => { - *maybe_pending_components - .get_or_insert_with(|| PendingComponents::empty(block_root)) - .verified_blobs - .get_mut(index as usize) - .ok_or(AvailabilityCheckError::BlobIndexInvalid(index as u64))? = - Some(KzgVerifiedBlob::from_ssz_bytes(value_bytes.as_slice())?); - } - } - } - - Ok(maybe_pending_components) - } - - /// Returns the hashes of all the blocks we have any data for on disk - pub fn read_keys_on_disk(&self) -> Result, AvailabilityCheckError> { - let mut disk_keys = HashSet::new(); - for res in self.0.hot_db.iter_raw_keys(DBColumn::OverflowLRUCache, &[]) { - let key_bytes = res?; - disk_keys.insert(*OverflowKey::from_ssz_bytes(&key_bytes)?.root()); - } - Ok(disk_keys) - } - - /// Load a single blob from the database - pub fn load_blob( - &self, - blob_id: &BlobIdentifier, - ) -> Result>>, AvailabilityCheckError> { - let key = OverflowKey::from_blob_id::(*blob_id)?; - - self.0 - .hot_db - .get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())? - .map(|blob_bytes| Arc::>::from_ssz_bytes(blob_bytes.as_slice())) - .transpose() - .map_err(|e| e.into()) - } - - /// Delete a set of keys from the database - pub fn delete_keys(&self, keys: &Vec) -> Result<(), AvailabilityCheckError> { - for key in keys { - self.0 - .hot_db - .key_delete(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?; - } - Ok(()) - } -} - -/// This data stores the *critical* data that we need to keep in memory -/// protected by the RWLock -struct Critical { - /// This is the LRU cache of pending components - pub in_memory: LruCache>, - /// This holds all the roots of the blocks for which we have - /// `PendingComponents` in the database. 
- pub store_keys: HashSet, -} - -impl Critical { - pub fn new(capacity: NonZeroUsize) -> Self { - Self { - in_memory: LruCache::new(capacity), - store_keys: HashSet::new(), - } - } - - pub fn reload_store_keys( - &mut self, - overflow_store: &OverflowStore, - ) -> Result<(), AvailabilityCheckError> { - let disk_keys = overflow_store.read_keys_on_disk()?; - self.store_keys = disk_keys; - Ok(()) - } - - /// Returns true if the block root is known, without altering the LRU ordering - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.in_memory.peek(block_root).is_some() || self.store_keys.contains(block_root) - } - - /// This only checks for the blobs in memory - pub fn peek_blob( - &self, - blob_id: &BlobIdentifier, - ) -> Result>>, AvailabilityCheckError> { - if let Some(pending_components) = self.in_memory.peek(&blob_id.block_root) { - Ok(pending_components - .verified_blobs - .get(blob_id.index as usize) - .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_id.index))? - .as_ref() - .map(|blob| blob.clone_blob())) - } else { - Ok(None) - } - } - - pub fn peek_pending_components( - &self, - block_root: &Hash256, - ) -> Option<&PendingComponents> { - self.in_memory.peek(block_root) - } - - /// Puts the pending components in the LRU cache. If the cache - /// is at capacity, the LRU entry is written to the store first - pub fn put_pending_components( - &mut self, - block_root: Hash256, - pending_components: PendingComponents, - overflow_store: &OverflowStore, - ) -> Result<(), AvailabilityCheckError> { - if self.in_memory.len() == self.in_memory.cap().get() { - // cache will overflow, must write lru entry to disk - if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() { - overflow_store.persist_pending_components(lru_key, lru_value)?; - self.store_keys.insert(lru_key); - } - } - self.in_memory.put(block_root, pending_components); - Ok(()) - } - - /// Removes and returns the pending_components corresponding to - /// the `block_root` or `None` if it does not exist - pub fn pop_pending_components( - &mut self, - block_root: Hash256, - store: &OverflowStore, - ) -> Result>, AvailabilityCheckError> { - match self.in_memory.pop_entry(&block_root) { - Some((_, pending_components)) => Ok(Some(pending_components)), - None => { - // not in memory, is it in the store? - if self.store_keys.remove(&block_root) { - // We don't need to remove the data from the store as we have removed it from - // `store_keys` so we won't go looking for it on disk. The maintenance thread - // will remove it from disk the next time it runs. - store.load_pending_components(block_root) - } else { - Ok(None) - } - } - } - } - - /// Returns the number of pending component entries in memory. - pub fn num_blocks(&self) -> usize { - self.in_memory.len() - } - - /// Returns the number of entries that have overflowed to disk. - pub fn num_store_entries(&self) -> usize { - self.store_keys.len() - } } /// This is the main struct for this module. Outside methods should /// interact with the cache through this. -pub struct OverflowLRUCache { +pub struct DataAvailabilityCheckerInner { /// Contains all the data we keep in memory, protected by an RwLock - critical: RwLock>, - /// This is how we read and write components to the disk - overflow_store: OverflowStore, + critical: RwLock>>, /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. 
This is necessary until we merge tree-states state_cache: StateLRUCache, - /// Mutex to guard maintenance methods which move data between disk and memory - maintenance_lock: Mutex<()>, - /// The capacity of the LRU cache - capacity: NonZeroUsize, } -impl OverflowLRUCache { +impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, spec: ChainSpec, ) -> Result { - let overflow_store = OverflowStore(beacon_store.clone()); - let mut critical = Critical::new(capacity); - critical.reload_store_keys(&overflow_store)?; Ok(Self { - critical: RwLock::new(critical), - overflow_store, + critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec), - maintenance_lock: Mutex::new(()), - capacity, }) } /// Returns true if the block root is known, without altering the LRU ordering - pub fn has_block(&self, block_root: &Hash256) -> bool { - self.critical.read().has_block(block_root) + pub fn get_execution_valid_block( + &self, + block_root: &Hash256, + ) -> Option>> { + self.critical + .read() + .peek(block_root) + .and_then(|pending_components| { + pending_components + .executed_block + .as_ref() + .map(|block| block.block_cloned()) + }) } /// Fetch a blob from the cache without affecting the LRU ordering @@ -558,23 +257,24 @@ impl OverflowLRUCache { &self, blob_id: &BlobIdentifier, ) -> Result>>, AvailabilityCheckError> { - let read_lock = self.critical.read(); - if let Some(blob) = read_lock.peek_blob(blob_id)? { - Ok(Some(blob)) - } else if read_lock.store_keys.contains(&blob_id.block_root) { - drop(read_lock); - self.overflow_store.load_blob(blob_id) + if let Some(pending_components) = self.critical.read().peek(&blob_id.block_root) { + Ok(pending_components + .verified_blobs + .get(blob_id.index as usize) + .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_id.index))? + .as_ref() + .map(|blob| blob.clone_blob())) } else { Ok(None) } } - pub fn with_pending_components>) -> R>( + pub fn peek_pending_components>) -> R>( &self, block_root: &Hash256, f: F, ) -> R { - f(self.critical.read().peek_pending_components(block_root)) + f(self.critical.read().peek(block_root)) } pub fn put_kzg_verified_blobs>>( @@ -594,24 +294,22 @@ impl OverflowLRUCache { // Grab existing entry or create a new entry. let mut pending_components = write_lock - .pop_pending_components(block_root, &self.overflow_store)? + .pop_entry(&block_root) + .map(|(_, v)| v) .unwrap_or_else(|| PendingComponents::empty(block_root)); // Merge in the blobs. pending_components.merge_blobs(fixed_blobs); if pending_components.is_available() { + write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); pending_components.make_available(|diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { - write_lock.put_pending_components( - block_root, - pending_components, - &self.overflow_store, - )?; + write_lock.put(block_root, pending_components); Ok(Availability::MissingComponents(block_root)) } } @@ -632,7 +330,8 @@ impl OverflowLRUCache { // Grab existing entry or create a new entry. let mut pending_components = write_lock - .pop_pending_components(block_root, &self.overflow_store)? + .pop_entry(&block_root) + .map(|(_, v)| v) .unwrap_or_else(|| PendingComponents::empty(block_root)); // Merge in the block. @@ -640,194 +339,29 @@ impl OverflowLRUCache { // Check if we have all components and entire set is consistent. 
if pending_components.is_available() { + write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); pending_components.make_available(|diet_block| { self.state_cache.recover_pending_executed_block(diet_block) }) } else { - write_lock.put_pending_components( - block_root, - pending_components, - &self.overflow_store, - )?; + write_lock.put(block_root, pending_components); Ok(Availability::MissingComponents(block_root)) } } - /// write all in memory objects to disk - pub fn write_all_to_disk(&self) -> Result<(), AvailabilityCheckError> { - let maintenance_lock = self.maintenance_lock.lock(); - let mut critical_lock = self.critical.write(); - - let mut swap_lru = LruCache::new(self.capacity); - std::mem::swap(&mut swap_lru, &mut critical_lock.in_memory); - - for (root, pending_components) in swap_lru.into_iter() { - self.overflow_store - .persist_pending_components(root, pending_components)?; - critical_lock.store_keys.insert(root); - } - - drop(critical_lock); - drop(maintenance_lock); - Ok(()) + pub fn remove_pending_components(&self, block_root: Hash256) { + self.critical.write().pop_entry(&block_root); } /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { - // ensure memory usage is below threshold - let threshold = self.capacity.get() * 3 / 4; - self.maintain_threshold(threshold, cutoff_epoch)?; - // clean up any keys on the disk that shouldn't be there - self.prune_disk(cutoff_epoch)?; // clean up any lingering states in the state cache self.state_cache.do_maintenance(cutoff_epoch); Ok(()) } - /// Enforce that the size of the cache is below a given threshold by - /// moving the least recently used items to disk. - fn maintain_threshold( - &self, - threshold: usize, - cutoff_epoch: Epoch, - ) -> Result<(), AvailabilityCheckError> { - // ensure only one thread at a time can be deleting things from the disk or - // moving things between memory and storage - let maintenance_lock = self.maintenance_lock.lock(); - - let mut stored = self.critical.read().in_memory.len(); - while stored > threshold { - let read_lock = self.critical.upgradable_read(); - let lru_entry = read_lock - .in_memory - .peek_lru() - .map(|(key, value)| (*key, value.clone())); - - let Some((lru_root, lru_pending_components)) = lru_entry else { - break; - }; - - if lru_pending_components - .epoch() - .map(|epoch| epoch < cutoff_epoch) - .unwrap_or(true) - { - // this data is no longer needed -> delete it - let mut write_lock = RwLockUpgradableReadGuard::upgrade(read_lock); - write_lock.in_memory.pop_entry(&lru_root); - stored = write_lock.in_memory.len(); - continue; - } else { - drop(read_lock); - } - - // write the lru entry to disk (we aren't holding any critical locks while we do this) - self.overflow_store - .persist_pending_components(lru_root, lru_pending_components)?; - // now that we've written to disk, grab the critical write lock - let mut write_lock = self.critical.write(); - if let Some((new_lru_root_ref, _)) = write_lock.in_memory.peek_lru() { - // need to ensure the entry we just wrote to disk wasn't updated - // while we were writing and is still the LRU entry - if *new_lru_root_ref == lru_root { - // it is still LRU entry -> delete it from memory & record that it's on disk - write_lock.in_memory.pop_entry(&lru_root); - write_lock.store_keys.insert(lru_root); - } - } - stored = write_lock.in_memory.len(); - drop(write_lock); - } - - drop(maintenance_lock); - Ok(()) - } - - /// Delete any 
data on disk that shouldn't be there. This can happen if - /// 1. The entry has been moved back to memory (or become fully available) - /// 2. The entry belongs to a block beyond the cutoff epoch - fn prune_disk(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { - // ensure only one thread at a time can be deleting things from the disk or - // moving things between memory and storage - let maintenance_lock = self.maintenance_lock.lock(); - - struct BlockData { - keys: Vec, - root: Hash256, - epoch: Epoch, - } - - let delete_if_outdated = |cache: &OverflowLRUCache, - block_data: Option| - -> Result<(), AvailabilityCheckError> { - let Some(block_data) = block_data else { - return Ok(()); - }; - let not_in_store_keys = !cache.critical.read().store_keys.contains(&block_data.root); - if not_in_store_keys { - // these keys aren't supposed to be on disk - cache.overflow_store.delete_keys(&block_data.keys)?; - } else { - // check this data is still relevant - if block_data.epoch < cutoff_epoch { - // this data is no longer needed -> delete it - self.overflow_store.delete_keys(&block_data.keys)?; - } - } - Ok(()) - }; - - let mut current_block_data: Option = None; - for res in self - .overflow_store - .0 - .hot_db - .iter_raw_entries(DBColumn::OverflowLRUCache, &[]) - { - let (key_bytes, value_bytes) = res?; - let overflow_key = OverflowKey::from_ssz_bytes(&key_bytes)?; - let current_root = *overflow_key.root(); - - match &mut current_block_data { - Some(block_data) if block_data.root == current_root => { - // still dealing with the same block - block_data.keys.push(overflow_key); - } - _ => { - // first time encountering data for this block - delete_if_outdated(self, current_block_data)?; - let current_epoch = match &overflow_key { - OverflowKey::Block(_) => { - DietAvailabilityPendingExecutedBlock::::from_ssz_bytes( - value_bytes.as_slice(), - )? - .as_block() - .epoch() - } - OverflowKey::Blob(_, _) => { - KzgVerifiedBlob::::from_ssz_bytes(value_bytes.as_slice())? - .as_blob() - .slot() - .epoch(T::EthSpec::slots_per_epoch()) - } - }; - current_block_data = Some(BlockData { - keys: vec![overflow_key], - root: current_root, - epoch: current_epoch, - }); - } - } - } - // can't fall off the end - delete_if_outdated(self, current_block_data)?; - - drop(maintenance_lock); - Ok(()) - } - #[cfg(test)] /// get the state cache for inspection (used only for tests) pub fn state_lru_cache(&self) -> &StateLRUCache { @@ -841,74 +375,7 @@ impl OverflowLRUCache { /// Number of pending component entries in memory in the cache. pub fn block_cache_size(&self) -> usize { - self.critical.read().num_blocks() - } - - /// Returns the number of entries in the cache that have overflowed to disk. 
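The maintenance routines removed here ultimately reduce to pruning pending entries that are older than a cutoff epoch. A minimal sketch of that pruning step, with stand-in key and epoch types:

```rust
use std::collections::HashMap;

// Sketch of the epoch-based pruning idea behind `do_maintenance`: pending
// entries whose epoch is older than the cutoff (e.g. the finalized or
// data-availability-boundary epoch) are simply dropped. Names are illustrative.
fn do_maintenance(pending: &mut HashMap<u64, u64>, cutoff_epoch: u64) {
    // key = block root stand-in, value = epoch of the pending block/blobs
    pending.retain(|_root, epoch| *epoch >= cutoff_epoch);
}

fn main() {
    let mut pending = HashMap::from([(1u64, 10u64), (2, 12), (3, 9)]);
    do_maintenance(&mut pending, 10);
    assert_eq!(pending.len(), 2);
    assert!(!pending.contains_key(&3)); // older than the cutoff, pruned
}
```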
- pub fn num_store_entries(&self) -> usize { - self.critical.read().num_store_entries() - } -} - -impl ssz::Encode for OverflowKey { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - OverflowKey::Block(block_hash) => { - block_hash.ssz_append(buf); - buf.push(0u8) - } - OverflowKey::Blob(block_hash, index) => { - block_hash.ssz_append(buf); - buf.push(*index + 1) - } - } - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() + 1 - } - - fn ssz_bytes_len(&self) -> usize { - match self { - Self::Block(root) => root.ssz_bytes_len() + 1, - Self::Blob(root, _) => root.ssz_bytes_len() + 1, - } - } -} - -impl ssz::Decode for OverflowKey { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() + 1 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let h256_len = ::ssz_fixed_len(); - let expected = h256_len + 1; - - if len != expected { - Err(ssz::DecodeError::InvalidByteLength { len, expected }) - } else { - let root_bytes = bytes - .get(..h256_len) - .ok_or(ssz::DecodeError::OutOfBoundsByte { i: 0 })?; - let block_root = Hash256::from_ssz_bytes(root_bytes)?; - let id_byte = *bytes - .get(h256_len) - .ok_or(ssz::DecodeError::OutOfBoundsByte { i: h256_len })?; - match id_byte { - 0 => Ok(OverflowKey::Block(block_root)), - n => Ok(OverflowKey::Blob(block_root, n - 1)), - } - } + self.critical.read().len() } } @@ -927,8 +394,7 @@ mod test { use logging::test_logger; use slog::{info, Logger}; use state_processing::ConsensusContext; - use std::collections::{BTreeMap, HashMap, VecDeque}; - use std::ops::AddAssign; + use std::collections::VecDeque; use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; use types::non_zero_usize::new_non_zero_usize; @@ -1012,39 +478,6 @@ mod test { harness } - #[test] - fn overflow_key_encode_decode_equality() { - type E = types::MainnetEthSpec; - let key_block = OverflowKey::Block(Hash256::random()); - let key_blob_0 = OverflowKey::from_blob_id::(BlobIdentifier { - block_root: Hash256::random(), - index: 0, - }) - .expect("should create overflow key 0"); - let key_blob_1 = OverflowKey::from_blob_id::(BlobIdentifier { - block_root: Hash256::random(), - index: 1, - }) - .expect("should create overflow key 1"); - let key_blob_2 = OverflowKey::from_blob_id::(BlobIdentifier { - block_root: Hash256::random(), - index: 2, - }) - .expect("should create overflow key 2"); - let key_blob_3 = OverflowKey::from_blob_id::(BlobIdentifier { - block_root: Hash256::random(), - index: 3, - }) - .expect("should create overflow key 3"); - - let keys = vec![key_block, key_blob_0, key_blob_1, key_blob_2, key_blob_3]; - for key in keys { - let encoded = key.as_ssz_bytes(); - let decoded = OverflowKey::from_ssz_bytes(&encoded).expect("should decode"); - assert_eq!(key, decoded, "Encoded and decoded keys should be equal"); - } - } - async fn availability_pending_block( harness: &BeaconChainHarness>, ) -> ( @@ -1141,7 +574,7 @@ mod test { capacity: usize, ) -> ( BeaconChainHarness>, - Arc>, + Arc>, TempDir, ) where @@ -1155,7 +588,7 @@ mod test { let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( - OverflowLRUCache::::new(capacity_non_zero, test_store, spec.clone()) + DataAvailabilityCheckerInner::::new(capacity_non_zero, test_store, spec.clone()) .expect("should create cache"), ); (harness, cache, chain_db_path) @@ -1177,10 +610,7 @@ mod test { blobs_expected, 
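The removed `OverflowKey` SSZ impls encode a key as the 32 root bytes followed by one discriminator byte (0 for a block, n for blob index n - 1). A std-only sketch of that layout and its round-trip, with a fixed-size byte array standing in for `Hash256`:

```rust
// Std-only sketch of the key layout the removed `OverflowKey` SSZ impls used:
// 32 root bytes followed by one discriminator byte, where 0 means "block" and
// n > 0 means "blob with index n - 1". The fixed-size root is a stand-in.
const ROOT_LEN: usize = 32;

#[derive(Debug, PartialEq)]
enum OverflowKey {
    Block([u8; ROOT_LEN]),
    Blob([u8; ROOT_LEN], u8),
}

fn encode(key: &OverflowKey) -> Vec<u8> {
    let (root, tag) = match key {
        OverflowKey::Block(root) => (root, 0u8),
        OverflowKey::Blob(root, index) => (root, index + 1),
    };
    let mut out = root.to_vec();
    out.push(tag);
    out
}

fn decode(bytes: &[u8]) -> Option<OverflowKey> {
    if bytes.len() != ROOT_LEN + 1 {
        return None;
    }
    let mut root = [0u8; ROOT_LEN];
    root.copy_from_slice(&bytes[..ROOT_LEN]);
    match bytes[ROOT_LEN] {
        0 => Some(OverflowKey::Block(root)),
        n => Some(OverflowKey::Blob(root, n - 1)),
    }
}

fn main() {
    let root = [0xab; ROOT_LEN];
    for key in [OverflowKey::Block(root), OverflowKey::Blob(root, 3)] {
        let round_tripped = decode(&encode(&key)).expect("should decode");
        assert_eq!(key, round_tripped);
    }
    println!("encode/decode round-trip holds");
}
```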
"should have expected number of blobs" ); - assert!( - cache.critical.read().in_memory.is_empty(), - "cache should be empty" - ); + assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache .put_pending_executed_block(pending_block) .expect("should put block"); @@ -1190,9 +620,16 @@ mod test { "block doesn't have blobs, should be available" ); assert_eq!( - cache.critical.read().in_memory.len(), + cache.critical.read().len(), + 1, + "cache should still have block as it hasn't been imported yet" + ); + // remove the blob to simulate successful import + cache.remove_pending_components(root); + assert_eq!( + cache.critical.read().len(), 0, - "cache should be empty because we don't have blobs" + "cache should be empty now that block has been imported" ); } else { assert!( @@ -1200,12 +637,12 @@ mod test { "should be pending blobs" ); assert_eq!( - cache.critical.read().in_memory.len(), + cache.critical.read().len(), 1, "cache should have one block" ); assert!( - cache.critical.read().in_memory.peek(&root).is_some(), + cache.critical.read().peek(&root).is_some(), "newly inserted block should exist in memory" ); } @@ -1220,11 +657,11 @@ mod test { assert!(matches!(availability, Availability::Available(_))); } else { assert!(matches!(availability, Availability::MissingComponents(_))); - assert_eq!(cache.critical.read().in_memory.len(), 1); + assert_eq!(cache.critical.read().len(), 1); } } assert!( - cache.critical.read().in_memory.is_empty(), + cache.critical.read().is_empty(), "cache should be empty now that all components available" ); @@ -1247,7 +684,7 @@ mod test { Availability::MissingComponents(root), "should be pending block" ); - assert_eq!(cache.critical.read().in_memory.len(), 1); + assert_eq!(cache.critical.read().len(), 1); } let availability = cache .put_pending_executed_block(pending_block) @@ -1258,447 +695,15 @@ mod test { availability ); assert!( - cache.critical.read().in_memory.is_empty(), - "cache should be empty now that all components available" - ); - } - - #[tokio::test] - async fn overflow_cache_test_overflow() { - type E = MinimalEthSpec; - type T = DiskHarnessType; - let capacity = 4; - let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; - - let mut pending_blocks = VecDeque::new(); - let mut pending_blobs = VecDeque::new(); - let mut roots = VecDeque::new(); - while pending_blobs.len() < capacity + 1 { - let (pending_block, blobs) = availability_pending_block(&harness).await; - if pending_block.num_blobs_expected() == 0 { - // we need blocks with blobs - continue; - } - let root = pending_block.block.canonical_root(); - pending_blocks.push_back(pending_block); - pending_blobs.push_back(blobs); - roots.push_back(root); - } - - for i in 0..capacity { - cache - .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) - .expect("should put block"); - assert_eq!(cache.critical.read().in_memory.len(), i + 1); - } - for root in roots.iter().take(capacity) { - assert!(cache.critical.read().in_memory.peek(root).is_some()); - } - assert_eq!( - cache.critical.read().in_memory.len(), - capacity, - "cache should be full" - ); - // the first block should be the lru entry - assert_eq!( - *cache - .critical - .read() - .in_memory - .peek_lru() - .expect("should exist") - .0, - roots[0], - "first block should be lru" - ); - - cache - .put_pending_executed_block(pending_blocks.pop_front().expect("should have block")) - .expect("should put block"); - assert_eq!( - cache.critical.read().in_memory.len(), - 
capacity, - "cache should be full" - ); - assert!( - cache.critical.read().in_memory.peek(&roots[0]).is_none(), - "first block should be evicted" - ); - assert_eq!( - *cache - .critical - .read() - .in_memory - .peek_lru() - .expect("should exist") - .0, - roots[1], - "second block should be lru" - ); - - assert!(cache - .overflow_store - .load_pending_components(roots[0]) - .expect("should exist") - .is_some()); - - let threshold = capacity * 3 / 4; - cache - .maintain_threshold(threshold, Epoch::new(0)) - .expect("should maintain threshold"); - assert_eq!( - cache.critical.read().in_memory.len(), - threshold, - "cache should have been maintained" - ); - - let store_keys = cache - .overflow_store - .read_keys_on_disk() - .expect("should read keys"); - assert_eq!(store_keys.len(), 2); - assert!(store_keys.contains(&roots[0])); - assert!(store_keys.contains(&roots[1])); - assert!(cache.critical.read().store_keys.contains(&roots[0])); - assert!(cache.critical.read().store_keys.contains(&roots[1])); - - let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); - let expected_blobs = blobs_0.len(); - let mut kzg_verified_blobs = vec![]; - for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { - kzg_verified_blobs.push(gossip_blob.into_inner()); - let availability = cache - .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone()) - .expect("should put blob"); - if blob_index == expected_blobs - 1 { - assert!(matches!(availability, Availability::Available(_))); - } else { - // the first block should be brought back into memory - assert!( - cache.critical.read().in_memory.peek(&roots[0]).is_some(), - "first block should be in memory" - ); - assert!(matches!(availability, Availability::MissingComponents(_))); - } - } - assert_eq!( - cache.critical.read().in_memory.len(), - threshold, - "cache should no longer have the first block" - ); - cache.prune_disk(Epoch::new(0)).expect("should prune disk"); - assert!( - cache - .overflow_store - .load_pending_components(roots[1]) - .expect("no error") - .is_some(), - "second block should still be on disk" + cache.critical.read().len() == 1, + "cache should still have available block until import" ); + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert!( - cache - .overflow_store - .load_pending_components(roots[0]) - .expect("no error") - .is_none(), - "first block should not be on disk" - ); - } - - #[tokio::test] - async fn overflow_cache_test_maintenance() { - type E = MinimalEthSpec; - type T = DiskHarnessType; - let capacity = E::slots_per_epoch() as usize; - let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; - - let n_epochs = 4; - let mut pending_blocks = VecDeque::new(); - let mut pending_blobs = VecDeque::new(); - let mut epoch_count = BTreeMap::new(); - while pending_blobs.len() < n_epochs * capacity { - let (pending_block, blobs) = availability_pending_block(&harness).await; - if pending_block.num_blobs_expected() == 0 { - // we need blocks with blobs - continue; - } - let epoch = pending_block - .block - .as_block() - .slot() - .epoch(E::slots_per_epoch()); - epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); - - pending_blocks.push_back(pending_block); - pending_blobs.push_back(blobs); - } - - for _ in 0..(n_epochs * capacity) { - let pending_block = pending_blocks.pop_front().expect("should have block"); - let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); - let block_root = 
pending_block.block.as_block().canonical_root(); - let expected_blobs = pending_block.num_blobs_expected(); - if expected_blobs > 1 { - // might as well add a blob too - let one_blob = pending_block_blobs - .pop() - .expect("should have at least one blob"); - let kzg_verified_blobs = vec![one_blob.into_inner()]; - // generate random boolean - let block_first = (rand::random::() % 2) == 0; - if block_first { - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should have pending blobs" - ); - let availability = cache - .put_kzg_verified_blobs(block_root, kzg_verified_blobs) - .expect("should put blob"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "availabilty should be pending blobs: {:?}", - availability - ); - } else { - let availability = cache - .put_kzg_verified_blobs(block_root, kzg_verified_blobs) - .expect("should put blob"); - let root = pending_block.block.as_block().canonical_root(); - assert_eq!( - availability, - Availability::MissingComponents(root), - "should be pending block" - ); - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should have pending blobs" - ); - } - } else { - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should be pending blobs" - ); - } - } - - // now we should have a full cache spanning multiple epochs - // run the maintenance routine for increasing epochs and ensure that the cache is pruned - assert_eq!( - cache.critical.read().in_memory.len(), - capacity, - "cache memory should be full" - ); - let store_keys = cache - .overflow_store - .read_keys_on_disk() - .expect("should read keys"); - assert_eq!( - store_keys.len(), - capacity * (n_epochs - 1), - "cache disk should have the rest" - ); - let mut expected_length = n_epochs * capacity; - for (epoch, count) in epoch_count { - cache - .do_maintenance(epoch + 1) - .expect("should run maintenance"); - let disk_keys = cache - .overflow_store - .read_keys_on_disk() - .expect("should read keys") - .len(); - let mem_keys = cache.critical.read().in_memory.len(); - expected_length -= count; - info!( - harness.chain.log, - "EPOCH: {} DISK KEYS: {} MEM KEYS: {} TOTAL: {} EXPECTED: {}", - epoch, - disk_keys, - mem_keys, - (disk_keys + mem_keys), - std::cmp::max(expected_length, capacity * 3 / 4), - ); - assert_eq!( - (disk_keys + mem_keys), - std::cmp::max(expected_length, capacity * 3 / 4), - "cache should be pruned" - ); - } - } - - #[tokio::test] - async fn overflow_cache_test_persist_recover() { - type E = MinimalEthSpec; - type T = DiskHarnessType; - let capacity = E::slots_per_epoch() as usize; - let (harness, cache, _path) = setup_harness_and_cache::(capacity).await; - - let n_epochs = 4; - let mut pending_blocks = VecDeque::new(); - let mut pending_blobs = VecDeque::new(); - let mut epoch_count = BTreeMap::new(); - while pending_blobs.len() < n_epochs * capacity { - let (pending_block, blobs) = availability_pending_block(&harness).await; - if pending_block.num_blobs_expected() == 0 { - // we need blocks with blobs - continue; - } - let epoch = pending_block - .block - .as_block() - .slot() - .epoch(E::slots_per_epoch()); - epoch_count.entry(epoch).or_insert_with(|| 0).add_assign(1); - - 
pending_blocks.push_back(pending_block); - pending_blobs.push_back(blobs); - } - - let mut remaining_blobs = HashMap::new(); - for _ in 0..(n_epochs * capacity) { - let pending_block = pending_blocks.pop_front().expect("should have block"); - let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); - let block_root = pending_block.block.as_block().canonical_root(); - let expected_blobs = pending_block.num_blobs_expected(); - if expected_blobs > 1 { - // might as well add a blob too - let one_blob = pending_block_blobs - .pop() - .expect("should have at least one blob"); - let kzg_verified_blobs = vec![one_blob.into_inner()]; - // generate random boolean - let block_first = (rand::random::() % 2) == 0; - if block_first { - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should have pending blobs" - ); - let availability = cache - .put_kzg_verified_blobs(block_root, kzg_verified_blobs) - .expect("should put blob"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "availabilty should be pending blobs: {:?}", - availability - ); - } else { - let availability = cache - .put_kzg_verified_blobs(block_root, kzg_verified_blobs) - .expect("should put blob"); - let root = pending_block.block.as_block().canonical_root(); - assert_eq!( - availability, - Availability::MissingComponents(root), - "should be pending block" - ); - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should have pending blobs" - ); - } - } else { - let availability = cache - .put_pending_executed_block(pending_block) - .expect("should put block"); - assert!( - matches!(availability, Availability::MissingComponents(_)), - "should be pending blobs" - ); - } - remaining_blobs.insert(block_root, pending_block_blobs); - } - - // now we should have a full cache spanning multiple epochs - // cache should be at capacity - assert_eq!( - cache.critical.read().in_memory.len(), - capacity, - "cache memory should be full" - ); - // write all components to disk - cache.write_all_to_disk().expect("should write all to disk"); - // everything should be on disk now - assert_eq!( - cache - .overflow_store - .read_keys_on_disk() - .expect("should read keys") - .len(), - capacity * n_epochs, - "cache disk should have the rest" - ); - assert_eq!( - cache.critical.read().in_memory.len(), - 0, - "cache memory should be empty" - ); - assert_eq!( - cache.critical.read().store_keys.len(), - n_epochs * capacity, - "cache store should have the rest" - ); - drop(cache); - - // create a new cache with the same store - let recovered_cache = OverflowLRUCache::::new( - new_non_zero_usize(capacity), - harness.chain.store.clone(), - harness.chain.spec.clone(), - ) - .expect("should recover cache"); - // again, everything should be on disk - assert_eq!( - recovered_cache - .overflow_store - .read_keys_on_disk() - .expect("should read keys") - .len(), - capacity * n_epochs, - "cache disk should have the rest" - ); - assert_eq!( - recovered_cache.critical.read().in_memory.len(), - 0, - "cache memory should be empty" - ); - assert_eq!( - recovered_cache.critical.read().store_keys.len(), - n_epochs * capacity, - "cache store should have the rest" + cache.critical.read().is_empty(), + "cache should be empty now that all components available" ); - - // now lets 
insert the remaining blobs until the cache is empty - for (root, blobs) in remaining_blobs { - let additional_blobs = blobs.len(); - let mut kzg_verified_blobs = vec![]; - for (i, gossip_blob) in blobs.into_iter().enumerate() { - kzg_verified_blobs.push(gossip_blob.into_inner()); - let availability = recovered_cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) - .expect("should put blob"); - if i == additional_blobs - 1 { - assert!(matches!(availability, Availability::Available(_))) - } else { - assert!(matches!(availability, Availability::MissingComponents(_))); - } - } - } } #[tokio::test] @@ -1715,12 +720,12 @@ mod test { let mut state_roots = Vec::new(); // Get enough blocks to fill the cache to capacity, ensuring all blocks have blobs while pending_blocks.len() < capacity { - let (pending_block, _) = availability_pending_block(&harness).await; + let (mut pending_block, _) = availability_pending_block(&harness).await; if pending_block.num_blobs_expected() == 0 { // we need blocks with blobs continue; } - let state_root = pending_block.import_data.state.canonical_root(); + let state_root = pending_block.import_data.state.canonical_root().unwrap(); states.push(pending_block.import_data.state.clone()); pending_blocks.push_back(pending_block); state_roots.push(state_root); @@ -1757,7 +762,6 @@ mod test { let diet_block = cache .critical .read() - .in_memory .peek(&block_root) .map(|pending_components| { pending_components @@ -1786,7 +790,7 @@ mod test { // reconstruct the pending block by replaying the block on the parent state let recovered_pending_block = cache .state_lru_cache() - .reconstruct_pending_executed_block(diet_block) + .recover_pending_executed_block(diet_block) .expect("should reconstruct pending block"); // assert the recovered state is the same as the original diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index f8a243bd9e8..cf6eb669d5e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -37,6 +37,10 @@ impl DietAvailabilityPendingExecutedBlock { &self.block } + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } + pub fn num_blobs_expected(&self) -> usize { self.block .message() @@ -110,38 +114,12 @@ impl StateLRUCache { &self, diet_executed_block: DietAvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { - let maybe_state = self.states.write().pop(&diet_executed_block.state_root); - if let Some(state) = maybe_state { - let block_root = diet_executed_block.block.canonical_root(); - Ok(AvailabilityPendingExecutedBlock { - block: diet_executed_block.block, - import_data: BlockImportData { - block_root, - state, - parent_block: diet_executed_block.parent_block, - parent_eth1_finalization_data: diet_executed_block - .parent_eth1_finalization_data, - confirmed_state_roots: diet_executed_block.confirmed_state_roots, - consensus_context: diet_executed_block - .consensus_context - .into_consensus_context(), - }, - payload_verification_outcome: diet_executed_block.payload_verification_outcome, - }) + let state = if let Some(state) = self.states.write().pop(&diet_executed_block.state_root) { + state } else { - self.reconstruct_pending_executed_block(diet_executed_block) - } - } - - /// Reconstruct the `AvailabilityPendingExecutedBlock` by loading the parent - /// state from disk and replaying the block. 
This function does NOT check the - /// LRU cache. - pub fn reconstruct_pending_executed_block( - &self, - diet_executed_block: DietAvailabilityPendingExecutedBlock, - ) -> Result, AvailabilityCheckError> { + self.reconstruct_state(&diet_executed_block)? + }; let block_root = diet_executed_block.block.canonical_root(); - let state = self.reconstruct_state(&diet_executed_block)?; Ok(AvailabilityPendingExecutedBlock { block: diet_executed_block.block, import_data: BlockImportData { @@ -231,10 +209,10 @@ impl StateLRUCache { impl From> for DietAvailabilityPendingExecutedBlock { - fn from(value: AvailabilityPendingExecutedBlock) -> Self { + fn from(mut value: AvailabilityPendingExecutedBlock) -> Self { Self { block: value.block, - state_root: value.import_data.state.canonical_root(), + state_root: value.import_data.state.canonical_root().unwrap(), parent_block: value.import_data.parent_block, parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, confirmed_state_roots: value.import_data.confirmed_state_roots, diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 79d732f51b1..dda699cc6c1 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -122,18 +122,16 @@ impl EarlyAttesterCache { item.committee_lengths .get_committee_length::(request_slot, request_index, spec)?; - let attestation = Attestation { - aggregation_bits: BitList::with_capacity(committee_len) - .map_err(BeaconStateError::from)?, - data: AttestationData { - slot: request_slot, - index: request_index, - beacon_block_root: item.beacon_block_root, - source: item.source, - target: item.target, - }, - signature: AggregateSignature::empty(), - }; + let attestation = Attestation::empty_for_signing( + request_index, + committee_len, + request_slot, + item.beacon_block_root, + item.source, + item.target, + spec, + ) + .map_err(Error::AttestationError)?; metrics::inc_counter(&metrics::BEACON_EARLY_ATTESTER_CACHE_HITS); diff --git a/beacon_node/beacon_chain/src/electra_readiness.rs b/beacon_node/beacon_chain/src/electra_readiness.rs index 42ee743fe6b..551d43f9fd6 100644 --- a/beacon_node/beacon_chain/src/electra_readiness.rs +++ b/beacon_node/beacon_chain/src/electra_readiness.rs @@ -2,9 +2,7 @@ //! transition. use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::http::{ - ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V3, -}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4}; use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; @@ -21,8 +19,8 @@ pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; pub enum ElectraReadiness { /// The execution engine is electra-enabled (as far as we can tell) Ready, - /// We are connected to an execution engine which doesn't support the V3 engine api methods - V3MethodsNotSupported { error: String }, + /// We are connected to an execution engine which doesn't support the V4 engine api methods + V4MethodsNotSupported { error: String }, /// The transition configuration with the EL failed, there might be a problem with /// connectivity, authentication or a difference in configuration. 
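
(Illustrative aside, not part of the diff: a minimal stand-alone sketch of the V4 capability check this file now performs. The struct and function names below are toy stand-ins, not the `execution_layer` API; the real check reads support for `engine_getPayloadV4` / `engine_newPayloadV4` from the engine's exchanged capabilities, as in the hunk that follows.)

struct EngineCapabilities {
    get_payload_v4: bool,
    new_payload_v4: bool,
}

fn electra_readiness(caps: &EngineCapabilities) -> Result<(), String> {
    // Mirror of the readiness check: collect any missing V4 methods into one error string.
    let mut missing = Vec::new();
    if !caps.get_payload_v4 {
        missing.push("engine_getPayloadV4");
    }
    if !caps.new_payload_v4 {
        missing.push("engine_newPayloadV4");
    }
    if missing.is_empty() {
        Ok(())
    } else {
        Err(format!("Required Methods Unsupported: {}", missing.join(" ")))
    }
}

fn main() {
    assert!(electra_readiness(&EngineCapabilities { get_payload_v4: true, new_payload_v4: true }).is_ok());
    assert!(electra_readiness(&EngineCapabilities { get_payload_v4: false, new_payload_v4: true }).is_err());
}
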
ExchangeCapabilitiesFailed { error: String }, @@ -47,7 +45,7 @@ impl fmt::Display for ElectraReadiness { "The --execution-endpoint flag is not specified, this is a \ requirement post-merge" ), - ElectraReadiness::V3MethodsNotSupported { error } => write!( + ElectraReadiness::V4MethodsNotSupported { error } => write!( f, "Execution endpoint does not support Electra methods: {}", error @@ -88,29 +86,23 @@ impl BeaconChain { } } Ok(capabilities) => { - // TODO(electra): Update in the event we get V4s. let mut missing_methods = String::from("Required Methods Unsupported:"); let mut all_good = true; - if !capabilities.get_payload_v3 { + if !capabilities.get_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_GET_PAYLOAD_V3); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V4); all_good = false; } - if !capabilities.forkchoice_updated_v3 { + if !capabilities.new_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_FORKCHOICE_UPDATED_V3); - all_good = false; - } - if !capabilities.new_payload_v3 { - missing_methods.push(' '); - missing_methods.push_str(ENGINE_NEW_PAYLOAD_V3); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4); all_good = false; } if all_good { ElectraReadiness::Ready } else { - ElectraReadiness::V3MethodsNotSupported { + ElectraReadiness::V4MethodsNotSupported { error: missing_methods, } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 340f1f9f797..819de1f5c19 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -28,7 +28,6 @@ use state_processing::{ state_advance::Error as StateAdvanceError, BlockProcessingError, BlockReplayError, EpochProcessingError, SlotProcessingError, }; -use std::time::Duration; use task_executor::ShutdownReason; use tokio::task::JoinError; use types::milhouse::Error as MilhouseError; @@ -77,11 +76,6 @@ pub enum BeaconChainError { ProposerSlashingValidationError(ProposerSlashingValidationError), AttesterSlashingValidationError(AttesterSlashingValidationError), BlsExecutionChangeValidationError(BlsExecutionChangeValidationError), - StateSkipTooLarge { - start_slot: Slot, - requested_slot: Slot, - max_task_runtime: Duration, - }, MissingFinalizedStateRoot(Slot), /// Returned when an internal check fails, indicating corrupt data. 
InvariantViolated(String), @@ -226,6 +220,8 @@ pub enum BeaconChainError { LightClientError(LightClientError), UnsupportedFork, MilhouseError(MilhouseError), + AttestationError(AttestationError), + AttestationCommitteeIndexNotSet, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -256,6 +252,7 @@ easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); easy_from_to!(LightClientError, BeaconChainError); easy_from_to!(MilhouseError, BeaconChainError); +easy_from_to!(AttestationError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 31297244e3e..b4005f22fd1 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -546,12 +546,20 @@ impl Eth1ChainBackend for CachingEth1Backend { state.eth1_data().deposit_count }; - match deposit_index.cmp(&deposit_count) { + // [New in Electra:EIP6110] + let deposit_index_limit = + if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { + std::cmp::min(deposit_count, deposit_requests_start_index) + } else { + deposit_count + }; + + match deposit_index.cmp(&deposit_index_limit) { Ordering::Greater => Err(Error::DepositIndexTooHigh), Ordering::Equal => Ok(vec![]), Ordering::Less => { let next = deposit_index; - let last = std::cmp::min(deposit_count, next + E::MaxDeposits::to_u64()); + let last = std::cmp::min(deposit_index_limit, next + E::MaxDeposits::to_u64()); self.core .deposits() @@ -685,7 +693,7 @@ mod test { fn get_eth1_data(i: u64) -> Eth1Data { Eth1Data { block_hash: Hash256::from_low_u64_be(i), - deposit_root: Hash256::from_low_u64_be(u64::max_value() - i), + deposit_root: Hash256::from_low_u64_be(u64::MAX - i), deposit_count: i, } } diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 1fdcfdf8d07..267d56220c9 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -20,6 +20,10 @@ pub struct ServerSentEventHandler { light_client_finality_update_tx: Sender>, light_client_optimistic_update_tx: Sender>, block_reward_tx: Sender>, + proposer_slashing_tx: Sender>, + attester_slashing_tx: Sender>, + bls_to_execution_change_tx: Sender>, + block_gossip_tx: Sender>, log: Logger, } @@ -45,6 +49,10 @@ impl ServerSentEventHandler { let (light_client_finality_update_tx, _) = broadcast::channel(capacity); let (light_client_optimistic_update_tx, _) = broadcast::channel(capacity); let (block_reward_tx, _) = broadcast::channel(capacity); + let (proposer_slashing_tx, _) = broadcast::channel(capacity); + let (attester_slashing_tx, _) = broadcast::channel(capacity); + let (bls_to_execution_change_tx, _) = broadcast::channel(capacity); + let (block_gossip_tx, _) = broadcast::channel(capacity); Self { attestation_tx, @@ -60,6 +68,10 @@ impl ServerSentEventHandler { light_client_finality_update_tx, light_client_optimistic_update_tx, block_reward_tx, + proposer_slashing_tx, + attester_slashing_tx, + bls_to_execution_change_tx, + block_gossip_tx, log, } } @@ -126,6 +138,22 @@ impl ServerSentEventHandler { .block_reward_tx .send(kind) .map(|count| log_count("block reward", count)), + EventKind::ProposerSlashing(_) => self + .proposer_slashing_tx + .send(kind) + .map(|count| log_count("proposer slashing", count)), + EventKind::AttesterSlashing(_) => self + .attester_slashing_tx + .send(kind) + .map(|count| log_count("attester 
slashing", count)), + EventKind::BlsToExecutionChange(_) => self + .bls_to_execution_change_tx + .send(kind) + .map(|count| log_count("bls to execution change", count)), + EventKind::BlockGossip(_) => self + .block_gossip_tx + .send(kind) + .map(|count| log_count("block gossip", count)), }; if let Err(SendError(event)) = result { trace!(self.log, "No receivers registered to listen for event"; "event" => ?event); @@ -184,6 +212,22 @@ impl ServerSentEventHandler { self.block_reward_tx.subscribe() } + pub fn subscribe_attester_slashing(&self) -> Receiver> { + self.attester_slashing_tx.subscribe() + } + + pub fn subscribe_proposer_slashing(&self) -> Receiver> { + self.proposer_slashing_tx.subscribe() + } + + pub fn subscribe_bls_to_execution_change(&self) -> Receiver> { + self.bls_to_execution_change_tx.subscribe() + } + + pub fn subscribe_block_gossip(&self) -> Receiver> { + self.block_gossip_tx.subscribe() + } + pub fn has_attestation_subscribers(&self) -> bool { self.attestation_tx.receiver_count() > 0 } @@ -227,4 +271,20 @@ impl ServerSentEventHandler { pub fn has_block_reward_subscribers(&self) -> bool { self.block_reward_tx.receiver_count() > 0 } + + pub fn has_proposer_slashing_subscribers(&self) -> bool { + self.proposer_slashing_tx.receiver_count() > 0 + } + + pub fn has_attester_slashing_subscribers(&self) -> bool { + self.attester_slashing_tx.receiver_count() > 0 + } + + pub fn has_bls_to_execution_change_subscribers(&self) -> bool { + self.bls_to_execution_change_tx.receiver_count() > 0 + } + + pub fn has_block_gossip_subscribers(&self) -> bool { + self.block_gossip_tx.receiver_count() > 0 + } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index cbffe363422..a6e0d247dc2 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -413,7 +413,7 @@ pub fn get_execution_payload( state.latest_execution_payload_header()?.block_hash(); let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { - Some(get_expected_withdrawals(state, spec)?.into()) + Some(get_expected_withdrawals(state, spec)?.0.into()) } &BeaconState::Bellatrix(_) => None, // These shouldn't happen but they're here to make the pattern irrefutable diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 221bb8b2922..466ab0b67e7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -39,7 +39,7 @@ mod light_client_server_cache; pub mod metrics; pub mod migrate; mod naive_aggregation_pool; -mod observed_aggregates; +pub mod observed_aggregates; mod observed_attesters; mod observed_blob_sidecars; pub mod observed_block_producers; @@ -62,9 +62,10 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, - BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification, - StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, BlockProcessStatus, + ChainSegmentResult, ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, + ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, 
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fc3f032cdc3..064b2b199ff 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -32,18 +32,11 @@ lazy_static! { "beacon_block_processing_successes_total", "Count of blocks processed without error" ); + // Keeping the existing "snapshot_cache" metric name as it would break existing dashboards pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( "beacon_block_processing_snapshot_cache_size", "Count snapshots in the snapshot cache" ); - pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result = try_create_int_counter( - "beacon_block_processing_snapshot_cache_misses", - "Count of snapshot cache misses" - ); - pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES: Result = try_create_int_counter( - "beacon_block_processing_snapshot_cache_clones", - "Count of snapshot cache clones" - ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( @@ -408,8 +401,6 @@ lazy_static! { try_create_histogram("beacon_persist_eth1_cache", "Time taken to persist the eth1 caches"); pub static ref PERSIST_FORK_CHOICE: Result = try_create_histogram("beacon_persist_fork_choice", "Time taken to persist the fork choice struct"); - pub static ref PERSIST_DATA_AVAILABILITY_CHECKER: Result = - try_create_histogram("beacon_persist_data_availability_checker", "Time taken to persist the data availability checker"); /* * Eth1 @@ -866,6 +857,11 @@ lazy_static! 
{ "Duration between the start of the block's slot and the time the block was observed.", ); + pub static ref BEACON_BLOCK_DELAY_CONSENSUS_VERIFICATION_TIME: Result = try_create_int_gauge( + "beacon_block_delay_consensus_verification_time", + "The time taken to verify the block within Lighthouse", + ); + pub static ref BEACON_BLOCK_DELAY_EXECUTION_TIME: Result = try_create_int_gauge( "beacon_block_delay_execution_time", "The duration in verifying the block with the execution layer.", @@ -1199,6 +1195,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { } let attestation_stats = beacon_chain.op_pool.attestation_stats(); + let chain_metrics = beacon_chain.metrics(); set_gauge_by_usize( &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, @@ -1207,7 +1204,7 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { set_gauge_by_usize( &BEACON_REQRESP_PRE_IMPORT_CACHE_SIZE, - beacon_chain.reqresp_pre_import_cache.read().len(), + chain_metrics.reqresp_pre_import_cache_len, ); let da_checker_metrics = beacon_chain.data_availability_checker.metrics(); @@ -1219,10 +1216,6 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { &DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE, da_checker_metrics.state_cache_size, ); - set_gauge_by_usize( - &DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE, - da_checker_metrics.num_store_entries, - ); if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() { set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size); diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index ad597bf92aa..08b2a51720d 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -703,6 +703,11 @@ impl, Cold: ItemStore> BackgroundMigrator, + slot: Slot, +} + +// A custom implementation of `TreeHash` such that: +// AttestationKey(data, None).tree_hash_root() == data.tree_hash_root() +// AttestationKey(data, Some(index)).tree_hash_root() == (data, index).tree_hash_root() +// This is necessary because pre-Electra, the validator will ask for the tree_hash_root() +// of the `AttestationData` +impl TreeHash for AttestationKey { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Container + } + + fn tree_hash_packed_encoding(&self) -> SmallVec<[u8; 32]> { + unreachable!("AttestationKey should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("AttestationKey should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + match self.committee_index { + None => self.data_root, // Return just the data root if no committee index is present + Some(index) => { + // Combine the hash of the data with the hash of the index + let mut hasher = MerkleHasher::with_leaves(2); + hasher + .write(self.data_root.as_bytes()) + .expect("should write data hash"); + hasher + .write(&index.to_le_bytes()) + .expect("should write index"); + hasher.finish().expect("should give tree hash") + } + } + } +} + +impl AttestationKey { + pub fn from_attestation_ref(attestation: AttestationRef) -> Result { + let slot = attestation.data().slot; + match attestation { + AttestationRef::Base(att) => Ok(Self { + data_root: att.data.tree_hash_root(), + committee_index: None, + slot, + }), + AttestationRef::Electra(att) => { + let committee_index = att + .committee_bits + .iter() + .enumerate() + .filter_map(|(i, bit)| if bit { Some(i) } else { None }) + .at_most_one() + .map_err(|_| Error::MoreThanOneCommitteeBitSet)? 
+ .ok_or(Error::NoCommitteeBitSet)?; + + Ok(Self { + data_root: att.data.tree_hash_root(), + committee_index: Some(committee_index as u64), + slot, + }) + } + } + } + + pub fn new_base(data: &AttestationData) -> Self { + let slot = data.slot; + Self { + data_root: data.tree_hash_root(), + committee_index: None, + slot, + } + } + + pub fn new_electra(slot: Slot, data_root: Hash256, committee_index: CommitteeIndex) -> Self { + Self { + data_root, + committee_index: Some(committee_index), + slot, + } + } + + pub fn new_base_from_slot_and_root(slot: Slot, data_root: Hash256) -> Self { + Self { + data_root, + committee_index: None, + slot, + } + } +} + +impl SlotData for AttestationKey { + fn get_slot(&self) -> Slot { + self.slot + } +} + /// The number of slots that will be stored in the pool. /// /// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all items @@ -46,6 +156,10 @@ pub enum Error { /// The given `aggregation_bits` field had more than one signature. The number of /// signatures found is included. MoreThanOneAggregationBitSet(usize), + /// The electra attestation has more than one committee bit set + MoreThanOneCommitteeBitSet, + /// The electra attestation has NO committee bit set + NoCommitteeBitSet, /// We have reached the maximum number of unique items that can be stored in a /// slot. This is a DoS protection function. ReachedMaxItemsPerSlot(usize), @@ -59,12 +173,15 @@ pub enum Error { /// Implemented for items in the `NaiveAggregationPool`. Requires that items implement `SlotData`, /// which means they have an associated slot. This handles aggregation of items that are inserted. -pub trait AggregateMap { +pub trait AggregateMap +where + for<'a> ::Reference<'a>: SlotData, +{ /// `Key` should be a hash of `Data`. type Key; /// The item stored in the map - type Value: Clone + SlotData; + type Value: Clone + SlotData + AsReference; /// The unique fields of `Value`, hashed to create `Key`. type Data: SlotData; @@ -73,7 +190,10 @@ pub trait AggregateMap { fn new(initial_capacity: usize) -> Self; /// Insert a `Value` into `Self`, returning a result. - fn insert(&mut self, value: &Self::Value) -> Result; + fn insert( + &mut self, + value: ::Reference<'_>, + ) -> Result; /// Get a `Value` from `Self` based on `Data`. fn get(&self, data: &Self::Data) -> Option; @@ -81,9 +201,6 @@ pub trait AggregateMap { /// Get a reference to the inner `HashMap`. fn get_map(&self) -> &HashMap; - /// Get a `Value` from `Self` based on `Key`, which is a hash of `Data`. - fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value>; - /// The number of items store in `Self`. fn len(&self) -> usize; @@ -103,13 +220,13 @@ pub trait AggregateMap { /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// `attestation` are from the same slot. pub struct AggregatedAttestationMap { - map: HashMap>, + map: HashMap>, } impl AggregateMap for AggregatedAttestationMap { - type Key = AttestationDataRoot; + type Key = AttestationKeyRoot; type Value = Attestation; - type Data = AttestationData; + type Data = AttestationKey; /// Create an empty collection with the given `initial_capacity`. fn new(initial_capacity: usize) -> Self { @@ -121,48 +238,45 @@ impl AggregateMap for AggregatedAttestationMap { /// Insert an attestation into `self`, aggregating it into the pool. /// /// The given attestation (`a`) must only have one signature. 
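
(Illustrative aside, not part of the diff: the single-bit requirement here and in the committee-bit handling above is enforced with itertools' `at_most_one()`. A small self-contained sketch of that pattern, using only itertools and toy error names that stand in for `NoCommitteeBitSet` / `MoreThanOneCommitteeBitSet`.)

use itertools::Itertools;

#[derive(Debug, PartialEq)]
enum BitError {
    NoneSet,
    MoreThanOneSet,
}

// Exactly-one-bit check: zero set bits and multiple set bits map to distinct errors.
fn single_set_bit(bits: &[bool]) -> Result<usize, BitError> {
    bits.iter()
        .enumerate()
        .filter_map(|(i, bit)| if *bit { Some(i) } else { None })
        .at_most_one()
        .map_err(|_| BitError::MoreThanOneSet)?
        .ok_or(BitError::NoneSet)
}

fn main() {
    assert_eq!(single_set_bit(&[false, true, false]), Ok(1));
    assert_eq!(single_set_bit(&[false, false]), Err(BitError::NoneSet));
    assert_eq!(single_set_bit(&[true, true]), Err(BitError::MoreThanOneSet));
}
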
- fn insert(&mut self, a: &Self::Value) -> Result { + fn insert(&mut self, a: AttestationRef) -> Result { let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_CORE_INSERT); - let set_bits = a - .aggregation_bits + let aggregation_bit = *a + .set_aggregation_bits() .iter() - .enumerate() - .filter(|(_i, bit)| *bit) - .map(|(i, _bit)| i) - .collect::>(); - - let committee_index = set_bits - .first() - .copied() + .at_most_one() + .map_err(|iter| Error::MoreThanOneAggregationBitSet(iter.count()))? .ok_or(Error::NoAggregationBitsSet)?; - if set_bits.len() > 1 { - return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); - } - - let attestation_data_root = a.data.tree_hash_root(); + let attestation_key = AttestationKey::from_attestation_ref(a)?; + let attestation_key_root = attestation_key.tree_hash_root(); - if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) { + if let Some(existing_attestation) = self.map.get_mut(&attestation_key_root) { if existing_attestation - .aggregation_bits - .get(committee_index) + .get_aggregation_bit(aggregation_bit) .map_err(|_| Error::InconsistentBitfieldLengths)? { - Ok(InsertOutcome::SignatureAlreadyKnown { committee_index }) + Ok(InsertOutcome::SignatureAlreadyKnown { + committee_index: aggregation_bit, + }) } else { let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_AGG_POOL_AGGREGATION); existing_attestation.aggregate(a); - Ok(InsertOutcome::SignatureAggregated { committee_index }) + Ok(InsertOutcome::SignatureAggregated { + committee_index: aggregation_bit, + }) } } else { if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT { return Err(Error::ReachedMaxItemsPerSlot(MAX_ATTESTATIONS_PER_SLOT)); } - self.map.insert(attestation_data_root, a.clone()); - Ok(InsertOutcome::NewItemInserted { committee_index }) + self.map + .insert(attestation_key_root, a.clone_as_attestation()); + Ok(InsertOutcome::NewItemInserted { + committee_index: aggregation_bit, + }) } } @@ -177,11 +291,6 @@ impl AggregateMap for AggregatedAttestationMap { &self.map } - /// Returns an aggregated `Attestation` with the given `root`, if any. - fn get_by_root(&self, root: &Self::Key) -> Option<&Self::Value> { - self.map.get(root) - } - fn len(&self) -> usize { self.map.len() } @@ -288,11 +397,6 @@ impl AggregateMap for SyncContributionAggregateMap { &self.map } - /// Returns an aggregated `SyncCommitteeContribution` with the given `root`, if any. - fn get_by_root(&self, root: &SyncDataRoot) -> Option<&SyncCommitteeContribution> { - self.map.get(root) - } - fn len(&self) -> usize { self.map.len() } @@ -336,12 +440,20 @@ impl AggregateMap for SyncContributionAggregateMap { /// `current_slot - SLOTS_RETAINED` will be removed and any future item with a slot lower /// than that will also be refused. Pruning is done automatically based upon the items it /// receives and it can be triggered manually. -pub struct NaiveAggregationPool { +pub struct NaiveAggregationPool +where + T: AggregateMap, + for<'a> ::Reference<'a>: SlotData, +{ lowest_permissible_slot: Slot, maps: HashMap, } -impl Default for NaiveAggregationPool { +impl Default for NaiveAggregationPool +where + T: AggregateMap, + for<'a> ::Reference<'a>: SlotData, +{ fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), @@ -350,7 +462,11 @@ impl Default for NaiveAggregationPool { } } -impl NaiveAggregationPool { +impl NaiveAggregationPool +where + T: AggregateMap, + for<'a> ::Reference<'a>: SlotData, +{ /// Insert an item into `self`, aggregating it into the pool. 
/// /// The given item must only have one signature and have an @@ -358,7 +474,10 @@ impl NaiveAggregationPool { /// /// The pool may be pruned if the given item has a slot higher than any /// previously seen. - pub fn insert(&mut self, item: &T::Value) -> Result { + pub fn insert( + &mut self, + item: ::Reference<'_>, + ) -> Result { let _timer = T::start_insert_timer(); let slot = item.get_slot(); let lowest_permissible_slot = self.lowest_permissible_slot; @@ -412,13 +531,6 @@ impl NaiveAggregationPool { .and_then(|map| map.get(data)) } - /// Returns an aggregated `T::Value` with the given `slot` and `root`, if any. - pub fn get_by_slot_and_root(&self, slot: Slot, root: &T::Key) -> Option { - self.maps - .get(&slot) - .and_then(|map| map.get_by_root(root).cloned()) - } - /// Iterate all items in all slots of `self`. pub fn iter(&self) -> impl Iterator { self.maps.values().flat_map(|map| map.get_map().values()) @@ -467,18 +579,30 @@ mod tests { use super::*; use ssz_types::BitList; use store::BitVector; + use tree_hash::TreeHash; use types::{ test_utils::{generate_deterministic_keypair, test_random_instance}, - Fork, Hash256, SyncCommitteeMessage, + Attestation, AttestationBase, AttestationElectra, Fork, Hash256, SyncCommitteeMessage, }; type E = types::MainnetEthSpec; - fn get_attestation(slot: Slot) -> Attestation { - let mut a: Attestation = test_random_instance(); + fn get_attestation_base(slot: Slot) -> Attestation { + let mut a: AttestationBase = test_random_instance(); a.data.slot = slot; a.aggregation_bits = BitList::with_capacity(4).expect("should create bitlist"); - a + Attestation::Base(a) + } + + fn get_attestation_electra(slot: Slot) -> Attestation { + let mut a: AttestationElectra = test_random_instance(); + a.data.slot = slot; + a.aggregation_bits = BitList::with_capacity(4).expect("should create bitlist"); + a.committee_bits = BitVector::new(); + a.committee_bits + .set(0, true) + .expect("should set committee bit"); + Attestation::Electra(a) } fn get_sync_contribution(slot: Slot) -> SyncCommitteeContribution { @@ -521,9 +645,16 @@ mod tests { } fn unset_attestation_bit(a: &mut Attestation, i: usize) { - a.aggregation_bits - .set(i, false) - .expect("should unset aggregation bit") + match a { + Attestation::Base(ref mut att) => att + .aggregation_bits + .set(i, false) + .expect("should unset aggregation bit"), + Attestation::Electra(ref mut att) => att + .aggregation_bits + .set(i, false) + .expect("should unset aggregation bit"), + } } fn unset_sync_contribution_bit(a: &mut SyncCommitteeContribution, i: usize) { @@ -533,19 +664,19 @@ mod tests { } fn mutate_attestation_block_root(a: &mut Attestation, block_root: Hash256) { - a.data.beacon_block_root = block_root + a.data_mut().beacon_block_root = block_root } fn mutate_attestation_slot(a: &mut Attestation, slot: Slot) { - a.data.slot = slot + a.data_mut().slot = slot } fn attestation_block_root_comparator(a: &Attestation, block_root: Hash256) -> bool { - a.data.beacon_block_root == block_root + a.data().beacon_block_root == block_root } - fn key_from_attestation(a: &Attestation) -> AttestationData { - a.data.clone() + fn key_from_attestation(a: &Attestation) -> AttestationKey { + AttestationKey::from_attestation_ref(a.to_ref()).expect("should create attestation key") } fn mutate_sync_contribution_block_root( @@ -570,6 +701,45 @@ mod tests { SyncContributionData::from_contribution(a) } + #[test] + fn attestation_key_tree_hash_tests() { + let attestation_base = get_attestation_base(Slot::new(42)); + // for a base 
attestation, the tree_hash_root() of the key should be the same as the tree_hash_root() of the data + let attestation_key_base = AttestationKey::from_attestation_ref(attestation_base.to_ref()) + .expect("should create attestation key"); + assert_eq!( + attestation_key_base.tree_hash_root(), + attestation_base.data().tree_hash_root() + ); + let mut attestation_electra = get_attestation_electra(Slot::new(42)); + // for an electra attestation, the tree_hash_root() of the key should be different from the tree_hash_root() of the data + let attestation_key_electra = + AttestationKey::from_attestation_ref(attestation_electra.to_ref()) + .expect("should create attestation key"); + assert_ne!( + attestation_key_electra.tree_hash_root(), + attestation_electra.data().tree_hash_root() + ); + // for an electra attestation, the tree_hash_root() of the key should be dependent on which committee bit is set + let committe_bits = attestation_electra + .committee_bits_mut() + .expect("should get committee bits"); + committe_bits + .set(0, false) + .expect("should set committee bit"); + committe_bits + .set(1, true) + .expect("should set committee bit"); + let new_attestation_key_electra = + AttestationKey::from_attestation_ref(attestation_electra.to_ref()) + .expect("should create attestation key"); + // this new key should have a different tree_hash_root() than the previous key + assert_ne!( + attestation_key_electra.tree_hash_root(), + new_attestation_key_electra.tree_hash_root() + ); + } + macro_rules! test_suite { ( $mod_name: ident, @@ -592,10 +762,10 @@ mod tests { let mut a = $get_method_name(Slot::new(0)); let mut pool: NaiveAggregationPool<$map_type> = - NaiveAggregationPool::default(); + NaiveAggregationPool::<$map_type>::default(); assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Err(Error::NoAggregationBitsSet), "should not accept item without any signatures" ); @@ -603,12 +773,12 @@ mod tests { $sign_method_name(&mut a, 0, Hash256::random()); assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), "should accept new item" ); assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }), "should acknowledge duplicate signature" ); @@ -621,7 +791,7 @@ mod tests { $sign_method_name(&mut a, 1, Hash256::random()); assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Err(Error::MoreThanOneAggregationBitSet(2)), "should not accept item with multiple signatures" ); @@ -637,15 +807,15 @@ mod tests { $sign_method_name(&mut a_1, 1, genesis_validators_root); let mut pool: NaiveAggregationPool<$map_type> = - NaiveAggregationPool::default(); + NaiveAggregationPool::<$map_type>::default(); assert_eq!( - pool.insert(&a_0), + pool.insert(a_0.as_reference()), Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), "should accept a_0" ); assert_eq!( - pool.insert(&a_1), + pool.insert(a_1.as_reference()), Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }), "should accept a_1" ); @@ -655,7 +825,7 @@ mod tests { .expect("should not error while getting attestation"); let mut a_01 = a_0.clone(); - a_01.aggregate(&a_1); + a_01.aggregate(a_1.as_reference()); assert_eq!(retrieved, a_01, "retrieved item should be aggregated"); @@ -671,7 +841,7 @@ mod tests { $block_root_mutator(&mut a_different, different_root); assert_eq!( - pool.insert(&a_different), + pool.insert(a_different.as_reference()), Ok(InsertOutcome::NewItemInserted { committee_index: 2 }), 
"should accept a_different" ); @@ -690,7 +860,7 @@ mod tests { $sign_method_name(&mut base, 0, Hash256::random()); let mut pool: NaiveAggregationPool<$map_type> = - NaiveAggregationPool::default(); + NaiveAggregationPool::<$map_type>::default(); for i in 0..SLOTS_RETAINED * 2 { let slot = Slot::from(i); @@ -698,7 +868,7 @@ mod tests { $slot_mutator(&mut a, slot); assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), "should accept new item" ); @@ -739,7 +909,7 @@ mod tests { $sign_method_name(&mut base, 0, Hash256::random()); let mut pool: NaiveAggregationPool<$map_type> = - NaiveAggregationPool::default(); + NaiveAggregationPool::<$map_type>::default(); for i in 0..=$item_limit { let mut a = base.clone(); @@ -747,13 +917,13 @@ mod tests { if i < $item_limit { assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Ok(InsertOutcome::NewItemInserted { committee_index: 0 }), "should accept item below limit" ); } else { assert_eq!( - pool.insert(&a), + pool.insert(a.as_reference()), Err(Error::ReachedMaxItemsPerSlot($item_limit)), "should not accept item above limit" ); @@ -765,8 +935,21 @@ mod tests { } test_suite! { - attestation_tests, - get_attestation, + attestation_tests_base, + get_attestation_base, + sign_attestation, + unset_attestation_bit, + mutate_attestation_block_root, + mutate_attestation_slot, + attestation_block_root_comparator, + key_from_attestation, + AggregatedAttestationMap, + MAX_ATTESTATIONS_PER_SLOT + } + + test_suite! { + attestation_tests_electra, + get_attestation_electra, sign_attestation, unset_attestation_bit, mutate_attestation_block_root, diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index ab00aefcd3e..00476bfe7af 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -6,22 +6,33 @@ use ssz_types::{BitList, BitVector}; use std::collections::HashMap; use std::marker::PhantomData; use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; use types::consts::altair::{ SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, }; use types::slot_data::SlotData; -use types::{Attestation, EthSpec, Hash256, Slot, SyncCommitteeContribution}; +use types::{ + Attestation, AttestationData, AttestationRef, EthSpec, Hash256, Slot, SyncCommitteeContribution, +}; pub type ObservedSyncContributions = ObservedAggregates< SyncCommitteeContribution, E, BitVector<::SyncSubcommitteeSize>, >; -pub type ObservedAggregateAttestations = ObservedAggregates< - Attestation, - E, - BitList<::MaxValidatorsPerCommittee>, ->; +pub type ObservedAggregateAttestations = + ObservedAggregates, E, BitList<::MaxValidatorsPerSlot>>; + +/// Attestation data augmented with committee index +/// +/// This is hashed and used to key the map of observed aggregate attestations. This is important +/// post-Electra where the attestation data committee index is 0 and we want to avoid accidentally +/// comparing aggregation bits for *different* committees. +#[derive(TreeHash)] +pub struct ObservedAttestationKey { + pub committee_index: u64, + pub attestation_data: AttestationData, +} /// A trait use to associate capacity constants with the type being stored in `ObservedAggregates`. pub trait Consts { @@ -95,35 +106,60 @@ pub trait SubsetItem { /// Returns the item that gets stored in `ObservedAggregates` for later subset /// comparison with incoming aggregates. 
- fn get_item(&self) -> Self::Item; + fn get_item(&self) -> Result; /// Returns a unique value that keys the object to the item that is being stored /// in `ObservedAggregates`. - fn root(&self) -> Hash256; + fn root(&self) -> Result; } -impl SubsetItem for Attestation { - type Item = BitList; +impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> { + type Item = BitList; fn is_subset(&self, other: &Self::Item) -> bool { - self.aggregation_bits.is_subset(other) + match self { + Self::Base(att) => { + if let Ok(extended_aggregation_bits) = att.extend_aggregation_bits() { + return extended_aggregation_bits.is_subset(other); + } + false + } + Self::Electra(att) => att.aggregation_bits.is_subset(other), + } } fn is_superset(&self, other: &Self::Item) -> bool { - other.is_subset(&self.aggregation_bits) + match self { + Self::Base(att) => { + if let Ok(extended_aggregation_bits) = att.extend_aggregation_bits() { + return other.is_subset(&extended_aggregation_bits); + } + false + } + Self::Electra(att) => other.is_subset(&att.aggregation_bits), + } } /// Returns the sync contribution aggregation bits. - fn get_item(&self) -> Self::Item { - self.aggregation_bits.clone() + fn get_item(&self) -> Result { + match self { + Self::Base(att) => att + .extend_aggregation_bits() + .map_err(|_| Error::GetItemError), + Self::Electra(att) => Ok(att.aggregation_bits.clone()), + } } - /// Returns the hash tree root of the attestation data. - fn root(&self) -> Hash256 { - self.data.tree_hash_root() + /// Returns the hash tree root of the attestation data augmented with the committee index. + fn root(&self) -> Result { + Ok(ObservedAttestationKey { + committee_index: self.committee_index().ok_or(Error::RootError)?, + attestation_data: self.data().clone(), + } + .tree_hash_root()) } } -impl SubsetItem for SyncCommitteeContribution { +impl<'a, E: EthSpec> SubsetItem for &'a SyncCommitteeContribution { type Item = BitVector; fn is_subset(&self, other: &Self::Item) -> bool { self.aggregation_bits.is_subset(other) @@ -134,19 +170,19 @@ impl SubsetItem for SyncCommitteeContribution { } /// Returns the sync contribution aggregation bits. - fn get_item(&self) -> Self::Item { - self.aggregation_bits.clone() + fn get_item(&self) -> Result { + Ok(self.aggregation_bits.clone()) } /// Returns the hash tree root of the root, slot and subcommittee index /// of the sync contribution. - fn root(&self) -> Hash256 { - SyncCommitteeData { + fn root(&self) -> Result { + Ok(SyncCommitteeData { root: self.beacon_block_root, slot: self.slot, subcommittee_index: self.subcommittee_index, } - .tree_hash_root() + .tree_hash_root()) } } @@ -173,6 +209,8 @@ pub enum Error { expected: Slot, attestation: Slot, }, + GetItemError, + RootError, } /// A `HashMap` that contains entries related to some `Slot`. @@ -196,7 +234,7 @@ impl SlotHashSet { /// Store the items in self so future observations recognise its existence. pub fn observe_item>( &mut self, - item: &S, + item: S, root: Hash256, ) -> Result { if item.get_slot() != self.slot { @@ -215,7 +253,7 @@ impl SlotHashSet { // If true, we replace the new item with its existing subset. This allows us // to hold fewer items in the list. 
} else if item.is_superset(existing) { - *existing = item.get_item(); + *existing = item.get_item()?; return Ok(ObserveOutcome::New); } } @@ -233,7 +271,7 @@ impl SlotHashSet { return Err(Error::ReachedMaxObservationsPerSlot(self.max_capacity)); } - let item = item.get_item(); + let item = item.get_item()?; self.map.entry(root).or_default().push(item); Ok(ObserveOutcome::New) } @@ -242,7 +280,7 @@ impl SlotHashSet { /// the given root and slot. pub fn is_known_subset>( &self, - item: &S, + item: S, root: Hash256, ) -> Result { if item.get_slot() != self.slot { @@ -264,16 +302,43 @@ impl SlotHashSet { } } +/// Trait for observable items that can be observed from their reference type. +/// +/// This is used to make observations for `Attestation`s from `AttestationRef`s. +pub trait AsReference { + type Reference<'a> + where + Self: 'a; + + fn as_reference(&self) -> Self::Reference<'_>; +} + +impl AsReference for Attestation { + type Reference<'a> = AttestationRef<'a, E>; + + fn as_reference(&self) -> AttestationRef<'_, E> { + self.to_ref() + } +} + +impl AsReference for SyncCommitteeContribution { + type Reference<'a> = &'a Self; + + fn as_reference(&self) -> &Self { + self + } +} + /// Stores the roots of objects for some number of `Slots`, so we can determine if /// these have previously been seen on the network. -pub struct ObservedAggregates { +pub struct ObservedAggregates { lowest_permissible_slot: Slot, sets: Vec>, _phantom_spec: PhantomData, _phantom_tree_hash: PhantomData, } -impl Default for ObservedAggregates { +impl Default for ObservedAggregates { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), @@ -284,17 +349,22 @@ impl Default for ObservedAggregates, E: EthSpec, I> ObservedAggregates { +impl ObservedAggregates +where + T: Consts + AsReference, + E: EthSpec, + for<'a> T::Reference<'a>: SubsetItem + SlotData, +{ /// Store `item` in `self` keyed at `root`. /// /// `root` must equal `item.root::()`. pub fn observe_item( &mut self, - item: &T, + item: T::Reference<'_>, root_opt: Option, ) -> Result { let index = self.get_set_index(item.get_slot())?; - let root = root_opt.unwrap_or_else(|| item.root()); + let root = root_opt.map_or_else(|| item.root(), Ok)?; self.sets .get_mut(index) @@ -307,7 +377,11 @@ impl, E: EthSpec, I> ObservedAggrega /// /// `root` must equal `item.root::()`. 
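
(Illustrative aside, not part of the diff: the `AsReference` trait introduced above combines a generic associated type with a higher-ranked `for<'a>` bound, so the pools store owned values but accept the cheap reference form on insert/observe. A minimal self-contained sketch of that pattern with toy types and the standard library only.)

trait SlotData {
    fn get_slot(&self) -> u64;
}

trait AsReference {
    type Reference<'a>
    where
        Self: 'a;
    fn as_reference(&self) -> Self::Reference<'_>;
}

// Toy owned item and its borrowed view.
struct Item {
    slot: u64,
    bits: Vec<bool>,
}
struct ItemRef<'a> {
    slot: u64,
    bits: &'a [bool],
}

impl AsReference for Item {
    type Reference<'a> = ItemRef<'a>;
    fn as_reference(&self) -> ItemRef<'_> {
        ItemRef { slot: self.slot, bits: &self.bits }
    }
}

impl SlotData for ItemRef<'_> {
    fn get_slot(&self) -> u64 {
        self.slot
    }
}

// A pool generic over any `T` whose *reference type* carries a slot.
struct Pool<T: AsReference> {
    items: Vec<T>,
}

impl<T> Pool<T>
where
    T: AsReference,
    for<'a> T::Reference<'a>: SlotData,
{
    fn observe(&mut self, item: T::Reference<'_>) -> u64 {
        // The real pools aggregate/observe here; the sketch just reads the slot.
        item.get_slot()
    }
}

fn main() {
    let a = Item { slot: 7, bits: vec![true, false] };
    let mut pool: Pool<Item> = Pool { items: Vec::new() };
    assert_eq!(pool.observe(a.as_reference()), 7);
}
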
#[allow(clippy::wrong_self_convention)] - pub fn is_known_subset(&mut self, item: &T, root: Hash256) -> Result { + pub fn is_known_subset( + &mut self, + item: T::Reference<'_>, + root: Hash256, + ) -> Result { let index = self.get_set_index(item.get_slot())?; self.sets @@ -399,14 +473,15 @@ impl, E: EthSpec, I> ObservedAggrega #[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{test_utils::test_random_instance, Hash256}; + use types::{test_utils::test_random_instance, AttestationBase, Hash256}; type E = types::MainnetEthSpec; fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation { - let mut a: Attestation = test_random_instance(); - a.data.slot = slot; - a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root); + let a: AttestationBase = test_random_instance(); + let mut a = Attestation::Base(a); + a.data_mut().slot = slot; + a.data_mut().beacon_block_root = Hash256::from_low_u64_be(beacon_block_root); a } @@ -432,12 +507,15 @@ mod tests { for a in &items { assert_eq!( - store.is_known_subset(a, a.root()), + store.is_known_subset( + a.as_reference(), + a.as_reference().root().unwrap() + ), Ok(false), "should indicate an unknown attestation is unknown" ); assert_eq!( - store.observe_item(a, None), + store.observe_item(a.as_reference(), None), Ok(ObserveOutcome::New), "should observe new attestation" ); @@ -445,12 +523,18 @@ mod tests { for a in &items { assert_eq!( - store.is_known_subset(a, a.root()), + store.is_known_subset( + a.as_reference(), + a.as_reference().root().unwrap() + ), Ok(true), "should indicate a known attestation is known" ); assert_eq!( - store.observe_item(a, Some(a.root())), + store.observe_item( + a.as_reference(), + Some(a.as_reference().root().unwrap()) + ), Ok(ObserveOutcome::Subset), "should acknowledge an existing attestation" ); diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 04861fbe318..969d03a11b6 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,7 +1,6 @@ use derivative::Derivative; use smallvec::{smallvec, SmallVec}; -use ssz::{Decode, Encode}; -use state_processing::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; +use state_processing::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; use std::marker::PhantomData; use types::{ @@ -34,7 +33,7 @@ pub struct ObservedOperations, E: EthSpec> { /// Was the observed operation new and valid for further processing, or a useless duplicate? 
#[derive(Debug, PartialEq, Eq, Clone)] -pub enum ObservationOutcome { +pub enum ObservationOutcome { New(SigVerifiedOp), AlreadyKnown, } @@ -62,15 +61,13 @@ impl ObservableOperation for ProposerSlashing { impl ObservableOperation for AttesterSlashing { fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> { let attestation_1_indices = self - .attestation_1 - .attesting_indices - .iter() + .attestation_1() + .attesting_indices_iter() .copied() .collect::>(); let attestation_2_indices = self - .attestation_2 - .attesting_indices - .iter() + .attestation_2() + .attesting_indices_iter() .copied() .collect::>(); attestation_1_indices diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 8297ea93457..8961a74c3d2 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,4 +1,4 @@ -use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV11, PersistedForkChoiceStoreV17}; +use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV17; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; @@ -7,37 +7,12 @@ use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. pub type PersistedForkChoice = PersistedForkChoiceV17; -#[superstruct( - variants(V11, V17), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V17), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, - #[superstruct(only(V11))] - pub fork_choice_store: PersistedForkChoiceStoreV11, - #[superstruct(only(V17))] pub fork_choice_store: PersistedForkChoiceStoreV17, } -impl Into for PersistedForkChoiceV11 { - fn into(self) -> PersistedForkChoice { - PersistedForkChoice { - fork_choice: self.fork_choice, - fork_choice_store: self.fork_choice_store.into(), - } - } -} - -impl Into for PersistedForkChoice { - fn into(self) -> PersistedForkChoiceV11 { - PersistedForkChoiceV11 { - fork_choice: self.fork_choice, - fork_choice_store: self.fork_choice_store.into(), - } - } -} - macro_rules! impl_store_item { ($type:ty) => { impl StoreItem for $type { @@ -56,5 +31,4 @@ macro_rules! impl_store_item { }; } -impl_store_item!(PersistedForkChoiceV11); impl_store_item!(PersistedForkChoiceV17); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 63eb72c43ab..4f7770e22c6 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,7 +1,6 @@ //! Utilities for managing database schema changes. -mod migration_schema_v17; -mod migration_schema_v18; -mod migration_schema_v19; +mod migration_schema_v20; +mod migration_schema_v21; use crate::beacon_chain::BeaconChainTypes; use crate::types::ChainSpec; @@ -52,30 +51,22 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(16) are deprecated. + // Migrations from before SchemaVersion(19) are deprecated. 
// - (SchemaVersion(16), SchemaVersion(17)) => { - let ops = migration_schema_v17::upgrade_to_v17::(db.clone(), log)?; + (SchemaVersion(19), SchemaVersion(20)) => { + let ops = migration_schema_v20::upgrade_to_v20::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } - (SchemaVersion(17), SchemaVersion(16)) => { - let ops = migration_schema_v17::downgrade_from_v17::(db.clone(), log)?; + (SchemaVersion(20), SchemaVersion(19)) => { + let ops = migration_schema_v20::downgrade_from_v20::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } - (SchemaVersion(17), SchemaVersion(18)) => { - let ops = migration_schema_v18::upgrade_to_v18::(db.clone(), log)?; + (SchemaVersion(20), SchemaVersion(21)) => { + let ops = migration_schema_v21::upgrade_to_v21::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } - (SchemaVersion(18), SchemaVersion(17)) => { - let ops = migration_schema_v18::downgrade_from_v18::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(18), SchemaVersion(19)) => { - let ops = migration_schema_v19::upgrade_to_v19::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(19), SchemaVersion(18)) => { - let ops = migration_schema_v19::downgrade_from_v19::(db.clone(), log)?; + (SchemaVersion(21), SchemaVersion(20)) => { + let ops = migration_schema_v21::downgrade_from_v21::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } // Anything else is an error. diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs deleted file mode 100644 index 770cbb8ab55..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::{PersistedForkChoiceV11, PersistedForkChoiceV17}; -use proto_array::core::{SszContainerV16, SszContainerV17}; -use slog::{debug, Logger}; -use ssz::{Decode, Encode}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_fork_choice( - mut fork_choice: PersistedForkChoiceV11, -) -> Result { - let ssz_container_v16 = SszContainerV16::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - Error::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - let ssz_container_v17: SszContainerV17 = ssz_container_v16.try_into().map_err(|e| { - Error::SchemaMigrationError(format!( - "Missing checkpoint during schema migration: {:?}", - e - )) - })?; - fork_choice.fork_choice.proto_array_bytes = ssz_container_v17.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -pub fn downgrade_fork_choice( - mut fork_choice: PersistedForkChoiceV17, -) -> Result { - let ssz_container_v17 = SszContainerV17::from_ssz_bytes( - &fork_choice.fork_choice.proto_array_bytes, - ) - .map_err(|e| { - Error::SchemaMigrationError(format!( - "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", - e - )) - })?; - - let ssz_container_v16: SszContainerV16 = ssz_container_v17.into(); - fork_choice.fork_choice.proto_array_bytes = ssz_container_v16.as_ssz_bytes(); - - Ok(fork_choice.into()) -} - -pub fn upgrade_to_v17( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Get persisted_fork_choice. - let v11 = db - .get_item::(&FORK_CHOICE_DB_KEY)? 
- .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - - let v17 = upgrade_fork_choice(v11)?; - - debug!( - log, - "Removing unused best_justified_checkpoint from fork choice store." - ); - - Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]) -} - -pub fn downgrade_from_v17( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Get persisted_fork_choice. - let v17 = db - .get_item::(&FORK_CHOICE_DB_KEY)? - .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - - let v11 = downgrade_fork_choice(v17)?; - - debug!( - log, - "Adding junk best_justified_checkpoint to fork choice store." - ); - - Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs deleted file mode 100644 index 04a9da84128..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v18.rs +++ /dev/null @@ -1,119 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use slog::{error, info, warn, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{ - get_key_for_col, metadata::BLOB_INFO_KEY, DBColumn, Error, HotColdDB, KeyValueStoreOp, -}; -use types::{Epoch, EthSpec, Hash256, Slot}; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - let spec = db.get_chain_spec(); - let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -fn get_current_epoch( - db: &Arc>, - log: &Logger, -) -> Result { - get_slot_clock::(db, log)? - .and_then(|clock| clock.now()) - .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) - .ok_or(Error::SlotClockUnavailableForMigration) -} - -pub fn upgrade_to_v18( - db: Arc>, - log: Logger, -) -> Result, Error> { - db.heal_freezer_block_roots_at_split()?; - db.heal_freezer_block_roots_at_genesis()?; - info!(log, "Healed freezer block roots"); - - // No-op, even if Deneb has already occurred. The database is probably borked in this case, but - // *maybe* the fork recovery will revert the minority fork and succeed. 
- if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { - let current_epoch = get_current_epoch::(&db, &log)?; - if current_epoch >= deneb_fork_epoch { - warn!( - log, - "Attempting upgrade to v18 schema"; - "info" => "this may not work as Deneb has already been activated" - ); - } else { - info!( - log, - "Upgrading to v18 schema"; - "info" => "ready for Deneb", - "epochs_until_deneb" => deneb_fork_epoch - current_epoch - ); - } - } else { - info!( - log, - "Upgrading to v18 schema"; - "info" => "ready for Deneb once it is scheduled" - ); - } - Ok(vec![]) -} - -pub fn downgrade_from_v18( - db: Arc>, - log: Logger, -) -> Result, Error> { - // We cannot downgrade from V18 once the Deneb fork has been activated, because there will - // be blobs and blob metadata in the database that aren't understood by the V17 schema. - if let Some(deneb_fork_epoch) = db.get_chain_spec().deneb_fork_epoch { - let current_epoch = get_current_epoch::(&db, &log)?; - if current_epoch >= deneb_fork_epoch { - error!( - log, - "Deneb already active: v18+ is mandatory"; - "current_epoch" => current_epoch, - "deneb_fork_epoch" => deneb_fork_epoch, - ); - return Err(Error::UnableToDowngrade); - } else { - info!( - log, - "Downgrading to v17 schema"; - "info" => "you will need to upgrade before Deneb", - "epochs_until_deneb" => deneb_fork_epoch - current_epoch - ); - } - } else { - info!( - log, - "Downgrading to v17 schema"; - "info" => "you need to upgrade before Deneb", - ); - } - - let ops = vec![KeyValueStoreOp::DeleteKey(get_key_for_col( - DBColumn::BeaconMeta.into(), - BLOB_INFO_KEY.as_bytes(), - ))]; - - Ok(ops) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs deleted file mode 100644 index 578e9bad314..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs +++ /dev/null @@ -1,65 +0,0 @@ -use crate::beacon_chain::BeaconChainTypes; -use slog::{debug, info, Logger}; -use std::sync::Arc; -use store::{get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp}; - -pub fn upgrade_to_v19( - db: Arc>, - log: Logger, -) -> Result, Error> { - let mut hot_delete_ops = vec![]; - let mut blob_keys = vec![]; - let column = DBColumn::BeaconBlob; - - debug!(log, "Migrating from v18 to v19"); - // Iterate through the blobs on disk. 
- for res in db.hot_db.iter_column_keys::>(column) { - let key = res?; - let key_col = get_key_for_col(column.as_str(), &key); - hot_delete_ops.push(KeyValueStoreOp::DeleteKey(key_col)); - blob_keys.push(key); - } - - let num_blobs = blob_keys.len(); - debug!(log, "Collected {} blob lists to migrate", num_blobs); - - let batch_size = 500; - let mut batch = Vec::with_capacity(batch_size); - - for key in blob_keys { - let next_blob = db.hot_db.get_bytes(column.as_str(), &key)?; - if let Some(next_blob) = next_blob { - let key_col = get_key_for_col(column.as_str(), &key); - batch.push(KeyValueStoreOp::PutKeyValue(key_col, next_blob)); - - if batch.len() >= batch_size { - db.blobs_db.do_atomically(batch.clone())?; - batch.clear(); - } - } - } - - // Process the remaining batch if it's not empty - if !batch.is_empty() { - db.blobs_db.do_atomically(batch)?; - } - - debug!(log, "Wrote {} blobs to the blobs db", num_blobs); - - // Delete all the blobs - info!(log, "Upgrading to v19 schema"); - Ok(hot_delete_ops) -} - -pub fn downgrade_from_v19( - _db: Arc>, - log: Logger, -) -> Result, Error> { - // No-op - info!( - log, - "Downgrading to v18 schema"; - ); - - Ok(vec![]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs new file mode 100644 index 00000000000..d556d5988d6 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs @@ -0,0 +1,107 @@ +use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +use operation_pool::{ + PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, +}; +use slog::{debug, info, Logger}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use types::Attestation; + +pub fn upgrade_to_v20( + db: Arc>, + log: Logger, +) -> Result, Error> { + info!(log, "Upgrading from v19 to v20"); + + // Load a V15 op pool and transform it to V20. + let Some(PersistedOperationPoolV15:: { + attestations_v15, + sync_contributions, + attester_slashings_v15, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }) = db.get_item(&OP_POOL_DB_KEY)? + else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let attestations = attestations_v15 + .into_iter() + .map(|(attestation, indices)| (Attestation::Base(attestation).into(), indices)) + .collect(); + + let attester_slashings = attester_slashings_v15 + .into_iter() + .map(|slashing| slashing.into()) + .collect(); + + let v20 = PersistedOperationPool::V20(PersistedOperationPoolV20 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }); + Ok(vec![v20.as_kv_store_op(OP_POOL_DB_KEY)]) +} + +pub fn downgrade_from_v20( + db: Arc>, + log: Logger, +) -> Result, Error> { + info!(log, "Downgrading from v20 to v19"); + + // Load a V20 op pool and transform it to V15. + let Some(PersistedOperationPoolV20:: { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }) = db.get_item(&OP_POOL_DB_KEY)? 
+ else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + let attestations_v15 = attestations + .into_iter() + .filter_map(|(attestation, indices)| { + if let Attestation::Base(attestation) = attestation.into() { + Some((attestation, indices)) + } else { + info!(log, "Dropping attestation during downgrade"; "reason" => "not a base attestation"); + None + } + }) + .collect(); + + let attester_slashings_v15 = attester_slashings + .into_iter() + .filter_map(|slashing| match slashing.try_into() { + Ok(slashing) => Some(slashing), + Err(_) => { + info!(log, "Dropping attester slashing during downgrade"; "reason" => "not a base attester slashing"); + None + } + }) + .collect(); + + let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { + attestations_v15, + sync_contributions, + attester_slashings_v15, + proposer_slashings, + voluntary_exits, + bls_to_execution_changes, + capella_bls_change_broadcast_indices, + }); + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs new file mode 100644 index 00000000000..4042d328207 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs @@ -0,0 +1,83 @@ +use crate::beacon_chain::BeaconChainTypes; +use crate::validator_pubkey_cache::DatabasePubkey; +use slog::{info, Logger}; +use ssz::{Decode, Encode}; +use std::sync::Arc; +use store::{ + get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, +}; +use types::{Hash256, PublicKey}; + +const LOG_EVERY: usize = 200_000; + +pub fn upgrade_to_v21( + db: Arc>, + log: Logger, +) -> Result, Error> { + info!(log, "Upgrading from v20 to v21"); + + let mut ops = vec![]; + + // Iterate through all pubkeys and decompress them. + for (i, res) in db + .hot_db + .iter_column::(DBColumn::PubkeyCache) + .enumerate() + { + let (key, value) = res?; + let pubkey = PublicKey::from_ssz_bytes(&value)?; + let decompressed = DatabasePubkey::from_pubkey(&pubkey); + ops.push(decompressed.as_kv_store_op(key)); + + if i > 0 && i % LOG_EVERY == 0 { + info!( + log, + "Public key decompression in progress"; + "keys_decompressed" => i + ); + } + } + info!(log, "Public key decompression complete"); + + Ok(ops) +} + +pub fn downgrade_from_v21( + db: Arc>, + log: Logger, +) -> Result, Error> { + info!(log, "Downgrading from v21 to v20"); + + let mut ops = vec![]; + + // Iterate through all pubkeys and recompress them. 
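// A rough sketch of the idea behind the v21 migration above: store each validator key in
// its uncompressed form so that loading the pubkey cache avoids the expensive
// point-decompression step, and recompress the keys again on downgrade. The structs and
// conversion functions below are illustrative stand-ins, not the real `bls` types or
// `serialize_uncompressed`/`compress` implementations.
#[derive(Clone, PartialEq, Debug)]
struct CompressedKey([u8; 48]);

#[derive(Clone, PartialEq, Debug)]
struct UncompressedKey([u8; 96]);

// Stand-in for the (costly) decompression performed once during the upgrade.
fn decompress(key: &CompressedKey) -> UncompressedKey {
    let mut out = [0u8; 96];
    out[..48].copy_from_slice(&key.0);
    UncompressedKey(out)
}

// Stand-in for the recompression performed during a downgrade.
fn compress(key: &UncompressedKey) -> CompressedKey {
    let mut out = [0u8; 48];
    out.copy_from_slice(&key.0[..48]);
    CompressedKey(out)
}

fn main() {
    let compressed = CompressedKey([7u8; 48]);
    // Upgrade path: decompress once, persist the uncompressed bytes.
    let stored = decompress(&compressed);
    // Downgrade path: recompress and confirm the original encoding round-trips.
    assert_eq!(compress(&stored), compressed);
}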
+ for (i, res) in db + .hot_db + .iter_column::(DBColumn::PubkeyCache) + .enumerate() + { + let (key, value) = res?; + let decompressed = DatabasePubkey::from_ssz_bytes(&value)?; + let (_, pubkey_bytes) = decompressed.as_pubkey().map_err(|e| Error::DBError { + message: format!("{e:?}"), + })?; + + let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue( + db_key, + pubkey_bytes.as_ssz_bytes(), + )); + + if i > 0 && i % LOG_EVERY == 0 { + info!( + log, + "Public key compression in progress"; + "keys_compressed" => i + ); + } + } + + info!(log, "Public key compression complete"); + + Ok(ops) +} diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs deleted file mode 100644 index ac4e71d3d5a..00000000000 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ /dev/null @@ -1,521 +0,0 @@ -use crate::BeaconSnapshot; -use itertools::process_results; -use std::cmp; -use std::sync::Arc; -use std::time::Duration; -use types::{ - beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, - SignedBeaconBlock, Slot, -}; - -/// The default size of the cache. -pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 3; - -/// The minimum block delay to clone the state in the cache instead of removing it. -/// This helps keep block processing fast during re-orgs from late blocks. -fn minimum_block_delay_for_clone(seconds_per_slot: u64) -> Duration { - // If the block arrived at the attestation deadline or later, it might get re-orged. - Duration::from_secs(seconds_per_slot) / 3 -} - -/// This snapshot is to be used for verifying a child of `self.beacon_block`. -#[derive(Debug)] -pub struct PreProcessingSnapshot { - /// This state is equivalent to the `self.beacon_block.state_root()` state that has been - /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for - /// the application of another block. - pub pre_state: BeaconState, - /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. - pub beacon_state_root: Option, - pub beacon_block: SignedBeaconBlock>, - pub beacon_block_root: Hash256, -} - -impl From> for PreProcessingSnapshot { - fn from(snapshot: BeaconSnapshot) -> Self { - let beacon_state_root = Some(snapshot.beacon_state_root()); - Self { - pre_state: snapshot.beacon_state, - beacon_state_root, - beacon_block: snapshot.beacon_block.clone_as_blinded(), - beacon_block_root: snapshot.beacon_block_root, - } - } -} - -impl CacheItem { - pub fn new_without_pre_state(snapshot: BeaconSnapshot) -> Self { - Self { - beacon_block: snapshot.beacon_block, - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state: None, - } - } - - fn clone_to_snapshot_with(&self, clone_config: CloneConfig) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state.clone_with(clone_config), - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - } - } - - pub fn into_pre_state(self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. 
- let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self.pre_state.unwrap_or(self.beacon_state), - beacon_state_root, - } - } - - pub fn clone_as_pre_state(&self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. - let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self - .pre_state - .as_ref() - .map_or_else(|| self.beacon_state.clone(), |pre_state| pre_state.clone()), - beacon_state_root, - } - } -} - -/// The information required for block production. -pub struct BlockProductionPreState { - /// This state may or may not have been advanced forward a single slot. - /// - /// See the documentation in the `crate::state_advance_timer` module for more information. - pub pre_state: BeaconState, - /// This value will only be `Some` if `self.pre_state` was **not** advanced forward a single - /// slot. - /// - /// This value can be used to avoid tree-hashing the state during the first call to - /// `per_slot_processing`. - pub state_root: Option, -} - -pub enum StateAdvance { - /// The cache does not contain the supplied block root. - BlockNotFound, - /// The cache contains the supplied block root but the state has already been advanced. - AlreadyAdvanced, - /// The cache contains the supplied block root and the state has not yet been advanced. - State { - state: Box>, - state_root: Hash256, - block_slot: Slot, - }, -} - -/// The item stored in the `SnapshotCache`. -pub struct CacheItem { - beacon_block: Arc>, - beacon_block_root: Hash256, - /// This state is equivalent to `self.beacon_block.state_root()`. - beacon_state: BeaconState, - /// This state is equivalent to `self.beacon_state` that has had `per_slot_processing` applied - /// to it. This state assists in optimizing block processing. - pre_state: Option>, -} - -impl Into> for CacheItem { - fn into(self) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state, - beacon_block: self.beacon_block, - beacon_block_root: self.beacon_block_root, - } - } -} - -/// Provides a cache of `BeaconSnapshot` that is intended primarily for block processing. -/// -/// ## Cache Queuing -/// -/// The cache has a non-standard queue mechanism (specifically, it is not LRU). -/// -/// The cache has a max number of elements (`max_len`). Until `max_len` is achieved, all snapshots -/// are simply added to the queue. Once `max_len` is achieved, adding a new snapshot will cause an -/// existing snapshot to be ejected. The ejected snapshot will: -/// -/// - Never be the `head_block_root`. -/// - Be the snapshot with the lowest `state.slot` (ties broken arbitrarily). -pub struct SnapshotCache { - max_len: usize, - head_block_root: Hash256, - snapshots: Vec>, -} - -impl SnapshotCache { - /// Instantiate a new cache which contains the `head` snapshot. - /// - /// Setting `max_len = 0` is equivalent to setting `max_len = 1`. - pub fn new(max_len: usize, head: BeaconSnapshot) -> Self { - Self { - max_len: cmp::max(max_len, 1), - head_block_root: head.beacon_block_root, - snapshots: vec![CacheItem::new_without_pre_state(head)], - } - } - - /// The block roots of all snapshots contained in `self`. 
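// A compact sketch of the eviction rule documented above for the (now removed)
// `SnapshotCache`: once the cache is full, a new item replaces the non-head entry with
// the lowest slot, and the head entry is never evicted. `Entry` and `Cache` below are
// illustrative stand-ins for `CacheItem` and `SnapshotCache`.
#[derive(Debug, Clone, PartialEq)]
struct Entry {
    block_root: u64,
    slot: u64,
}

struct Cache {
    max_len: usize,
    head_block_root: u64,
    entries: Vec<Entry>,
}

impl Cache {
    fn insert(&mut self, entry: Entry) {
        if self.entries.len() < self.max_len {
            self.entries.push(entry);
            return;
        }
        // Full: replace the lowest-slot entry that is not the current head.
        let head = self.head_block_root;
        let evict = self
            .entries
            .iter()
            .enumerate()
            .filter(|(_, e)| e.block_root != head)
            .min_by_key(|(_, e)| e.slot)
            .map(|(i, _)| i);
        if let Some(i) = evict {
            self.entries[i] = entry;
        }
    }
}

fn main() {
    let mut cache = Cache {
        max_len: 2,
        head_block_root: 0,
        entries: vec![
            Entry { block_root: 0, slot: 0 },
            Entry { block_root: 1, slot: 5 },
        ],
    };
    cache.insert(Entry { block_root: 2, slot: 9 });
    // The head (root 0) survives; the lowest-slot non-head entry (root 1) was evicted.
    assert!(cache.entries.iter().any(|e| e.block_root == 0));
    assert!(cache.entries.iter().all(|e| e.block_root != 1));
}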
- pub fn beacon_block_roots(&self) -> Vec { - self.snapshots.iter().map(|s| s.beacon_block_root).collect() - } - - #[allow(clippy::len_without_is_empty)] - /// The number of snapshots contained in `self`. - pub fn len(&self) -> usize { - self.snapshots.len() - } - - /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see - /// struct-level documentation for more info). - pub fn insert( - &mut self, - snapshot: BeaconSnapshot, - pre_state: Option>, - spec: &ChainSpec, - ) { - let parent_root = snapshot.beacon_block.message().parent_root(); - let item = CacheItem { - beacon_block: snapshot.beacon_block.clone(), - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state, - }; - - // Remove the grandparent of the block that was just inserted. - // - // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the - // cache small by removing any states that already have more than one descendant. - // - // Remove the grandparent first to free up room in the cache. - let grandparent_result = - process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { - iter.map(|(_slot, root)| root) - .find(|root| *root != item.beacon_block_root && *root != parent_root) - }); - if let Ok(Some(grandparent_root)) = grandparent_result { - let head_block_root = self.head_block_root; - self.snapshots.retain(|snapshot| { - let root = snapshot.beacon_block_root; - root == head_block_root || root != grandparent_root - }); - } - - if self.snapshots.len() < self.max_len { - self.snapshots.push(item); - } else { - let insert_at = self - .snapshots - .iter() - .enumerate() - .filter_map(|(i, snapshot)| { - if snapshot.beacon_block_root != self.head_block_root { - Some((i, snapshot.beacon_state.slot())) - } else { - None - } - }) - .min_by_key(|(_i, slot)| *slot) - .map(|(i, _slot)| i); - - if let Some(i) = insert_at { - self.snapshots[i] = item; - } - } - } - - /// If available, returns a `CacheItem` that should be used for importing/processing a block. - /// The method will remove the block from `self`, carrying across any caches that may or may not - /// be built. - /// - /// In the event the block being processed was observed late, clone the cache instead of - /// moving it. This allows us to process the next block quickly in the case of a re-org. - /// Additionally, if the slot was skipped, clone the cache. This ensures blocks that are - /// later than 1 slot still have access to the cache and can be processed quickly. - pub fn get_state_for_block_processing( - &mut self, - block_root: Hash256, - block_slot: Slot, - block_delay: Option, - spec: &ChainSpec, - ) -> Option<(PreProcessingSnapshot, bool)> { - self.snapshots - .iter() - .position(|snapshot| snapshot.beacon_block_root == block_root) - .map(|i| { - if let Some(cache) = self.snapshots.get(i) { - // Avoid cloning the block during sync (when the `block_delay` is `None`). - if let Some(delay) = block_delay { - if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot) - && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 - || block_slot > cache.beacon_block.slot() + 1 - { - return (cache.clone_as_pre_state(), true); - } - } - } - (self.snapshots.remove(i).into_pre_state(), false) - }) - } - - /// If available, obtains a clone of a `BeaconState` that should be used for block production. - /// The clone will use `CloneConfig:all()`, ensuring any tree-hash cache is cloned too. 
- /// - /// ## Note - /// - /// This method clones the `BeaconState` (instead of removing it) since we assume that any block - /// we produce will soon be pushed to the `BeaconChain` for importing/processing. Keeping a copy - /// of that `BeaconState` in `self` will greatly help with import times. - pub fn get_state_for_block_production( - &self, - block_root: Hash256, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - if let Some(pre_state) = &snapshot.pre_state { - BlockProductionPreState { - pre_state: pre_state.clone_with(CloneConfig::all()), - state_root: None, - } - } else { - BlockProductionPreState { - pre_state: snapshot.beacon_state.clone_with(CloneConfig::all()), - state_root: Some(snapshot.beacon_block.state_root()), - } - } - }) - } - - /// If there is a snapshot with `block_root`, clone it and return the clone. - pub fn get_cloned( - &self, - block_root: Hash256, - clone_config: CloneConfig, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| snapshot.clone_to_snapshot_with(clone_config)) - } - - pub fn get_for_state_advance(&mut self, block_root: Hash256) -> StateAdvance { - if let Some(snapshot) = self - .snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - { - if snapshot.pre_state.is_some() { - StateAdvance::AlreadyAdvanced - } else { - let cloned = snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()); - - StateAdvance::State { - state: Box::new(std::mem::replace(&mut snapshot.beacon_state, cloned)), - state_root: snapshot.beacon_block.state_root(), - block_slot: snapshot.beacon_block.slot(), - } - } - } else { - StateAdvance::BlockNotFound - } - } - - pub fn update_pre_state(&mut self, block_root: Hash256, state: BeaconState) -> Option<()> { - self.snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - snapshot.pre_state = Some(state); - }) - } - - /// Removes all snapshots from the queue that are less than or equal to the finalized epoch. - pub fn prune(&mut self, finalized_epoch: Epoch) { - self.snapshots.retain(|snapshot| { - snapshot.beacon_state.slot() > finalized_epoch.start_slot(E::slots_per_epoch()) - }) - } - - /// Inform the cache that the head of the beacon chain has changed. - /// - /// The snapshot that matches this `head_block_root` will never be ejected from the cache - /// during `Self::insert`. 
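// A small sketch of the clone-vs-move rule in `get_state_for_block_processing` above: if
// a block arrived at or after the attestation deadline (one third of a slot) but within
// four slots, or it skipped past the cached block's slot, the cached state is cloned
// rather than removed so a potential re-org can still be processed quickly. During sync
// (`block_delay` is `None`) the state is always moved. Durations and slot numbers below
// are stand-ins.
use std::time::Duration;

fn should_clone(
    block_delay: Option<Duration>,
    block_slot: u64,
    cached_block_slot: u64,
    seconds_per_slot: u64,
) -> bool {
    let Some(delay) = block_delay else { return false };
    let attestation_deadline = Duration::from_secs(seconds_per_slot) / 3;
    (delay >= attestation_deadline && delay <= Duration::from_secs(seconds_per_slot) * 4)
        || block_slot > cached_block_slot + 1
}

fn main() {
    // Sync: no delay information, so move the state out of the cache.
    assert!(!should_clone(None, 12, 10, 12));
    // On-time block (1s into a 12s slot): move the state.
    assert!(!should_clone(Some(Duration::from_secs(1)), 11, 10, 12));
    // Late block (past the 4s attestation deadline): keep a clone for a possible re-org.
    assert!(should_clone(Some(Duration::from_secs(5)), 11, 10, 12));
    // Skipped slot: keep a clone as well.
    assert!(should_clone(Some(Duration::from_secs(1)), 12, 10, 12));
}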
- pub fn update_head(&mut self, head_block_root: Hash256) { - self.head_block_root = head_block_root - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use types::{test_utils::generate_deterministic_keypair, BeaconBlock, MainnetEthSpec}; - - fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness - } - - const CACHE_SIZE: usize = 4; - - fn get_snapshot(i: u64) -> BeaconSnapshot { - let spec = MainnetEthSpec::default_spec(); - - let beacon_state = get_harness().chain.head_beacon_state_cloned(); - - let signed_beacon_block = SignedBeaconBlock::from_block( - BeaconBlock::empty(&spec), - generate_deterministic_keypair(0) - .sk - .sign(Hash256::from_low_u64_be(42)), - ); - - BeaconSnapshot { - beacon_state, - beacon_block: Arc::new(signed_beacon_block), - beacon_block_root: Hash256::from_low_u64_be(i), - } - } - - #[test] - fn insert_get_prune_update() { - let spec = MainnetEthSpec::default_spec(); - let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0)); - - // Insert a bunch of entries in the cache. It should look like this: - // - // Index Root - // 0 0 <--head - // 1 1 - // 2 2 - // 3 3 - for i in 1..CACHE_SIZE as u64 { - let mut snapshot = get_snapshot(i); - - // Each snapshot should be one slot into an epoch, with each snapshot one epoch apart. - *snapshot.beacon_state.slot_mut() = - Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - - cache.insert(snapshot, None, &spec); - - assert_eq!( - cache.snapshots.len(), - i as usize + 1, - "cache length should be as expected" - ); - assert_eq!(cache.head_block_root, Hash256::from_low_u64_be(0)); - } - - // Insert a new value in the cache. Afterwards it should look like: - // - // Index Root - // 0 0 <--head - // 1 42 - // 2 2 - // 3 3 - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None, &spec); - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - - assert!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(1), - Slot::new(0), - None, - &spec - ) - .is_none(), - "the snapshot with the lowest slot should have been removed during the insert function" - ); - assert!(cache - .get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none()) - .is_none()); - - assert_eq!( - cache - .get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none()) - .expect("the head should still be in the cache") - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_cloned should get the correct snapshot" - ); - assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(0), - Slot::new(0), - None, - &spec - ) - .expect("the head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_state_for_block_processing should get the correct snapshot" - ); - - assert_eq!( - cache.snapshots.len(), - CACHE_SIZE - 1, - "get_state_for_block_processing should shorten the cache" - ); - - // Prune the cache. Afterwards it should look like: - // - // Index Root - // 0 2 - // 1 3 - cache.prune(Epoch::new(2)); - - assert_eq!(cache.snapshots.len(), 2); - - cache.update_head(Hash256::from_low_u64_be(2)); - - // Over-fill the cache so it needs to eject some old values on insert. 
- for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None, &spec); - } - - // Ensure that the new head value was not removed from the cache. - assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(2), - Slot::new(0), - None, - &spec - ) - .expect("the new head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(2), - "get_state_for_block_processing should get the correct snapshot" - ); - } -} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8fbd5d575f9..6b85d7aadf7 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -58,6 +58,7 @@ use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; +use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; pub use types::test_utils::generate_deterministic_keypairs; use types::test_utils::TestRandom; @@ -684,6 +685,7 @@ where .set_builder_url( SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), None, + None, ) .unwrap(); @@ -1030,20 +1032,18 @@ where *state.get_block_root(target_slot)? }; - Ok(Attestation { - aggregation_bits: BitList::with_capacity(committee_len)?, - data: AttestationData { - slot, - index, - beacon_block_root, - source: state.current_justified_checkpoint(), - target: Checkpoint { - epoch, - root: target_root, - }, + Ok(Attestation::empty_for_signing( + index, + committee_len, + slot, + beacon_block_root, + state.current_justified_checkpoint(), + Checkpoint { + epoch, + root: target_root, }, - signature: AggregateSignature::empty(), - }) + &self.spec, + )?) } /// A list of attestations for each committee for the given slot. @@ -1118,17 +1118,24 @@ where ) .unwrap(); - attestation.aggregation_bits.set(i, true).unwrap(); + match attestation { + Attestation::Base(ref mut att) => { + att.aggregation_bits.set(i, true).unwrap() + } + Attestation::Electra(ref mut att) => { + att.aggregation_bits.set(i, true).unwrap() + } + } - attestation.signature = { + *attestation.signature_mut() = { let domain = self.spec.get_domain( - attestation.data.target.epoch, + attestation.data().target.epoch, Domain::BeaconAttester, &fork, state.genesis_validators_root(), ); - let message = attestation.data.signing_root(domain); + let message = attestation.data().signing_root(domain); let mut agg_sig = AggregateSignature::infinity(); @@ -1139,8 +1146,8 @@ where agg_sig }; - let subnet_id = SubnetId::compute_subnet_for_attestation_data::( - &attestation.data, + let subnet_id = SubnetId::compute_subnet_for_attestation::( + attestation.to_ref(), committee_count, &self.chain.spec, ) @@ -1314,7 +1321,10 @@ where // If there are any attestations in this committee, create an aggregate. if let Some((attestation, _)) = committee_attestations.first() { let bc = state - .get_beacon_committee(attestation.data.slot, attestation.data.index) + .get_beacon_committee( + attestation.data().slot, + attestation.committee_index().unwrap(), + ) .unwrap(); // Find an aggregator if one exists. 
Return `None` if there are no @@ -1341,25 +1351,35 @@ where }) .copied()?; + let fork_name = self.spec.fork_name_at_slot::(slot); + + let aggregate = if fork_name.electra_enabled() { + self.chain.get_aggregated_attestation_electra( + slot, + &attestation.data().tree_hash_root(), + bc.index, + ) + } else { + self.chain + .get_aggregated_attestation_base(attestation.data()) + } + .unwrap() + .unwrap_or_else(|| { + committee_attestations.iter().skip(1).fold( + attestation.clone(), + |mut agg, (att, _)| { + agg.aggregate(att.to_ref()); + agg + }, + ) + }); + // If the chain is able to produce an aggregate, use that. Otherwise, build an // aggregate locally. - let aggregate = self - .chain - .get_aggregated_attestation(&attestation.data) - .unwrap() - .unwrap_or_else(|| { - committee_attestations.iter().skip(1).fold( - attestation.clone(), - |mut agg, (att, _)| { - agg.aggregate(att); - agg - }, - ) - }); let signed_aggregate = SignedAggregateAndProof::from_aggregate( aggregator_index as u64, - aggregate, + aggregate.to_ref(), None, &self.validator_keypairs[aggregator_index].sk, &fork, @@ -1485,50 +1505,89 @@ where ) -> AttesterSlashing { let fork = self.chain.canonical_head.cached_head().head_fork(); - let mut attestation_1 = IndexedAttestation { - attesting_indices: VariableList::new(validator_indices).unwrap(), - data: AttestationData { - slot: Slot::new(0), - index: 0, - beacon_block_root: Hash256::zero(), - target: Checkpoint { - root: Hash256::zero(), - epoch: target1.unwrap_or(fork.epoch), - }, - source: Checkpoint { - root: Hash256::zero(), - epoch: source1.unwrap_or(Epoch::new(0)), - }, + let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); + + let data = AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: target1.unwrap_or(fork.epoch), }, - signature: AggregateSignature::infinity(), + source: Checkpoint { + root: Hash256::zero(), + epoch: source1.unwrap_or(Epoch::new(0)), + }, + }; + let mut attestation_1 = if fork_name.electra_enabled() { + IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data, + signature: AggregateSignature::infinity(), + }) + } else { + IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data, + signature: AggregateSignature::infinity(), + }) }; let mut attestation_2 = attestation_1.clone(); - attestation_2.data.index += 1; - attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0)); - attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch); + attestation_2.data_mut().index += 1; + attestation_2.data_mut().source.epoch = source2.unwrap_or(Epoch::new(0)); + attestation_2.data_mut().target.epoch = target2.unwrap_or(fork.epoch); for attestation in &mut [&mut attestation_1, &mut attestation_2] { - for &i in &attestation.attesting_indices { - let sk = &self.validator_keypairs[i as usize].sk; + match attestation { + IndexedAttestation::Base(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; + + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); - let genesis_validators_root = self.chain.genesis_validators_root; + 
attestation.signature.add_assign(&sk.sign(message)); + } + } + IndexedAttestation::Electra(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; + + let genesis_validators_root = self.chain.genesis_validators_root; - let domain = self.chain.spec.get_domain( - attestation.data.target.epoch, - Domain::BeaconAttester, - &fork, - genesis_validators_root, - ); - let message = attestation.data.signing_root(domain); + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); - attestation.signature.add_assign(&sk.sign(message)); + attestation.signature.add_assign(&sk.sign(message)); + } + } } } - AttesterSlashing { - attestation_1, - attestation_2, + if fork_name.electra_enabled() { + AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: attestation_1.as_electra().unwrap().clone(), + attestation_2: attestation_2.as_electra().unwrap().clone(), + }) + } else { + AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: attestation_1.as_base().unwrap().clone(), + attestation_2: attestation_2.as_base().unwrap().clone(), + }) } } @@ -1537,6 +1596,8 @@ where validator_indices_1: Vec, validator_indices_2: Vec, ) -> AttesterSlashing { + let fork_name = self.spec.fork_name_at_slot::(Slot::new(0)); + let data = AttestationData { slot: Slot::new(0), index: 0, @@ -1551,42 +1612,94 @@ where }, }; - let mut attestation_1 = IndexedAttestation { - attesting_indices: VariableList::new(validator_indices_1).unwrap(), - data: data.clone(), - signature: AggregateSignature::infinity(), - }; + let (mut attestation_1, mut attestation_2) = if fork_name.electra_enabled() { + let attestation_1 = IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices_1).unwrap(), + data: data.clone(), + signature: AggregateSignature::infinity(), + }; + + let attestation_2 = IndexedAttestationElectra { + attesting_indices: VariableList::new(validator_indices_2).unwrap(), + data, + signature: AggregateSignature::infinity(), + }; - let mut attestation_2 = IndexedAttestation { - attesting_indices: VariableList::new(validator_indices_2).unwrap(), - data, - signature: AggregateSignature::infinity(), + ( + IndexedAttestation::Electra(attestation_1), + IndexedAttestation::Electra(attestation_2), + ) + } else { + let attestation_1 = IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices_1).unwrap(), + data: data.clone(), + signature: AggregateSignature::infinity(), + }; + + let attestation_2 = IndexedAttestationBase { + attesting_indices: VariableList::new(validator_indices_2).unwrap(), + data, + signature: AggregateSignature::infinity(), + }; + + ( + IndexedAttestation::Base(attestation_1), + IndexedAttestation::Base(attestation_2), + ) }; - attestation_2.data.index += 1; + attestation_2.data_mut().index += 1; let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { - for &i in &attestation.attesting_indices { - let sk = &self.validator_keypairs[i as usize].sk; + match attestation { + IndexedAttestation::Base(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; + + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, 
+ &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); - let genesis_validators_root = self.chain.genesis_validators_root; + attestation.signature.add_assign(&sk.sign(message)); + } + } + IndexedAttestation::Electra(attestation) => { + for i in attestation.attesting_indices.iter() { + let sk = &self.validator_keypairs[*i as usize].sk; - let domain = self.chain.spec.get_domain( - attestation.data.target.epoch, - Domain::BeaconAttester, - &fork, - genesis_validators_root, - ); - let message = attestation.data.signing_root(domain); + let genesis_validators_root = self.chain.genesis_validators_root; - attestation.signature.add_assign(&sk.sign(message)); + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } } } - AttesterSlashing { - attestation_1, - attestation_2, + if fork_name.electra_enabled() { + AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: attestation_1.as_electra().unwrap().clone(), + attestation_2: attestation_2.as_electra().unwrap().clone(), + }) + } else { + AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: attestation_1.as_base().unwrap().clone(), + attestation_2: attestation_2.as_base().unwrap().clone(), + }) } } @@ -1881,6 +1994,7 @@ where block_root, RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, || Ok(()), ) .await? @@ -1907,6 +2021,7 @@ where block_root, RpcBlock::new(Some(block_root), block, sidecars).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, || Ok(()), ) .await? 
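// A minimal sketch of the fork-dispatch pattern used throughout the test-harness changes
// above: construct the Electra variant when the fork is enabled and the Base variant
// otherwise, then match on the variant wherever the inner field types differ. The enum
// and the `electra_enabled` flag below are illustrative stand-ins for the real
// superstruct-generated `Attestation` types and `ForkName::electra_enabled()`.
#[derive(Debug)]
enum Attestation {
    Base {
        aggregation_bits: Vec<bool>,
    },
    Electra {
        aggregation_bits: Vec<bool>,
        committee_bits: Vec<bool>,
    },
}

fn empty_attestation(
    electra_enabled: bool,
    committee_len: usize,
    committee_count: usize,
) -> Attestation {
    if electra_enabled {
        Attestation::Electra {
            aggregation_bits: vec![false; committee_len],
            committee_bits: vec![false; committee_count],
        }
    } else {
        Attestation::Base {
            aggregation_bits: vec![false; committee_len],
        }
    }
}

fn set_aggregation_bit(attestation: &mut Attestation, i: usize) {
    // Each variant owns a differently-shaped bitfield, so the diff matches on the
    // variant before mutating; this mirrors that shape.
    match attestation {
        Attestation::Base { aggregation_bits } => aggregation_bits[i] = true,
        Attestation::Electra {
            aggregation_bits, ..
        } => aggregation_bits[i] = true,
    }
}

fn main() {
    let mut att = empty_attestation(true, 4, 64);
    set_aggregation_bit(&mut att, 2);
    match att {
        Attestation::Electra {
            aggregation_bits, ..
        } => assert!(aggregation_bits[2]),
        Attestation::Base { .. } => unreachable!(),
    }
}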
@@ -2131,7 +2246,7 @@ where .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); - state_hash_from_slot.insert(*slot, state.tree_hash_root().into()); + state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into()); latest_block_hash = Some(block_hash); } ( @@ -2377,6 +2492,7 @@ where AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals, }; + let state_root = state.update_tree_hash_cache().unwrap(); let (_, _, last_produced_block_hash, _) = self .add_attested_blocks_at_slots_with_sync( @@ -2503,6 +2619,7 @@ pub fn generate_rand_block_and_blobs( rng: &mut impl Rng, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); let mut blob_sidecars = vec![]; @@ -2539,7 +2656,6 @@ pub fn generate_rand_block_and_blobs( }; let (bundle, transactions) = execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); - payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index a63940074b4..d4524900818 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -25,9 +25,10 @@ use types::consts::altair::{ TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, - ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, - SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, + Attestation, AttestationData, AttesterSlashingRef, BeaconBlockRef, BeaconState, + BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, + IndexedAttestationRef, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, + SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, }; /// Used for Prometheus labels. 
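// A rough sketch of the cache-aware hashing idea suggested by the switch above from
// `state.tree_hash_root()` to `state.canonical_root()` / `update_tree_hash_cache()`:
// compute the root once, memoise it, and reuse the cached value on later look-ups
// instead of re-hashing the whole state. The `State` type and hash function below are
// illustrative stand-ins, not the real `BeaconState` tree-hash cache.
struct State {
    data: Vec<u64>,
    cached_root: Option<u64>,
}

impl State {
    // Stand-in for an expensive full tree-hash.
    fn recompute_root(&self) -> u64 {
        self.data
            .iter()
            .fold(0u64, |acc, x| acc.wrapping_mul(31).wrapping_add(*x))
    }

    // Analogous in spirit to `update_tree_hash_cache`: fill the cache, return the root.
    fn update_root_cache(&mut self) -> u64 {
        let root = self.recompute_root();
        self.cached_root = Some(root);
        root
    }

    // Analogous in spirit to `canonical_root`: prefer the cached value when present.
    fn canonical_root(&mut self) -> u64 {
        match self.cached_root {
            Some(root) => root,
            None => self.update_root_cache(),
        }
    }

    fn mutate(&mut self, value: u64) {
        self.data.push(value);
        // Any mutation invalidates the cached root.
        self.cached_root = None;
    }
}

fn main() {
    let mut state = State {
        data: vec![1, 2, 3],
        cached_root: None,
    };
    let root = state.update_root_cache();
    assert_eq!(state.canonical_root(), root);
    state.mutate(4);
    assert_ne!(state.canonical_root(), root);
}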
@@ -469,7 +470,7 @@ impl ValidatorMonitor { unaggregated_attestations.remove(&oldest_slot); } } - let slot = attestation.data.slot; + let slot = attestation.data().slot; self.unaggregated_attestations.insert(slot, attestation); } @@ -730,12 +731,12 @@ impl ValidatorMonitor { // that qualifies the committee index for reward is included let inclusion_delay = spec.min_attestation_inclusion_delay; - let data = &unaggregated_attestation.data; + let data = unaggregated_attestation.data(); // Get the reward indices for the unaggregated attestation or log an error match get_attestation_participation_flag_indices( state, - &unaggregated_attestation.data, + unaggregated_attestation.data(), inclusion_delay, spec, ) { @@ -1233,7 +1234,7 @@ impl ValidatorMonitor { indexed_attestation: &IndexedAttestation, slot_clock: &S, ) { - let data = &indexed_attestation.data; + let data = indexed_attestation.data(); let epoch = data.slot.epoch(E::slots_per_epoch()); let delay = get_message_delay_ms( seen_timestamp, @@ -1242,7 +1243,7 @@ impl ValidatorMonitor { slot_clock, ); - indexed_attestation.attesting_indices.iter().for_each(|i| { + indexed_attestation.attesting_indices_iter().for_each(|i| { if let Some(validator) = self.get_validator(*i) { let id = &validator.id; @@ -1321,7 +1322,7 @@ impl ValidatorMonitor { indexed_attestation: &IndexedAttestation, slot_clock: &S, ) { - let data = &indexed_attestation.data; + let data = indexed_attestation.data(); let epoch = data.slot.epoch(E::slots_per_epoch()); let delay = get_message_delay_ms( seen_timestamp, @@ -1330,7 +1331,7 @@ impl ValidatorMonitor { slot_clock, ); - let aggregator_index = signed_aggregate_and_proof.message.aggregator_index; + let aggregator_index = signed_aggregate_and_proof.message().aggregator_index(); if let Some(validator) = self.get_validator(aggregator_index) { let id = &validator.id; @@ -1365,7 +1366,7 @@ impl ValidatorMonitor { }); } - indexed_attestation.attesting_indices.iter().for_each(|i| { + indexed_attestation.attesting_indices_iter().for_each(|i| { if let Some(validator) = self.get_validator(*i) { let id = &validator.id; @@ -1382,17 +1383,43 @@ impl ValidatorMonitor { }); if self.individual_tracking() { - info!( - self.log, - "Attestation included in aggregate"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, - ); + let is_first_inclusion_aggregate = validator + .get_from_epoch_summary(epoch, |summary_opt| { + if let Some(summary) = summary_opt { + Some(summary.attestation_aggregate_inclusions == 0) + } else { + // No data for this validator: no inclusion. + Some(true) + } + }) + .unwrap_or(true); + + if is_first_inclusion_aggregate { + info!( + self.log, + "Attestation included in aggregate"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => %data.slot, + "src" => src, + "validator" => %id, + ); + } else { + // Downgrade to Debug for second and onwards of logging to reduce verbosity + debug!( + self.log, + "Attestation included in aggregate"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => %data.slot, + "src" => src, + "validator" => %id, + ) + }; } validator.with_epoch_summary(epoch, |summary| { @@ -1410,11 +1437,11 @@ impl ValidatorMonitor { /// Note: Blocks that get orphaned will skew the inclusion distance calculation. 
pub fn register_attestation_in_block( &self, - indexed_attestation: &IndexedAttestation, + indexed_attestation: IndexedAttestationRef<'_, E>, parent_slot: Slot, spec: &ChainSpec, ) { - let data = &indexed_attestation.data; + let data = indexed_attestation.data(); // Best effort inclusion distance which ignores skip slots between the parent // and the current block. Skipped slots between the attestation slot and the parent // slot are still counted for simplicity's sake. @@ -1423,7 +1450,7 @@ impl ValidatorMonitor { let delay = inclusion_distance - spec.min_attestation_inclusion_delay; let epoch = data.slot.epoch(E::slots_per_epoch()); - indexed_attestation.attesting_indices.iter().for_each(|i| { + indexed_attestation.attesting_indices_iter().for_each(|i| { if let Some(validator) = self.get_validator(*i) { let id = &validator.id; @@ -1433,7 +1460,6 @@ impl ValidatorMonitor { &["block", label], ); }); - if self.individual_tracking() { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS, @@ -1441,16 +1467,41 @@ impl ValidatorMonitor { delay.as_u64() as i64, ); - info!( - self.log, - "Attestation included in block"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "inclusion_lag" => format!("{} slot(s)", delay), - "epoch" => %epoch, - "slot" => %data.slot, - "validator" => %id, - ); + let is_first_inclusion_block = validator + .get_from_epoch_summary(epoch, |summary_opt| { + if let Some(summary) = summary_opt { + Some(summary.attestation_block_inclusions == 0) + } else { + // No data for this validator: no inclusion. + Some(true) + } + }) + .unwrap_or(true); + + if is_first_inclusion_block { + info!( + self.log, + "Attestation included in block"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "inclusion_lag" => format!("{} slot(s)", delay), + "epoch" => %epoch, + "slot" => %data.slot, + "validator" => %id, + ); + } else { + // Downgrade to Debug for second and onwards of logging to reduce verbosity + debug!( + self.log, + "Attestation included in block"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "inclusion_lag" => format!("{} slot(s)", delay), + "epoch" => %epoch, + "slot" => %data.slot, + "validator" => %id, + ); + } } validator.with_epoch_summary(epoch, |summary| { @@ -1783,33 +1834,31 @@ impl ValidatorMonitor { } /// Register an attester slashing from the gossip network. - pub fn register_gossip_attester_slashing(&self, slashing: &AttesterSlashing) { + pub fn register_gossip_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("gossip", slashing) } /// Register an attester slashing from the HTTP API. - pub fn register_api_attester_slashing(&self, slashing: &AttesterSlashing) { + pub fn register_api_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("api", slashing) } /// Register an attester slashing included in a *valid* `BeaconBlock`. 
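// A small sketch of the log-deduplication introduced above in the validator monitor: the
// first attestation inclusion a validator sees in an epoch is logged at INFO, while
// subsequent inclusions in the same epoch drop to DEBUG to reduce verbosity. The map and
// `Level` enum below are stand-ins for the per-epoch summaries and slog macros.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Level {
    Info,
    Debug,
}

#[derive(Default)]
struct Monitor {
    // (validator_index, epoch) -> number of block inclusions seen so far.
    block_inclusions: HashMap<(u64, u64), u64>,
}

impl Monitor {
    fn register_inclusion(&mut self, validator_index: u64, epoch: u64) -> Level {
        let count = self
            .block_inclusions
            .entry((validator_index, epoch))
            .or_insert(0);
        let level = if *count == 0 { Level::Info } else { Level::Debug };
        *count += 1;
        level
    }
}

fn main() {
    let mut monitor = Monitor::default();
    assert_eq!(monitor.register_inclusion(7, 100), Level::Info);
    assert_eq!(monitor.register_inclusion(7, 100), Level::Debug);
    // A new epoch starts the INFO/DEBUG cycle again.
    assert_eq!(monitor.register_inclusion(7, 101), Level::Info);
}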
- pub fn register_block_attester_slashing(&self, slashing: &AttesterSlashing) { + pub fn register_block_attester_slashing(&self, slashing: AttesterSlashingRef<'_, E>) { self.register_attester_slashing("block", slashing) } - fn register_attester_slashing(&self, src: &str, slashing: &AttesterSlashing) { - let data = &slashing.attestation_1.data; + fn register_attester_slashing(&self, src: &str, slashing: AttesterSlashingRef<'_, E>) { + let data = slashing.attestation_1().data(); let attestation_1_indices: HashSet = slashing - .attestation_1 - .attesting_indices - .iter() + .attestation_1() + .attesting_indices_iter() .copied() .collect(); slashing - .attestation_2 - .attesting_indices - .iter() + .attestation_2() + .attesting_indices_iter() .filter(|index| attestation_1_indices.contains(index)) .filter_map(|index| self.get_validator(*index)) .for_each(|validator| { @@ -2068,7 +2117,7 @@ pub fn timestamp_now() -> Duration { } fn u64_to_i64(n: impl Into) -> i64 { - i64::try_from(n.into()).unwrap_or(i64::max_value()) + i64::try_from(n.into()).unwrap_or(i64::MAX) } /// Returns the delay between the start of `block.slot` and `seen_timestamp`. diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index e1b50706286..576fbf0fd1f 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,6 +1,9 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; +use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN; +use smallvec::SmallVec; use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; @@ -49,14 +52,13 @@ impl ValidatorPubkeyCache { let mut pubkey_bytes = vec![]; for validator_index in 0.. { - if let Some(DatabasePubkey(pubkey)) = + if let Some(db_pubkey) = store.get_item(&DatabasePubkey::key_for_index(validator_index))? { - pubkeys.push((&pubkey).try_into().map_err(|e| { - BeaconChainError::ValidatorPubkeyCacheError(format!("{:?}", e)) - })?); - pubkey_bytes.push(pubkey); - indices.insert(pubkey, validator_index); + let (pk, pk_bytes) = DatabasePubkey::as_pubkey(&db_pubkey)?; + pubkeys.push(pk); + indices.insert(pk_bytes, validator_index); + pubkey_bytes.push(pk_bytes); } else { break; } @@ -104,29 +106,29 @@ impl ValidatorPubkeyCache { self.indices.reserve(validator_keys.len()); let mut store_ops = Vec::with_capacity(validator_keys.len()); - for pubkey in validator_keys { + for pubkey_bytes in validator_keys { let i = self.pubkeys.len(); - if self.indices.contains_key(&pubkey) { + if self.indices.contains_key(&pubkey_bytes) { return Err(BeaconChainError::DuplicateValidatorPublicKey); } + let pubkey = (&pubkey_bytes) + .try_into() + .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?; + // Stage the new validator key for writing to disk. // It will be committed atomically when the block that introduced it is written to disk. // Notably it is NOT written while the write lock on the cache is held. 
// See: https://github.com/sigp/lighthouse/issues/2327 store_ops.push(StoreOp::KeyValueOp( - DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)), + DatabasePubkey::from_pubkey(&pubkey) + .as_kv_store_op(DatabasePubkey::key_for_index(i)), )); - self.pubkeys.push( - (&pubkey) - .try_into() - .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?, - ); - self.pubkey_bytes.push(pubkey); - - self.indices.insert(pubkey, i); + self.pubkeys.push(pubkey); + self.pubkey_bytes.push(pubkey_bytes); + self.indices.insert(pubkey_bytes, i); } Ok(store_ops) @@ -166,7 +168,10 @@ impl ValidatorPubkeyCache { /// Wrapper for a public key stored in the database. /// /// Keyed by the validator index as `Hash256::from_low_u64_be(index)`. -struct DatabasePubkey(PublicKeyBytes); +#[derive(Encode, Decode)] +pub struct DatabasePubkey { + pubkey: SmallVec<[u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]>, +} impl StoreItem for DatabasePubkey { fn db_column() -> DBColumn { @@ -174,11 +179,11 @@ impl StoreItem for DatabasePubkey { } fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() + self.as_ssz_bytes() } fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self(PublicKeyBytes::from_ssz_bytes(bytes)?)) + Ok(Self::from_ssz_bytes(bytes)?) } } @@ -186,6 +191,19 @@ impl DatabasePubkey { fn key_for_index(index: usize) -> Hash256 { Hash256::from_low_u64_be(index as u64) } + + pub fn from_pubkey(pubkey: &PublicKey) -> Self { + Self { + pubkey: pubkey.serialize_uncompressed().into(), + } + } + + pub fn as_pubkey(&self) -> Result<(PublicKey, PublicKeyBytes), BeaconChainError> { + let pubkey = PublicKey::deserialize_uncompressed(&self.pubkey) + .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?; + let pubkey_bytes = pubkey.compress(); + Ok((pubkey, pubkey_bytes)) + } } #[cfg(test)] diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index ff83b253205..697e449dc6e 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -8,7 +8,9 @@ use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; use std::sync::Arc; use tree_hash::TreeHash; -use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; +use types::{ + AggregateSignature, Attestation, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot, +}; pub const VALIDATOR_COUNT: usize = 16; @@ -188,20 +190,22 @@ async fn produces_attestations() { .produce_unaggregated_attestation(slot, index) .expect("should produce attestation"); - let data = &attestation.data; + let (aggregation_bits_len, aggregation_bits_zero) = match &attestation { + Attestation::Base(att) => { + (att.aggregation_bits.len(), att.aggregation_bits.is_zero()) + } + Attestation::Electra(att) => { + (att.aggregation_bits.len(), att.aggregation_bits.is_zero()) + } + }; + assert_eq!(aggregation_bits_len, committee_len, "bad committee len"); + assert!(aggregation_bits_zero, "some committee bits are set"); + + let data = attestation.data(); assert_eq!( - attestation.aggregation_bits.len(), - committee_len, - "bad committee len" - ); - assert!( - attestation.aggregation_bits.is_zero(), - "some committee bits are set" - ); - assert_eq!( - attestation.signature, - AggregateSignature::empty(), + attestation.signature(), + &AggregateSignature::infinity(), "bad signature" ); assert_eq!(data.index, index, "bad index"); @@ -329,10 +333,10 @@ async fn early_attester_cache_old_request() { 
.produce_unaggregated_attestation(attest_slot, 0) .unwrap(); - assert_eq!(attestation.data.slot, attest_slot); + assert_eq!(attestation.data().slot, attest_slot); let attested_block = harness .chain - .get_blinded_block(&attestation.data.beacon_block_root) + .get_blinded_block(&attestation.data().beacon_block_root) .unwrap() .unwrap(); assert_eq!(attested_block.slot(), attest_slot); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 1463d1c5c15..19efe10c6d0 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -3,6 +3,7 @@ use beacon_chain::attestation_verification::{ batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error, }; +use beacon_chain::observed_aggregates::ObservedAttestationKey; use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; use beacon_chain::{ attestation_verification::Error as AttnError, @@ -14,14 +15,17 @@ use beacon_chain::{ use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; +use ssz_types::BitVector; use state_processing::{ per_block_processing::errors::AttestationValidationError, per_slot_processing, }; use tree_hash::TreeHash; use types::{ + signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, - BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, ForkName, Hash256, Keypair, - MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, Unsigned, + AttestationRef, AttestationRefMut, BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, + ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, + Slot, SubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -125,7 +129,12 @@ fn get_valid_unaggregated_attestation( let validator_committee_index = 0; let validator_index = *head .beacon_state - .get_beacon_committee(current_slot, valid_attestation.data.index) + .get_beacon_committee( + current_slot, + valid_attestation + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees") .committee .get(validator_committee_index) @@ -143,8 +152,8 @@ fn get_valid_unaggregated_attestation( ) .expect("should sign attestation"); - let subnet_id = SubnetId::compute_subnet_for_attestation_data::( - &valid_attestation.data, + let subnet_id = SubnetId::compute_subnet_for_attestation::( + valid_attestation.to_ref(), head.beacon_state .get_committee_count_at_slot(current_slot) .expect("should get committee count"), @@ -170,7 +179,12 @@ fn get_valid_aggregated_attestation( let current_slot = chain.slot().expect("should get slot"); let committee = state - .get_beacon_committee(current_slot, aggregate.data.index) + .get_beacon_committee( + current_slot, + aggregate + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees"); let committee_len = committee.committee.len(); @@ -181,7 +195,7 @@ fn get_valid_aggregated_attestation( let aggregator_sk = generate_deterministic_keypair(val_index).sk; let proof = SelectionProof::new::( - aggregate.data.slot, + aggregate.data().slot, &aggregator_sk, &state.fork(), chain.genesis_validators_root, @@ -198,7 +212,7 @@ fn get_valid_aggregated_attestation( let signed_aggregate = SignedAggregateAndProof::from_aggregate( aggregator_index 
as u64, - aggregate, + aggregate.to_ref(), None, &aggregator_sk, &state.fork(), @@ -213,14 +227,19 @@ fn get_valid_aggregated_attestation( /// attestation. fn get_non_aggregator( chain: &BeaconChain, - aggregate: &Attestation, + aggregate: AttestationRef, ) -> (usize, SecretKey) { let head = chain.head_snapshot(); let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state - .get_beacon_committee(current_slot, aggregate.data.index) + .get_beacon_committee( + current_slot, + aggregate + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees"); let committee_len = committee.committee.len(); @@ -231,7 +250,7 @@ fn get_non_aggregator( let aggregator_sk = generate_deterministic_keypair(val_index).sk; let proof = SelectionProof::new::( - aggregate.data.slot, + aggregate.data().slot, &aggregator_sk, &state.fork(), chain.genesis_validators_root, @@ -301,15 +320,19 @@ impl GossipTester { get_valid_aggregated_attestation(&harness.chain, valid_attestation.clone()); let mut invalid_attestation = valid_attestation.clone(); - invalid_attestation.data.beacon_block_root = Hash256::repeat_byte(13); + invalid_attestation.data_mut().beacon_block_root = Hash256::repeat_byte(13); let (mut invalid_aggregate, _, _) = get_valid_aggregated_attestation(&harness.chain, invalid_attestation.clone()); - invalid_aggregate.message.aggregator_index = invalid_aggregate - .message - .aggregator_index - .checked_sub(1) - .unwrap(); + + match invalid_aggregate.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregator_index = att.message.aggregator_index.checked_sub(1).unwrap(); + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregator_index = att.message.aggregator_index.checked_sub(1).unwrap(); + } + } Self { harness, @@ -361,7 +384,10 @@ impl GossipTester { } pub fn non_aggregator(&self) -> (usize, SecretKey) { - get_non_aggregator(&self.harness.chain, &self.valid_aggregate.message.aggregate) + get_non_aggregator( + &self.harness.chain, + self.valid_aggregate.message().aggregate(), + ) } pub fn import_valid_aggregate(self) -> Self { @@ -418,6 +444,7 @@ impl GossipTester { vec![&self.invalid_aggregate, &aggregate].into_iter(), ) .unwrap(); + assert_eq!(results.len(), 2); let batch_err = results.pop().unwrap().err().expect(&format!( "{} should error during batch_verify_aggregated_attestations_for_gossip", @@ -490,7 +517,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate from future slot", - |tester, a| a.message.aggregate.data.slot = tester.slot() + 1, + |tester, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.slot = tester.slot() + 1 + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.slot = tester.slot() + 1 + } + }, |tester, err| { assert!(matches!( err, @@ -504,8 +538,18 @@ async fn aggregated_gossip_verification() { "aggregate from past slot", |tester, a| { let too_early_slot = tester.earliest_valid_attestation_slot() - 1; - a.message.aggregate.data.slot = too_early_slot; - a.message.aggregate.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch()); + match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.slot = too_early_slot; + att.message.aggregate.data.target.epoch = + too_early_slot.epoch(E::slots_per_epoch()); + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.slot = 
too_early_slot; + att.message.aggregate.data.target.epoch = + too_early_slot.epoch(E::slots_per_epoch()); + } + } }, |tester, err| { let valid_early_slot = tester.earliest_valid_attestation_slot(); @@ -529,7 +573,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "attestation with invalid target epoch", - |_, a| a.message.aggregate.data.target.epoch += 1, + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.target.epoch += 1 + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.target.epoch += 1 + } + }, |_, err| assert!(matches!(err, AttnError::InvalidTargetEpoch { .. })), ) /* @@ -538,7 +589,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "attestation with invalid target root", - |_, a| a.message.aggregate.data.target.root = Hash256::repeat_byte(42), + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.target.root = Hash256::repeat_byte(42) + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.target.root = Hash256::repeat_byte(42) + } + }, |_, err| assert!(matches!(err, AttnError::InvalidTargetRoot { .. })), ) /* @@ -548,7 +606,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate with unknown head block", - |_, a| a.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42), + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42) + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42) + } + }, |_, err| { assert!(matches!( err, @@ -566,11 +631,19 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate with no participants", - |_, a| { - let aggregation_bits = &mut a.message.aggregate.aggregation_bits; - aggregation_bits.difference_inplace(&aggregation_bits.clone()); - assert!(aggregation_bits.is_zero()); - a.message.aggregate.signature = AggregateSignature::infinity(); + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + let aggregation_bits = &mut att.message.aggregate.aggregation_bits; + aggregation_bits.difference_inplace(&aggregation_bits.clone()); + assert!(aggregation_bits.is_zero()); + att.message.aggregate.signature = AggregateSignature::infinity() + } + SignedAggregateAndProofRefMut::Electra(att) => { + let aggregation_bits = &mut att.message.aggregate.aggregation_bits; + aggregation_bits.difference_inplace(&aggregation_bits.clone()); + assert!(aggregation_bits.is_zero()); + att.message.aggregate.signature = AggregateSignature::infinity() + } }, |_, err| assert!(matches!(err, AttnError::EmptyAggregationBitfield)), ) @@ -581,7 +654,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate with bad signature", - |tester, a| a.signature = tester.aggregator_sk.sign(Hash256::repeat_byte(42)), + |tester, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.signature = tester.aggregator_sk.sign(Hash256::repeat_byte(42)) + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.signature = tester.aggregator_sk.sign(Hash256::repeat_byte(42)) + } + }, |_, err| assert!(matches!(err, AttnError::InvalidSignature)), ) /* @@ -598,7 +678,7 @@ async fn aggregated_gossip_verification() { .chain .head_snapshot() .beacon_state - .get_beacon_committee(tester.slot(), 
a.message.aggregate.data.index) + .get_beacon_committee(tester.slot(), a.message().aggregate().committee_index().expect("should get committee index")) .expect("should get committees") .committee .len(); @@ -608,19 +688,38 @@ async fn aggregated_gossip_verification() { // // Could run for ever, but that seems _really_ improbable. let mut i: u64 = 0; - a.message.selection_proof = loop { - i += 1; - let proof: SelectionProof = tester - .aggregator_sk - .sign(Hash256::from_slice(&int_to_bytes32(i))) - .into(); - if proof - .is_aggregator(committee_len, &tester.harness.chain.spec) - .unwrap() - { - break proof.into(); + match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.selection_proof = loop { + i += 1; + let proof: SelectionProof = tester + .aggregator_sk + .sign(Hash256::from_slice(&int_to_bytes32(i))) + .into(); + if proof + .is_aggregator(committee_len, &tester.harness.chain.spec) + .unwrap() + { + break proof.into(); + } + }; } - }; + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.selection_proof = loop { + i += 1; + let proof: SelectionProof = tester + .aggregator_sk + .sign(Hash256::from_slice(&int_to_bytes32(i))) + .into(); + if proof + .is_aggregator(committee_len, &tester.harness.chain.spec) + .unwrap() + { + break proof.into(); + } + }; + } + } }, |_, err| assert!(matches!(err, AttnError::InvalidSignature)), ) @@ -634,7 +733,14 @@ async fn aggregated_gossip_verification() { |tester, a| { let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign(&tester.aggregator_sk.sign(Hash256::repeat_byte(42))); - a.message.aggregate.signature = agg_sig; + match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.signature = agg_sig; + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.signature = agg_sig; + } + } }, |_, err| assert!(matches!(err, AttnError::InvalidSignature)), ) @@ -643,8 +749,15 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate with too-high aggregator index", - |_, a| { - a.message.aggregator_index = ::ValidatorRegistryLimit::to_u64() + 1 + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregator_index = + ::ValidatorRegistryLimit::to_u64() + 1 + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregator_index = + ::ValidatorRegistryLimit::to_u64() + 1 + } }, |_, err| { assert!(matches!( @@ -663,7 +776,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate with unknown aggregator index", - |_, a| a.message.aggregator_index = VALIDATOR_COUNT as u64, + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregator_index = VALIDATOR_COUNT as u64 + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregator_index = VALIDATOR_COUNT as u64 + } + }, |_, err| { assert!(matches!( err, @@ -671,7 +791,7 @@ async fn aggregated_gossip_verification() { // // AttnError::AggregatorPubkeyUnknown(unknown_validator) // - // However the following error is triggered first: + // However, the following error is triggered first: AttnError::AggregatorNotInCommittee { aggregator_index } @@ -683,7 +803,7 @@ async fn aggregated_gossip_verification() { * The following test ensures: * * aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot -- - * i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index, + * i.e. 
is_aggregator(state, aggregate.data.slot, aggregate.committee_index(), * aggregate_and_proof.selection_proof) returns True. */ .inspect_aggregate_err( @@ -693,7 +813,7 @@ async fn aggregated_gossip_verification() { let (index, sk) = tester.non_aggregator(); *a = SignedAggregateAndProof::from_aggregate( index as u64, - tester.valid_aggregate.message.aggregate.clone(), + tester.valid_aggregate.message().aggregate().clone(), None, &sk, &chain.canonical_head.cached_head().head_fork(), @@ -703,6 +823,7 @@ async fn aggregated_gossip_verification() { }, |tester, err| { let (val_index, _) = tester.non_aggregator(); + assert!(matches!( err, AttnError::InvalidSelectionProof { @@ -729,7 +850,12 @@ async fn aggregated_gossip_verification() { assert!(matches!( err, AttnError::AttestationSupersetKnown(hash) - if hash == tester.valid_aggregate.message.aggregate.data.tree_hash_root() + if hash == ObservedAttestationKey { + committee_index: tester.valid_aggregate.message().aggregate() + .committee_index() + .expect("should get committee index"), + attestation_data: tester.valid_aggregate.message().aggregate().data().clone(), + }.tree_hash_root() )) }, ) @@ -741,7 +867,14 @@ async fn aggregated_gossip_verification() { */ .inspect_aggregate_err( "aggregate from aggregator that has already been seen", - |_, a| a.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42), + |_, a| match a.to_mut() { + SignedAggregateAndProofRefMut::Base(att) => { + att.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42) + } + SignedAggregateAndProofRefMut::Electra(att) => { + att.message.aggregate.data.beacon_block_root = Hash256::repeat_byte(42) + } + }, |tester, err| { assert!(matches!( err, @@ -766,13 +899,29 @@ async fn unaggregated_gossip_verification() { .inspect_unaggregate_err( "attestation with invalid committee index", |tester, a, _| { - a.data.index = tester - .harness - .chain - .head_snapshot() - .beacon_state - .get_committee_count_at_slot(a.data.slot) - .unwrap() + match a.to_mut() { + AttestationRefMut::Base(attn) => { + attn.data.index = tester + .harness + .chain + .head_snapshot() + .beacon_state + .get_committee_count_at_slot(attn.data.slot) + .unwrap(); + } + AttestationRefMut::Electra(attn) => { + let committee_index = tester + .harness + .chain + .head_snapshot() + .beacon_state + .get_committee_count_at_slot(attn.data.slot) + .unwrap(); + // overwrite the existing committee bits before setting + attn.committee_bits = BitVector::default(); + attn.committee_bits.set(committee_index as usize, true).unwrap(); + } + } }, |_, err| assert!(matches!(err, AttnError::NoCommitteeForSlotAndIndex { .. 
})), ) @@ -806,7 +955,7 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation from future slot", - |tester, a, _| a.data.slot = tester.slot() + 1, + |tester, a, _| a.data_mut().slot = tester.slot() + 1, |tester, err| { assert!(matches!( err, @@ -822,8 +971,8 @@ async fn unaggregated_gossip_verification() { "attestation from past slot", |tester, a, _| { let too_early_slot = tester.earliest_valid_attestation_slot() - 1; - a.data.slot = too_early_slot; - a.data.target.epoch = too_early_slot.epoch(E::slots_per_epoch()); + a.data_mut().slot = too_early_slot; + a.data_mut().target.epoch = too_early_slot.epoch(E::slots_per_epoch()); }, |tester, err| { let valid_early_slot = tester.earliest_valid_attestation_slot(); @@ -847,7 +996,7 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with invalid target epoch", - |_, a, _| a.data.target.epoch += 1, + |_, a, _| a.data_mut().target.epoch += 1, |_, err| { assert!(matches!( err, @@ -863,15 +1012,29 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation without any aggregation bits set", - |tester, a, _| { - a.aggregation_bits - .set(tester.attester_committee_index, false) - .expect("should unset aggregation bit"); - assert_eq!( - a.aggregation_bits.num_set_bits(), - 0, - "test requires no set bits" - ); + |tester, mut a, _| { + match &mut a { + Attestation::Base(ref mut att) => { + att.aggregation_bits + .set(tester.attester_committee_index, false) + .expect("should unset aggregation bit"); + assert_eq!( + att.aggregation_bits.num_set_bits(), + 0, + "test requires no set bits" + ); + } + Attestation::Electra(ref mut att) => { + att.aggregation_bits + .set(tester.attester_committee_index, false) + .expect("should unset aggregation bit"); + assert_eq!( + att.aggregation_bits.num_set_bits(), + 0, + "test requires no set bits" + ); + } + } }, |_, err| { assert!(matches!( @@ -882,10 +1045,19 @@ async fn unaggregated_gossip_verification() { ) .inspect_unaggregate_err( "attestation with two aggregation bits set", - |tester, a, _| { - a.aggregation_bits - .set(tester.attester_committee_index + 1, true) - .expect("should set second aggregation bit"); + |tester, mut a, _| { + match &mut a { + Attestation::Base(ref mut att) => { + att.aggregation_bits + .set(tester.attester_committee_index + 1, true) + .expect("should set second aggregation bit"); + } + Attestation::Electra(ref mut att) => { + att.aggregation_bits + .set(tester.attester_committee_index + 1, true) + .expect("should set second aggregation bit"); + } + } }, |_, err| { assert!(matches!( @@ -903,11 +1075,22 @@ async fn unaggregated_gossip_verification() { */ .inspect_unaggregate_err( "attestation with invalid bitfield", - |_, a, _| { - let bits = a.aggregation_bits.iter().collect::>(); - a.aggregation_bits = BitList::with_capacity(bits.len() + 1).unwrap(); - for (i, bit) in bits.into_iter().enumerate() { - a.aggregation_bits.set(i, bit).unwrap(); + |_, mut a, _| { + match &mut a { + Attestation::Base(ref mut att) => { + let bits = att.aggregation_bits.iter().collect::>(); + att.aggregation_bits = BitList::with_capacity(bits.len() + 1).unwrap(); + for (i, bit) in bits.into_iter().enumerate() { + att.aggregation_bits.set(i, bit).unwrap(); + } + } + Attestation::Electra(ref mut att) => { + let bits = att.aggregation_bits.iter().collect::>(); + att.aggregation_bits = BitList::with_capacity(bits.len() + 1).unwrap(); + for (i, bit) in bits.into_iter().enumerate() { + att.aggregation_bits.set(i, 
bit).unwrap(); + } + } } }, |_, err| { @@ -927,7 +1110,7 @@ async fn unaggregated_gossip_verification() { .inspect_unaggregate_err( "attestation with unknown head block", |_, a, _| { - a.data.beacon_block_root = Hash256::repeat_byte(42); + a.data_mut().beacon_block_root = Hash256::repeat_byte(42); }, |_, err| { assert!(matches!( @@ -949,7 +1132,7 @@ async fn unaggregated_gossip_verification() { .inspect_unaggregate_err( "attestation with invalid target root", |_, a, _| { - a.data.target.root = Hash256::repeat_byte(42); + a.data_mut().target.root = Hash256::repeat_byte(42); }, |_, err| { assert!(matches!( @@ -968,7 +1151,7 @@ async fn unaggregated_gossip_verification() { |tester, a, _| { let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign(&tester.attester_sk.sign(Hash256::repeat_byte(42))); - a.signature = agg_sig; + *a.signature_mut() = agg_sig; }, |_, err| { assert!(matches!( @@ -1055,7 +1238,7 @@ async fn attestation_that_skips_epochs() { .cloned() .expect("should have at least one attestation in committee"); - let block_root = attestation.data.beacon_block_root; + let block_root = attestation.data().beacon_block_root; let block_slot = harness .chain .store @@ -1066,7 +1249,7 @@ async fn attestation_that_skips_epochs() { .slot(); assert!( - attestation.data.slot - block_slot > E::slots_per_epoch() * 2, + attestation.data().slot - block_slot > E::slots_per_epoch() * 2, "the attestation must skip more than two epochs" ); @@ -1228,7 +1411,7 @@ async fn attestation_to_finalized_block() { .first() .cloned() .expect("should have at least one attestation in committee"); - assert_eq!(attestation.data.beacon_block_root, earlier_block_root); + assert_eq!(attestation.data().beacon_block_root, earlier_block_root); // Attestation should be rejected for attesting to a pre-finalization block. 
let res = harness @@ -1281,8 +1464,8 @@ async fn verify_aggregate_for_gossip_doppelganger_detection() { .verify_aggregated_attestation_for_gossip(&valid_aggregate) .expect("should verify aggregate attestation"); - let epoch = valid_aggregate.message.aggregate.data.target.epoch; - let index = valid_aggregate.message.aggregator_index as usize; + let epoch = valid_aggregate.message().aggregate().data().target.epoch; + let index = valid_aggregate.message().aggregator_index() as usize; assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated @@ -1338,7 +1521,7 @@ async fn verify_attestation_for_gossip_doppelganger_detection() { .verify_unaggregated_attestation_for_gossip(&valid_attestation, Some(subnet_id)) .expect("should verify attestation"); - let epoch = valid_attestation.data.target.epoch; + let epoch = valid_attestation.data().target.epoch; assert!(harness.chain.validator_seen_at_epoch(index, epoch)); // Check the correct beacon cache is populated diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 98a112daffe..d9c9a3b6a74 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -2,7 +2,9 @@ use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; use beacon_chain::{ - test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, + test_utils::{ + test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + }, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, }; use beacon_chain::{ @@ -13,7 +15,7 @@ use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ - common::get_indexed_attestation, + common::{attesting_indices_base, attesting_indices_electra}, per_block_processing::{per_block_processing, BlockSignatureStrategy}, per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; @@ -473,6 +475,7 @@ async fn assert_invalid_signature( ) .unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await; @@ -541,6 +544,7 @@ async fn invalid_signature_gossip_block() { signed_block.canonical_root(), Arc::new(signed_block), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await, @@ -664,37 +668,95 @@ async fn invalid_signature_attester_slashing() { for &block_index in BLOCK_INDICES { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); - let indexed_attestation = IndexedAttestation { - attesting_indices: vec![0].into(), - data: AttestationData { - slot: Slot::new(0), - index: 0, - beacon_block_root: Hash256::zero(), - source: Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), + let fork_name = harness.chain.spec.fork_name_at_slot::(Slot::new(0)); + + let attester_slashing = if fork_name.electra_enabled() { + let indexed_attestation = IndexedAttestationElectra { + attesting_indices: vec![0].into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + target: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, }, - target: Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), + signature: junk_aggregate_signature(), + }; + let attester_slashing = 
AttesterSlashingElectra { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + + AttesterSlashing::Electra(attester_slashing) + } else { + let indexed_attestation = IndexedAttestationBase { + attesting_indices: vec![0].into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + target: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, }, - }, - signature: junk_aggregate_signature(), - }; - let attester_slashing = AttesterSlashing { - attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation, + signature: junk_aggregate_signature(), + }; + let attester_slashing = AttesterSlashingBase { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + + AttesterSlashing::Base(attester_slashing) }; + let (mut block, signature) = snapshots[block_index] .beacon_block .as_ref() .clone() .deconstruct(); - block - .body_mut() - .attester_slashings_mut() - .push(attester_slashing) - .expect("should update attester slashing"); + match &mut block.body_mut() { + BeaconBlockBodyRefMut::Base(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_base().unwrap().clone()) + .expect("should update attester slashing"); + } + BeaconBlockBodyRefMut::Altair(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_base().unwrap().clone()) + .expect("should update attester slashing"); + } + BeaconBlockBodyRefMut::Bellatrix(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_base().unwrap().clone()) + .expect("should update attester slashing"); + } + BeaconBlockBodyRefMut::Capella(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_base().unwrap().clone()) + .expect("should update attester slashing"); + } + BeaconBlockBodyRefMut::Deneb(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_base().unwrap().clone()) + .expect("should update attester slashing"); + } + BeaconBlockBodyRefMut::Electra(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_electra().unwrap().clone()) + .expect("should update attester slashing"); + } + } snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots, &mut chain_segment_blobs); @@ -724,8 +786,34 @@ async fn invalid_signature_attestation() { .as_ref() .clone() .deconstruct(); - if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { - attestation.signature = junk_aggregate_signature(); + match &mut block.body_mut() { + BeaconBlockBodyRefMut::Base(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Altair(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Bellatrix(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Capella(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Deneb(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Electra(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), + }; + + if 
block.body().attestations_len() > 0 { snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots, &mut chain_segment_blobs); @@ -875,6 +963,7 @@ async fn block_gossip_verification() { gossip_verified.block_root, gossip_verified, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1123,8 +1212,14 @@ async fn block_gossip_verification() { #[tokio::test] async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); + let spec = Arc::new(test_spec::()); let slasher = Arc::new( - Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), + Slasher::open( + SlasherConfig::new(slasher_dir.path().into()), + spec, + test_logger(), + ) + .unwrap(), ); let inner_slasher = slasher.clone(); @@ -1165,6 +1260,7 @@ async fn verify_block_for_gossip_slashing_detection() { verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1187,28 +1283,43 @@ async fn verify_block_for_gossip_doppelganger_detection() { let state = harness.get_current_state(); let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await; - + let attestations = block + .message() + .body() + .attestations() + .map(|att| att.clone_as_attestation()) + .collect::>(); let verified_block = harness.chain.verify_block_for_gossip(block).await.unwrap(); - let attestations = verified_block.block.message().body().attestations().clone(); harness .chain .process_block( verified_block.block_root, verified_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await .unwrap(); for att in attestations.iter() { - let epoch = att.data.target.epoch; - let committee = state - .get_beacon_committee(att.data.slot, att.data.index) - .unwrap(); - let indexed_attestation = get_indexed_attestation(committee.committee, att).unwrap(); + let epoch = att.data().target.epoch; + let indexed_attestation = match att { + Attestation::Base(att) => { + let committee = state + .get_beacon_committee(att.data.slot, att.data.index) + .unwrap(); + attesting_indices_base::get_indexed_attestation(committee.committee, att).unwrap() + } + Attestation::Electra(att) => { + attesting_indices_electra::get_indexed_attestation_from_state(&state, att).unwrap() + } + }; - for &index in &indexed_attestation.attesting_indices { + for index in match indexed_attestation { + IndexedAttestation::Base(att) => att.attesting_indices.into_iter(), + IndexedAttestation::Electra(att) => att.attesting_indices.into_iter(), + } { let index = index as usize; assert!(harness.chain.validator_seen_at_epoch(index, epoch)); @@ -1342,6 +1453,7 @@ async fn add_base_block_to_altair_chain() { base_block.canonical_root(), Arc::new(base_block.clone()), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1477,6 +1589,7 @@ async fn add_altair_block_to_base_chain() { altair_block.canonical_root(), Arc::new(altair_block.clone()), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 0ef348319af..4dc7d20e227 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -25,7 +25,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; -use tree_hash::TreeHash; 
use types::*; const VALIDATOR_COUNT: usize = 32; @@ -702,6 +701,7 @@ async fn invalidates_all_descendants() { fork_block.canonical_root(), fork_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -802,6 +802,7 @@ async fn switches_heads() { fork_block.canonical_root(), fork_block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1061,7 +1062,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, + rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1191,15 +1192,23 @@ async fn attesting_to_optimistic_head() { .produce_unaggregated_attestation(Slot::new(0), 0) .unwrap(); - attestation.aggregation_bits.set(0, true).unwrap(); - attestation.data.slot = slot; - attestation.data.beacon_block_root = root; + match &mut attestation { + Attestation::Base(ref mut att) => { + att.aggregation_bits.set(0, true).unwrap(); + } + Attestation::Electra(ref mut att) => { + att.aggregation_bits.set(0, true).unwrap(); + } + } + + attestation.data_mut().slot = slot; + attestation.data_mut().beacon_block_root = root; rig.harness .chain .naive_aggregation_pool .write() - .insert(&attestation) + .insert(attestation.to_ref()) .unwrap(); attestation @@ -1214,16 +1223,13 @@ async fn attesting_to_optimistic_head() { let get_aggregated = || { rig.harness .chain - .get_aggregated_attestation(&attestation.data) + .get_aggregated_attestation(attestation.to_ref()) }; let get_aggregated_by_slot_and_root = || { rig.harness .chain - .get_aggregated_attestation_by_slot_and_root( - attestation.data.slot, - &attestation.data.tree_hash_root(), - ) + .get_aggregated_attestation(attestation.to_ref()) }; /* @@ -1352,6 +1358,7 @@ async fn build_optimistic_chain( block.canonical_root(), block, NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -1926,6 +1933,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { fork_block.canonical_root(), fork_block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ba8a6bf7016..e675d6956e8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -35,7 +35,6 @@ use store::{ }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; -use tree_hash::TreeHash; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -199,8 +198,8 @@ async fn heal_freezer_block_roots_with_skip_slots() { ); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let current_state = harness.get_current_state(); - let state_root = harness.get_current_state().tree_hash_root(); + let mut current_state = harness.get_current_state(); + let state_root = current_state.canonical_root().unwrap(); let all_validators = &harness.get_all_validators(); harness .add_attested_blocks_at_slots( @@ -606,17 +605,18 @@ async fn epoch_boundary_state_attestation_processing() { for (attestation, subnet_id) in late_attestations.into_iter().flatten() { // load_epoch_boundary_state is idempotent! 
- let block_root = attestation.data.beacon_block_root; + let block_root = attestation.data().beacon_block_root; let block = store .get_blinded_block(&block_root) .unwrap() .expect("block exists"); - let epoch_boundary_state = store + let mut epoch_boundary_state = store .load_epoch_boundary_state(&block.state_root()) .expect("no error") .expect("epoch boundary state exists"); + let ebs_state_root = epoch_boundary_state.canonical_root().unwrap(); let ebs_of_ebs = store - .load_epoch_boundary_state(&epoch_boundary_state.canonical_root()) + .load_epoch_boundary_state(&ebs_state_root) .expect("no error") .expect("ebs of ebs exists"); assert_eq!(epoch_boundary_state, ebs_of_ebs); @@ -629,7 +629,7 @@ async fn epoch_boundary_state_attestation_processing() { .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); - let expected_attestation_slot = attestation.data.slot; + let expected_attestation_slot = attestation.data().slot; // Extra -1 to handle gossip clock disparity. let expected_earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1; @@ -1014,6 +1014,7 @@ async fn multiple_attestations_per_block() { .await; let head = harness.chain.head_snapshot(); + let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -1022,16 +1023,29 @@ async fn multiple_attestations_per_block() { for snapshot in harness.chain.chain_dump().unwrap() { let slot = snapshot.beacon_block.slot(); - assert_eq!( - snapshot - .beacon_block - .as_ref() - .message() - .body() - .attestations() - .len() as u64, - if slot <= 1 { 0 } else { committees_per_slot } - ); + let fork_name = harness.chain.spec.fork_name_at_slot::(slot); + + if fork_name.electra_enabled() { + assert_eq!( + snapshot + .beacon_block + .as_ref() + .message() + .body() + .attestations_len() as u64, + if slot <= 1 { 0 } else { 1 } + ); + } else { + assert_eq!( + snapshot + .beacon_block + .as_ref() + .message() + .body() + .attestations_len() as u64, + if slot <= 1 { 0 } else { committees_per_slot } + ); + } } } @@ -2458,6 +2472,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { full_block.canonical_root(), RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2589,9 +2604,9 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap() .map(Result::unwrap) { - let state = store.get_state(&state_root, Some(slot)).unwrap().unwrap(); + let mut state = store.get_state(&state_root, Some(slot)).unwrap().unwrap(); assert_eq!(state.slot(), slot); - assert_eq!(state.canonical_root(), state_root); + assert_eq!(state.canonical_root().unwrap(), state_root); } // Anchor slot is still set to the slot of the checkpoint block. 
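The `multiple_attestations_per_block` hunk above encodes the Electra change to on-chain attestations: before Electra a block carries one aggregate per committee, while Electra's aggregation across committees lets a single attestation cover the whole slot. A minimal sketch of the count the test asserts; the helper name and its inputs are illustrative, not Lighthouse API:

```rust
// Illustrative only: mirrors the assertions in `multiple_attestations_per_block`.
fn expected_attestations_in_block(
    slot: u64,
    committees_per_slot: u64,
    electra_enabled: bool,
) -> u64 {
    if slot <= 1 {
        // The harness produces no attestations for the first couple of blocks.
        0
    } else if electra_enabled {
        // One attestation aggregated across all committees of the slot.
        1
    } else {
        // One aggregate per committee, as before Electra.
        committees_per_slot
    }
}
```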
@@ -2676,6 +2691,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { invalid_fork_block.canonical_root(), invalid_fork_block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2689,6 +2705,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { valid_fork_block.canonical_root(), valid_fork_block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -3033,13 +3050,7 @@ async fn schema_downgrade_to_min_version() { ) .await; - let min_version = if harness.spec.deneb_fork_epoch.is_some() { - // Can't downgrade beyond V18 once Deneb is reached, for simplicity don't test that - // at all if Deneb is enabled. - SchemaVersion(18) - } else { - SchemaVersion(16) - }; + let min_version = SchemaVersion(19); // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 0e4745ff6b8..242ed558475 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -318,7 +318,6 @@ async fn aggregated_gossip_verification() { * The contribution_and_proof.selection_proof is a valid signature of the `SyncAggregatorSelectionData` * derived from the contribution by the validator with index `contribution_and_proof.aggregator_index`. */ - assert_invalid!( "aggregate with bad selection proof signature", { @@ -354,7 +353,6 @@ async fn aggregated_gossip_verification() { * derived from the participation info in `aggregation_bits` for the subcommittee specified by * the `contribution.subcommittee_index`. */ - assert_invalid!( "aggregate with bad aggregate signature", { @@ -450,6 +448,7 @@ async fn aggregated_gossip_verification() { root: contribution.beacon_block_root, subcommittee_index: contribution.subcommittee_index, }; + assert_invalid!( "aggregate that has already been seen", valid_aggregate.clone(), diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index e27180a002c..2a0854e78f6 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -12,7 +12,8 @@ use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use types::{ - BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, EthSpec, Hash256, Keypair, MinimalEthSpec, + RelativeEpoch, Slot, }; // Should ideally be divisible by 3. 
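A recurring change across these test files is the extra `BlockImportSource::Lookup` argument threaded through every `process_block` call, matched by the `BlockImportSource` import added above. A hedged sketch of the idea follows; only the `Lookup` variant is visible in this diff, so the other variants and the labelling helper are assumptions, not the real definition in the `types` crate:

```rust
// Illustrative only: a stand-in for the real `BlockImportSource` enum.
// Variants other than `Lookup` are assumptions made for the example.
#[derive(Debug, Clone, Copy)]
enum BlockImportSource {
    Gossip,
    Lookup,
    RangeSync,
    HttpApi,
}

// The tests import blocks they fetched themselves, so they tag the call site
// as a lookup rather than, say, gossip. This lets logs and metrics record
// where an imported block came from.
fn label(source: BlockImportSource) -> &'static str {
    match source {
        BlockImportSource::Gossip => "gossip",
        BlockImportSource::Lookup => "lookup",
        BlockImportSource::RangeSync => "range sync",
        BlockImportSource::HttpApi => "http api",
    }
}
```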
@@ -573,7 +574,7 @@ async fn attestations_with_increasing_slots() { .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); - let expected_attestation_slot = attestation.data.slot; + let expected_attestation_slot = attestation.data().slot; let expected_earliest_permissible_slot = current_slot - MinimalEthSpec::slots_per_epoch() - 1; @@ -686,6 +687,7 @@ async fn run_skip_slot_test(skip_slots: u64) { harness_a.chain.head_snapshot().beacon_block_root, harness_a.get_head_block(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 6c49a28ec87..3373dd1c72f 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -15,10 +15,7 @@ strum = { workspace = true } task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } -hex = { workspace = true } -derivative = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } lazy_static = { workspace = true } lighthouse_metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index fee55b39adc..5bf13d82b7b 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -60,7 +60,9 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; -use types::{Attestation, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{ + Attestation, BeaconState, ChainSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SubnetId, +}; use types::{EthSpec, Slot}; use work_reprocessing_queue::IgnoredRpcBlock; use work_reprocessing_queue::{ @@ -85,123 +87,98 @@ const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_QUEUE_LEN / 4; -/// The maximum number of queued `Attestation` objects that will be stored before we start dropping -/// them. -const MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN: usize = 16_384; - -/// The maximum number of queued `Attestation` objects that will be stored before we start dropping -/// them. -const MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 8_192; - -/// The maximum number of queued `SignedAggregateAndProof` objects that will be stored before we -/// start dropping them. -const MAX_AGGREGATED_ATTESTATION_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `SignedAggregateAndProof` objects that will be stored before we -/// start dropping them. -const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedBeaconBlock` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobSidecar` objects received on gossip that -/// will be stored before we start dropping them. -const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but -/// within acceptable clock disparity) that will be queued before we start dropping them. 
-const MAX_DELAYED_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `SignedVoluntaryExit` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_EXIT_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `ProposerSlashing` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `AttesterSlashing` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; - -/// The maximum number of queued `LightClientFinalityUpdate` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored -/// before we start dropping them. -const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored -/// for reprocessing before we start dropping them. -const MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN: usize = 128; - -/// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping -/// them. -const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; - -/// The maximum number of queued `SignedContributionAndProof` objects that will be stored before we -/// start dropping them. -const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024; - -/// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobSidecar` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `Vec` objects received during syncing that will -/// be stored before we start dropping them. -const MAX_CHAIN_SEGMENT_QUEUE_LEN: usize = 64; - -/// The maximum number of queued `StatusMessage` objects received from the network RPC that will be -/// stored before we start dropping them. -const MAX_STATUS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlocksByRangeRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOCKS_BY_RANGE_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobsByRangeRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1024; - -/// The maximum number of queued `BlocksByRootRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `BlobsByRootRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024; - -/// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them. -/// -/// This value is set high to accommodate the large spike that is expected immediately after Capella -/// is activated. 
-const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384; - -/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024; - -/// The maximum number of queued `LightClientOptimisticUpdateRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 512; - -/// The maximum number of queued `LightClientFinalityUpdateRequest` objects received from the network RPC that -/// will be stored before we start dropping them. -const MAX_LIGHT_CLIENT_FINALITY_UPDATE_QUEUE_LEN: usize = 512; - -/// The maximum number of priority-0 (highest priority) messages that will be queued before -/// they begin to be dropped. -const MAX_API_REQUEST_P0_QUEUE_LEN: usize = 1_024; +/// Over-provision queues based on active validator count by some factor. The beacon chain has +/// strict churns that prevent the validator set size from changing rapidly. By over-provisioning +/// slightly, we don't need to adjust the queues during the lifetime of a process. +const ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT: usize = 110; + +/// Maximum number of queued items that will be stored before dropping them +pub struct BeaconProcessorQueueLengths { + aggregate_queue: usize, + attestation_queue: usize, + unknown_block_aggregate_queue: usize, + unknown_block_attestation_queue: usize, + sync_message_queue: usize, + sync_contribution_queue: usize, + gossip_voluntary_exit_queue: usize, + gossip_proposer_slashing_queue: usize, + gossip_attester_slashing_queue: usize, + finality_update_queue: usize, + optimistic_update_queue: usize, + unknown_light_client_update_queue: usize, + rpc_block_queue: usize, + rpc_blob_queue: usize, + chain_segment_queue: usize, + backfill_chain_segment: usize, + gossip_block_queue: usize, + gossip_blob_queue: usize, + delayed_block_queue: usize, + status_queue: usize, + bbrange_queue: usize, + bbroots_queue: usize, + blbroots_queue: usize, + blbrange_queue: usize, + gossip_bls_to_execution_change_queue: usize, + lc_bootstrap_queue: usize, + lc_optimistic_update_queue: usize, + lc_finality_update_queue: usize, + api_request_p0_queue: usize, + api_request_p1_queue: usize, +} -/// The maximum number of priority-1 (second-highest priority) messages that will be queued before -/// they begin to be dropped. -const MAX_API_REQUEST_P1_QUEUE_LEN: usize = 1_024; +impl BeaconProcessorQueueLengths { + pub fn from_state( + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + let active_validator_count = + match state.get_cached_active_validator_indices(RelativeEpoch::Current) { + Ok(indices) => indices.len(), + Err(_) => state + .get_active_validator_indices(state.current_epoch(), spec) + .map_err(|e| format!("Error computing active indices: {:?}", e))? 
+ .len(), + }; + let active_validator_count = + (ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT * active_validator_count) / 100; + let slots_per_epoch = E::slots_per_epoch() as usize; + + Ok(Self { + aggregate_queue: 4096, + unknown_block_aggregate_queue: 1024, + // Capacity for a full slot's worth of attestations if subscribed to all subnets + attestation_queue: active_validator_count / slots_per_epoch, + // Capacity for a full slot's worth of attestations if subscribed to all subnets + unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + sync_message_queue: 2048, + sync_contribution_queue: 1024, + gossip_voluntary_exit_queue: 4096, + gossip_proposer_slashing_queue: 4096, + gossip_attester_slashing_queue: 4096, + finality_update_queue: 1024, + optimistic_update_queue: 1024, + unknown_light_client_update_queue: 128, + rpc_block_queue: 1024, + rpc_blob_queue: 1024, + chain_segment_queue: 64, + backfill_chain_segment: 64, + gossip_block_queue: 1024, + gossip_blob_queue: 1024, + delayed_block_queue: 1024, + status_queue: 1024, + bbrange_queue: 1024, + bbroots_queue: 1024, + blbroots_queue: 1024, + blbrange_queue: 1024, + gossip_bls_to_execution_change_queue: 16384, + lc_bootstrap_queue: 1024, + lc_optimistic_update_queue: 512, + lc_finality_update_queue: 512, + api_request_p0_queue: 1024, + api_request_p1_queue: 1024, + }) + } +} /// The name of the manager tokio task. const MANAGER_TASK_NAME: &str = "beacon_processor_manager"; @@ -772,6 +749,7 @@ impl BeaconProcessor { /// /// The optional `work_journal_tx` allows for an outside process to receive a log of all work /// events processed by `self`. This should only be used during testing. + #[allow(clippy::too_many_arguments)] pub fn spawn_manager( mut self, event_rx: mpsc::Receiver>, @@ -780,6 +758,7 @@ impl BeaconProcessor { work_journal_tx: Option>, slot_clock: S, maximum_gossip_clock_disparity: Duration, + queue_lengths: BeaconProcessorQueueLengths, ) -> Result<(), String> { // Used by workers to communicate that they are finished a task. let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN); @@ -787,61 +766,61 @@ impl BeaconProcessor { // Using LIFO queues for attestations since validator profits rely upon getting fresh // attestations into blocks. Additionally, later attestations contain more information than // earlier ones, so we consider them more valuable. 
- let mut aggregate_queue = LifoQueue::new(MAX_AGGREGATED_ATTESTATION_QUEUE_LEN); + let mut aggregate_queue = LifoQueue::new(queue_lengths.aggregate_queue); let mut aggregate_debounce = TimeLatch::default(); - let mut attestation_queue = LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_QUEUE_LEN); + let mut attestation_queue = LifoQueue::new(queue_lengths.attestation_queue); let mut attestation_debounce = TimeLatch::default(); let mut unknown_block_aggregate_queue = - LifoQueue::new(MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + LifoQueue::new(queue_lengths.unknown_block_aggregate_queue); let mut unknown_block_attestation_queue = - LifoQueue::new(MAX_UNAGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN); + LifoQueue::new(queue_lengths.unknown_block_attestation_queue); - let mut sync_message_queue = LifoQueue::new(MAX_SYNC_MESSAGE_QUEUE_LEN); - let mut sync_contribution_queue = LifoQueue::new(MAX_SYNC_CONTRIBUTION_QUEUE_LEN); + let mut sync_message_queue = LifoQueue::new(queue_lengths.sync_message_queue); + let mut sync_contribution_queue = LifoQueue::new(queue_lengths.sync_contribution_queue); // Using a FIFO queue for voluntary exits since it prevents exit censoring. I don't have // a strong feeling about queue type for exits. - let mut gossip_voluntary_exit_queue = FifoQueue::new(MAX_GOSSIP_EXIT_QUEUE_LEN); + let mut gossip_voluntary_exit_queue = + FifoQueue::new(queue_lengths.gossip_voluntary_exit_queue); // Using a FIFO queue for slashing to prevent people from flushing their slashings from the // queues with lots of junk messages. let mut gossip_proposer_slashing_queue = - FifoQueue::new(MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_proposer_slashing_queue); let mut gossip_attester_slashing_queue = - FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_attester_slashing_queue); // Using a FIFO queue for light client updates to maintain sequence order. - let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); - let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + let mut finality_update_queue = FifoQueue::new(queue_lengths.finality_update_queue); + let mut optimistic_update_queue = FifoQueue::new(queue_lengths.optimistic_update_queue); let mut unknown_light_client_update_queue = - FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_REPROCESS_QUEUE_LEN); + FifoQueue::new(queue_lengths.unknown_light_client_update_queue); // Using a FIFO queue since blocks need to be imported sequentially. 
- let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); - let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN); - let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); - let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); - let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); - let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN); - let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); - - let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN); - let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN); - let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN); - let mut blbroots_queue = FifoQueue::new(MAX_BLOBS_BY_ROOTS_QUEUE_LEN); - let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN); + let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); + let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); + let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); + let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); + let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); + let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); + + let mut status_queue = FifoQueue::new(queue_lengths.status_queue); + let mut bbrange_queue = FifoQueue::new(queue_lengths.bbrange_queue); + let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); + let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); + let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); let mut gossip_bls_to_execution_change_queue = - FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN); + FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); - let mut lc_bootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN); + let mut lc_bootstrap_queue = FifoQueue::new(queue_lengths.lc_bootstrap_queue); let mut lc_optimistic_update_queue = - FifoQueue::new(MAX_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUEUE_LEN); - let mut lc_finality_update_queue = - FifoQueue::new(MAX_LIGHT_CLIENT_FINALITY_UPDATE_QUEUE_LEN); + FifoQueue::new(queue_lengths.lc_optimistic_update_queue); + let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); - let mut api_request_p0_queue = FifoQueue::new(MAX_API_REQUEST_P0_QUEUE_LEN); - let mut api_request_p1_queue = FifoQueue::new(MAX_API_REQUEST_P1_QUEUE_LEN); + let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); + let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to // receive them back once they are ready (`ready_work_rx`). 
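The `BeaconProcessorQueueLengths` hunks above replace the fixed `MAX_*_QUEUE_LEN` constants with lengths derived from the active validator count, over-provisioned by 110% and spread across the slots of an epoch for the two attestation queues. A small sketch of that arithmetic with hypothetical numbers; the helper is ours, not part of the crate:

```rust
// Illustrative sketch of the queue sizing used by
// `BeaconProcessorQueueLengths::from_state` for the attestation queues.
fn attestation_queue_len(active_validator_count: usize, slots_per_epoch: usize) -> usize {
    // Mirrors ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT above.
    const OVERPROVISION_PERCENT: usize = 110;
    let overprovisioned = (OVERPROVISION_PERCENT * active_validator_count) / 100;
    // Roughly one slot's worth of attestations when subscribed to all subnets.
    overprovisioned / slots_per_epoch
}

fn main() {
    // Hypothetical mainnet-like numbers: 1,000,000 active validators,
    // 32 slots per epoch => 1_100_000 / 32 = 34_375 queued attestations.
    assert_eq!(attestation_queue_len(1_000_000, 32), 34_375);
}
```

The fixed constants this replaces could not track validator-set growth; deriving them from the state keeps queue capacity proportional to expected message volume without runtime resizing.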
diff --git a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs index 496fa683d2c..137010557da 100644 --- a/beacon_node/beacon_processor/src/work_reprocessing_queue.rs +++ b/beacon_node/beacon_processor/src/work_reprocessing_queue.rs @@ -28,7 +28,6 @@ use std::time::Duration; use strum::AsRefStr; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; -use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; use types::{EthSpec, Hash256, Slot}; @@ -196,8 +195,6 @@ enum InboundEvent { ReadyLightClientUpdate(QueuedLightClientUpdateId), /// A backfill batch that was queued is ready for processing. ReadyBackfillSync(QueuedBackfillBatch), - /// A `DelayQueue` returned an error. - DelayQueueError(TimeError, &'static str), /// A message sent to the `ReprocessQueue` Msg(ReprocessQueueMessage), } @@ -279,54 +276,42 @@ impl Stream for ReprocessQueue { // The sequential nature of blockchains means it is generally better to try and import all // existing blocks before new ones. match self.gossip_block_delay_queue.poll_expired(cx) { - Poll::Ready(Some(Ok(queued_block))) => { + Poll::Ready(Some(queued_block)) => { return Poll::Ready(Some(InboundEvent::ReadyGossipBlock( queued_block.into_inner(), ))); } - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "gossip_block_queue"))); - } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. Poll::Ready(None) | Poll::Pending => (), } match self.rpc_block_delay_queue.poll_expired(cx) { - Poll::Ready(Some(Ok(queued_block))) => { + Poll::Ready(Some(queued_block)) => { return Poll::Ready(Some(InboundEvent::ReadyRpcBlock(queued_block.into_inner()))); } - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "rpc_block_queue"))); - } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. Poll::Ready(None) | Poll::Pending => (), } match self.attestations_delay_queue.poll_expired(cx) { - Poll::Ready(Some(Ok(attestation_id))) => { + Poll::Ready(Some(attestation_id)) => { return Poll::Ready(Some(InboundEvent::ReadyAttestation( attestation_id.into_inner(), ))); } - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "attestations_queue"))); - } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. Poll::Ready(None) | Poll::Pending => (), } match self.lc_updates_delay_queue.poll_expired(cx) { - Poll::Ready(Some(Ok(lc_id))) => { + Poll::Ready(Some(lc_id)) => { return Poll::Ready(Some(InboundEvent::ReadyLightClientUpdate( lc_id.into_inner(), ))); } - Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "lc_updates_queue"))); - } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. 
Poll::Ready(None) | Poll::Pending => (), @@ -786,14 +771,6 @@ impl ReprocessQueue { ); } } - InboundEvent::DelayQueueError(e, queue_name) => { - crit!( - log, - "Failed to poll queue"; - "queue" => queue_name, - "e" => ?e - ) - } InboundEvent::ReadyAttestation(queued_id) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS, diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 21b9b841334..c3658f45c73 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -9,5 +9,4 @@ reqwest = { workspace = true } sensitive_url = { workspace = true } eth2 = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 2b373292f3d..91ee00a65f7 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -29,10 +29,13 @@ pub struct Timeouts { get_builder_status: Duration, } -impl Default for Timeouts { - fn default() -> Self { +impl Timeouts { + fn new(get_header_timeout: Option) -> Self { + let get_header = + get_header_timeout.unwrap_or(Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS)); + Self { - get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + get_header, post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), @@ -49,13 +52,17 @@ pub struct BuilderHttpClient { } impl BuilderHttpClient { - pub fn new(server: SensitiveUrl, user_agent: Option) -> Result { + pub fn new( + server: SensitiveUrl, + user_agent: Option, + builder_header_timeout: Option, + ) -> Result { let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); let client = reqwest::Client::builder().user_agent(&user_agent).build()?; Ok(Self { client, server, - timeouts: Timeouts::default(), + timeouts: Timeouts::new(builder_header_timeout), user_agent, }) } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 16c4a947a66..4ac035d17be 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -16,8 +16,6 @@ store = { workspace = true } network = { workspace = true } timer = { path = "../timer" } lighthouse_network = { workspace = true } -logging = { workspace = true } -parking_lot = { workspace = true } types = { workspace = true } eth2_config = { workspace = true } slot_clock = { workspace = true } @@ -44,6 +42,4 @@ slasher_service = { path = "../../slasher/service" } monitoring_api = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } -num_cpus = { workspace = true } ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 2af4e74c224..393ce35f000 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -19,8 +19,8 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; -use beacon_processor::BeaconProcessorConfig; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; +use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; use 
eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ @@ -884,6 +884,14 @@ where None, beacon_chain.slot_clock.clone(), beacon_chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state, + &beacon_chain.spec, + )?, )?; } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index fd92c282554..e6042103e16 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,5 +1,3 @@ -extern crate slog; - mod compute_light_client_updates; pub mod config; mod metrics; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index a6fd07789d8..632188014eb 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -434,11 +434,9 @@ async fn capella_readiness_logging( .canonical_head .cached_head() .snapshot - .beacon_block - .message() - .body() - .execution_payload() - .map_or(false, |payload| payload.withdrawals_root().is_ok()); + .beacon_state + .fork_name_unchecked() + >= ForkName::Capella; let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -496,11 +494,9 @@ async fn deneb_readiness_logging( .canonical_head .cached_head() .snapshot - .beacon_block - .message() - .body() - .execution_payload() - .map_or(false, |payload| payload.blob_gas_used().is_ok()); + .beacon_state + .fork_name_unchecked() + >= ForkName::Deneb; let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -549,17 +545,13 @@ async fn electra_readiness_logging( beacon_chain: &BeaconChain, log: &Logger, ) { - // TODO(electra): Once Electra has features, this code can be swapped back. - let electra_completed = false; - //let electra_completed = beacon_chain - // .canonical_head - // .cached_head() - // .snapshot - // .beacon_block - // .message() - // .body() - // .execution_payload() - // .map_or(false, |payload| payload.electra_placeholder().is_ok()); + let electra_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .fork_name_unchecked() + .electra_enabled(); let has_execution_layer = beacon_chain.execution_layer.is_some(); @@ -729,10 +721,10 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger } } -/// Returns the peer count, returning something helpful if it's `usize::max_value` (effectively a +/// Returns the peer count, returning something helpful if it's `usize::MAX` (effectively a /// `None` value). fn peer_count_pretty(peer_count: usize) -> String { - if peer_count == usize::max_value() { + if peer_count == usize::MAX { String::from("--") } else { format!("{}", peer_count) @@ -832,7 +824,7 @@ impl Speedo { /// Returns the average of the speeds between each observation. /// - /// Does not gracefully handle slots that are above `u32::max_value()`. + /// Does not gracefully handle slots that are above `u32::MAX`. 
pub fn slots_per_second(&self) -> Option { let speeds = self .0 diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 2f716cd19bc..2ffca4a5710 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -11,12 +11,9 @@ sloggers = { workspace = true } environment = { workspace = true } [dependencies] -reqwest = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } -serde_json = { workspace = true } serde = { workspace = true } -hex = { workspace = true } types = { workspace = true } merkle_proof = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 75391e58a0d..b443f739e81 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -54,7 +54,7 @@ pub enum Error { pub type SszDepositCache = SszDepositCacheV13; #[superstruct( - variants(V1, V13), + variants(V13), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] @@ -62,11 +62,8 @@ pub struct SszDepositCache { pub logs: Vec, pub leaves: Vec, pub deposit_contract_deploy_block: u64, - #[superstruct(only(V13))] pub finalized_deposit_count: u64, - #[superstruct(only(V13))] pub finalized_block_height: u64, - #[superstruct(only(V13))] pub deposit_tree_snapshot: Option, pub deposit_roots: Vec, } diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs index 0468a02d2e3..452922b173c 100644 --- a/beacon_node/eth1/src/inner.rs +++ b/beacon_node/eth1/src/inner.rs @@ -2,7 +2,7 @@ use crate::service::endpoint_from_config; use crate::Config; use crate::{ block_cache::{BlockCache, Eth1Block}, - deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}, + deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}, }; use execution_layer::HttpJsonRpc; use parking_lot::RwLock; @@ -90,15 +90,12 @@ impl Inner { pub type SszEth1Cache = SszEth1CacheV13; #[superstruct( - variants(V1, V13), + variants(V13), variant_attributes(derive(Encode, Decode, Clone)), no_enum )] pub struct SszEth1Cache { pub block_cache: BlockCache, - #[superstruct(only(V1))] - pub deposit_cache: SszDepositCacheV1, - #[superstruct(only(V13))] pub deposit_cache: SszDepositCacheV13, #[ssz(with = "four_byte_option_u64")] pub last_processed_block: Option, diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index 3b288de4901..9c4f9a1d8d5 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate lazy_static; - mod block_cache; mod deposit_cache; mod inner; @@ -8,9 +5,9 @@ mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; -pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13}; +pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}; pub use execution_layer::http::deposit_log::DepositLog; -pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13}; +pub use inner::{SszEth1Cache, SszEth1CacheV13}; pub use service::{ BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, DEFAULT_CHAIN_ID, diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index 5441b40d7e9..ad94d42ecb9 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,5 +1,7 @@ pub use lighthouse_metrics::*; +use lazy_static::lazy_static; + lazy_static! 
{ /* * Eth1 blocks diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 31082394baf..9cc1da13826 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -23,7 +23,7 @@ use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; /// Indicates the default eth1 chain id we use for the deposit contract. -pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; +pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Mainnet; /// Indicates the default eth1 endpoint. pub const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545"; @@ -266,7 +266,7 @@ pub struct Config { pub endpoint: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, - /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). + /// The eth1 chain id where the deposit contract is deployed (Holesky/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. /// @@ -450,11 +450,6 @@ impl Service { /// Returns the follow distance that has been shortened to accommodate for differences in the /// spacing between blocks. - /// - /// ## Notes - /// - /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is - /// actually `15` on Goerli. pub fn cache_follow_distance(&self) -> u64 { self.config().cache_follow_distance() } @@ -858,7 +853,7 @@ impl Service { let max_log_requests_per_update = self .config() .max_log_requests_per_update - .unwrap_or_else(usize::max_value); + .unwrap_or(usize::MAX); let range = { match new_block_numbers { @@ -1001,10 +996,7 @@ impl Service { ) -> Result { let client = self.client(); let block_cache_truncation = self.config().block_cache_truncation; - let max_blocks_per_update = self - .config() - .max_blocks_per_update - .unwrap_or_else(usize::max_value); + let max_blocks_per_update = self.config().max_blocks_per_update.unwrap_or(usize::MAX); let range = { match new_block_numbers { @@ -1030,7 +1022,7 @@ impl Service { let range_size = range.end() - range.start(); let max_size = block_cache_truncation .map(|n| n as u64) - .unwrap_or_else(u64::max_value); + .unwrap_or_else(|| u64::MAX); if range_size > max_size { // If the range of required blocks is larger than `max_size`, drop all // existing blocks and download `max_size` count of blocks. 
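The eth1 service hunk above replaces the deprecated `usize::max_value()` / `u64::max_value()` calls with the `MAX` associated constants and drops the closure wherever the fallback is a plain constant. A minimal standalone sketch of the resulting idiom, using illustrative function names rather than the actual service code:

fn max_blocks_per_update(configured: Option<usize>) -> usize {
    // Constant fallback: `unwrap_or` suffices, no closure required.
    configured.unwrap_or(usize::MAX)
}

fn block_cache_truncation(configured: Option<usize>) -> u64 {
    // Same pattern after widening the configured value to `u64`.
    configured.map(|n| n as u64).unwrap_or(u64::MAX)
}

fn main() {
    assert_eq!(max_blocks_per_update(None), usize::MAX);
    assert_eq!(max_blocks_per_update(Some(512)), 512);
    assert_eq!(block_cache_truncation(None), u64::MAX);
}

`unwrap_or_else` remains the right choice only where the fallback is actually computed at call time.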
diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 28cd16e4ef9..ff147ad3b4c 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -8,9 +8,7 @@ edition = { workspace = true } [dependencies] types = { workspace = true } tokio = { workspace = true } -async-trait = "0.1.51" slog = { workspace = true } -futures = { workspace = true } sensitive_url = { workspace = true } reqwest = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ce1e0fec5dd..6a56a5d076f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -3,7 +3,8 @@ use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, + ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -19,6 +20,7 @@ use reqwest::StatusCode; use serde::{Deserialize, Serialize}; use strum::IntoStaticStr; use superstruct::superstruct; +use types::execution_payload::{DepositRequests, WithdrawalRequests}; pub use types::{ Address, BeaconBlockRef, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, @@ -40,6 +42,8 @@ pub use new_payload_request::{ NewPayloadRequestDeneb, NewPayloadRequestElectra, }; +use self::json_structures::{JsonDepositRequest, JsonWithdrawalRequest}; + pub const LATEST_TAG: &str = "latest"; pub type PayloadId = [u8; 8]; @@ -60,9 +64,10 @@ pub enum Error { ExecutionHeadBlockNotFound, ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, - TransitionConfigurationMismatch, SszError(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), + DeserializeDepositRequests(ssz_types::Error), + DeserializeWithdrawalRequests(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, RequiredMethodUnsupported(&'static str), @@ -197,6 +202,10 @@ pub struct ExecutionBlockWithTransactions { #[superstruct(only(Deneb, Electra))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, + #[superstruct(only(Electra))] + pub deposit_requests: Vec, + #[superstruct(only(Electra))] + pub withdrawal_requests: Vec, } impl TryFrom> for ExecutionBlockWithTransactions { @@ -304,6 +313,16 @@ impl TryFrom> for ExecutionBlockWithTransactions .collect(), blob_gas_used: block.blob_gas_used, excess_blob_gas: block.excess_blob_gas, + deposit_requests: block + .deposit_requests + .into_iter() + .map(|deposit| deposit.into()) + .collect(), + withdrawal_requests: block + .withdrawal_requests + .into_iter() + .map(|withdrawal| withdrawal.into()) + .collect(), }) } }; @@ -526,6 +545,8 @@ impl GetPayloadResponse { pub struct ExecutionPayloadBodyV1 { pub transactions: Transactions, pub withdrawals: Option>, + pub deposit_requests: Option>, + pub withdrawal_requests: Option>, } impl ExecutionPayloadBodyV1 { @@ -613,7 +634,14 @@ impl ExecutionPayloadBodyV1 { } } 
ExecutionPayloadHeader::Electra(header) => { - if let Some(withdrawals) = self.withdrawals { + let withdrawals_exist = self.withdrawals.is_some(); + let deposit_requests_exist = self.deposit_requests.is_some(); + let withdrawal_requests_exist = self.withdrawal_requests.is_some(); + if let (Some(withdrawals), Some(deposit_requests), Some(withdrawal_requests)) = ( + self.withdrawals, + self.deposit_requests, + self.withdrawal_requests, + ) { Ok(ExecutionPayload::Electra(ExecutionPayloadElectra { parent_hash: header.parent_hash, fee_recipient: header.fee_recipient, @@ -632,14 +660,14 @@ impl ExecutionPayloadBodyV1 { withdrawals, blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, - // TODO(electra) - deposit_receipts: <_>::default(), - withdrawal_requests: <_>::default(), + deposit_requests, + withdrawal_requests, })) } else { Err(format!( - "block {} is post-capella but payload body doesn't have withdrawals", - header.block_hash + "block {} is post-electra but payload body doesn't have withdrawals/deposit_requests/withdrawal_requests \ + withdrawals: {}, deposit_requests: {}, withdrawal_requests: {}", + header.block_hash, withdrawals_exist, deposit_requests_exist, withdrawal_requests_exist )) } } @@ -652,6 +680,7 @@ pub struct EngineCapabilities { pub new_payload_v1: bool, pub new_payload_v2: bool, pub new_payload_v3: bool, + pub new_payload_v4: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, pub forkchoice_updated_v3: bool, @@ -660,6 +689,7 @@ pub struct EngineCapabilities { pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, + pub get_payload_v4: bool, pub get_client_version_v1: bool, } @@ -675,6 +705,9 @@ impl EngineCapabilities { if self.new_payload_v3 { response.push(ENGINE_NEW_PAYLOAD_V3); } + if self.new_payload_v4 { + response.push(ENGINE_NEW_PAYLOAD_V4); + } if self.forkchoice_updated_v1 { response.push(ENGINE_FORKCHOICE_UPDATED_V1); } @@ -699,6 +732,9 @@ impl EngineCapabilities { if self.get_payload_v3 { response.push(ENGINE_GET_PAYLOAD_V3); } + if self.get_payload_v4 { + response.push(ENGINE_GET_PAYLOAD_V4); + } if self.get_client_version_v1 { response.push(ENGINE_GET_CLIENT_VERSION_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 93705a16925..1c03cc81fc3 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -34,11 +34,13 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3"; +pub const ENGINE_NEW_PAYLOAD_V4: &str = "engine_newPayloadV4"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; +pub const ENGINE_GET_PAYLOAD_V4: &str = "engine_getPayloadV4"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; @@ -66,9 +68,11 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_NEW_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, + ENGINE_GET_PAYLOAD_V4, 
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, @@ -238,7 +242,6 @@ pub mod deposit_methods { /// Represents an eth1 chain/network id. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub enum Eth1Id { - Goerli, Mainnet, Custom(u64), } @@ -258,11 +261,10 @@ pub mod deposit_methods { Latest, } - impl Into for Eth1Id { - fn into(self) -> u64 { - match self { + impl From for u64 { + fn from(from: Eth1Id) -> u64 { + match from { Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, Eth1Id::Custom(id) => id, } } @@ -273,7 +275,6 @@ pub mod deposit_methods { let into = |x: Eth1Id| -> u64 { x.into() }; match id { id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, id => Eth1Id::Custom(id), } } @@ -415,7 +416,7 @@ pub mod deposit_methods { .ok_or("Block number was not string")?, )?; - if number <= usize::max_value() as u64 { + if number <= usize::MAX as u64 { Ok(Block { hash, timestamp, @@ -833,7 +834,7 @@ impl HttpJsonRpc { Ok(response.into()) } - pub async fn new_payload_v3_electra( + pub async fn new_payload_v4_electra( &self, new_payload_request_electra: NewPayloadRequestElectra<'_, E>, ) -> Result { @@ -845,7 +846,7 @@ impl HttpJsonRpc { let response: JsonPayloadStatusV1 = self .rpc_request( - ENGINE_NEW_PAYLOAD_V3, + ENGINE_NEW_PAYLOAD_V4, params, ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) @@ -929,19 +930,43 @@ impl HttpJsonRpc { .await?; Ok(JsonGetPayloadResponse::V3(response).into()) } + ForkName::Base + | ForkName::Altair + | ForkName::Bellatrix + | ForkName::Capella + | ForkName::Electra => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v3 with {}", + fork_name + ))), + } + } + + pub async fn get_payload_v4( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { ForkName::Electra => { let response: JsonGetPayloadResponseV4 = self .rpc_request( - ENGINE_GET_PAYLOAD_V3, + ENGINE_GET_PAYLOAD_V4, params, ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) .await?; Ok(JsonGetPayloadResponse::V4(response).into()) } - ForkName::Base | ForkName::Altair | ForkName::Bellatrix | ForkName::Capella => Err( - Error::UnsupportedForkVariant(format!("called get_payload_v3 with {}", fork_name)), - ), + ForkName::Base + | ForkName::Altair + | ForkName::Bellatrix + | ForkName::Capella + | ForkName::Deneb => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v4 with {}", + fork_name + ))), } } @@ -1067,6 +1092,7 @@ impl HttpJsonRpc { new_payload_v1: capabilities.contains(ENGINE_NEW_PAYLOAD_V1), new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3), + new_payload_v4: capabilities.contains(ENGINE_NEW_PAYLOAD_V4), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), @@ -1077,6 +1103,7 @@ impl HttpJsonRpc { get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), + get_payload_v4: capabilities.contains(ENGINE_GET_PAYLOAD_V4), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), }) } @@ -1199,11 +1226,11 @@ impl HttpJsonRpc { } } 
NewPayloadRequest::Electra(new_payload_request_electra) => { - if engine_capabilities.new_payload_v3 { - self.new_payload_v3_electra(new_payload_request_electra) + if engine_capabilities.new_payload_v4 { + self.new_payload_v4_electra(new_payload_request_electra) .await } else { - Err(Error::RequiredMethodUnsupported("engine_newPayloadV3")) + Err(Error::RequiredMethodUnsupported("engine_newPayloadV4")) } } } @@ -1221,17 +1248,24 @@ impl HttpJsonRpc { ForkName::Bellatrix | ForkName::Capella => { if engine_capabilities.get_payload_v2 { self.get_payload_v2(fork_name, payload_id).await - } else if engine_capabilities.new_payload_v1 { + } else if engine_capabilities.get_payload_v1 { self.get_payload_v1(payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayload")) } } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb => { if engine_capabilities.get_payload_v3 { self.get_payload_v3(fork_name, payload_id).await } else { - Err(Error::RequiredMethodUnsupported("engine_getPayloadV3")) + Err(Error::RequiredMethodUnsupported("engine_getPayloadv3")) + } + } + ForkName::Electra => { + if engine_capabilities.get_payload_v4 { + self.get_payload_v4(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayloadv4")) } } ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 50d3519e129..fbffc47e29e 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -4,7 +4,10 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::{FixedVector, Unsigned}; +use types::{ + DepositRequest, ExecutionLayerWithdrawalRequest, FixedVector, PublicKeyBytes, Signature, + Unsigned, +}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -101,6 +104,12 @@ pub struct JsonExecutionPayload { #[superstruct(only(V3, V4))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, + #[superstruct(only(V4))] + // TODO(electra): Field name should be changed post devnet-0. 
See https://github.com/ethereum/execution-apis/pull/544 + pub deposit_requests: VariableList, + #[superstruct(only(V4))] + pub withdrawal_requests: + VariableList, } impl From> for JsonExecutionPayloadV1 { @@ -203,6 +212,18 @@ impl From> for JsonExecutionPayloadV4 .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + deposit_requests: payload + .deposit_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), + withdrawal_requests: payload + .withdrawal_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), } } } @@ -319,9 +340,18 @@ impl From> for ExecutionPayloadElectra .into(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - // TODO(electra) - deposit_receipts: Default::default(), - withdrawal_requests: Default::default(), + deposit_requests: payload + .deposit_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), + withdrawal_requests: payload + .withdrawal_requests + .into_iter() + .map(Into::into) + .collect::>() + .into(), } } } @@ -690,10 +720,14 @@ impl From for JsonForkchoiceUpdatedV1Response { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "E: EthSpec")] +#[serde(rename_all = "camelCase")] pub struct JsonExecutionPayloadBodyV1 { #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, pub withdrawals: Option>, + pub deposit_requests: Option>, + pub withdrawal_requests: + Option>, } impl From> for ExecutionPayloadBodyV1 { @@ -708,6 +742,22 @@ impl From> for ExecutionPayloadBodyV1< .collect::>(), ) }), + deposit_requests: value.deposit_requests.map(|json_receipts| { + DepositRequests::::from( + json_receipts + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), + withdrawal_requests: value.withdrawal_requests.map(|json_withdrawal_requests| { + WithdrawalRequests::::from( + json_withdrawal_requests + .into_iter() + .map(Into::into) + .collect::>(), + ) + }), } } } @@ -755,6 +805,9 @@ pub mod serde_logs_bloom { #[serde(rename_all = "camelCase")] pub struct JsonClientVersionV1 { pub code: String, + // This `default` is required until Geth v1.13.x is no longer supported on mainnet. 
+ // See: https://github.com/ethereum/go-ethereum/pull/29351 + #[serde(default)] pub name: String, pub version: String, pub commit: String, @@ -783,3 +836,68 @@ impl TryFrom for ClientVersionV1 { }) } } + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct JsonDepositRequest { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::u64_hex_be")] + pub amount: u64, + pub signature: Signature, + #[serde(with = "serde_utils::u64_hex_be")] + pub index: u64, +} + +impl From for JsonDepositRequest { + fn from(deposit: DepositRequest) -> Self { + Self { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature, + index: deposit.index, + } + } +} + +impl From for DepositRequest { + fn from(json_deposit: JsonDepositRequest) -> Self { + Self { + pubkey: json_deposit.pubkey, + withdrawal_credentials: json_deposit.withdrawal_credentials, + amount: json_deposit.amount, + signature: json_deposit.signature, + index: json_deposit.index, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct JsonWithdrawalRequest { + pub source_address: Address, + pub validator_public_key: PublicKeyBytes, + #[serde(with = "serde_utils::u64_hex_be")] + pub amount: u64, +} + +impl From for JsonWithdrawalRequest { + fn from(withdrawal_request: ExecutionLayerWithdrawalRequest) -> Self { + Self { + source_address: withdrawal_request.source_address, + validator_public_key: withdrawal_request.validator_pubkey, + amount: withdrawal_request.amount, + } + } +} + +impl From for ExecutionLayerWithdrawalRequest { + fn from(json_withdrawal_request: JsonWithdrawalRequest) -> Self { + Self { + source_address: json_withdrawal_request.source_address, + validator_pubkey: json_withdrawal_request.validator_public_key, + amount: json_withdrawal_request.amount, + } + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index d441596edda..eaa739d7a5d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -370,6 +370,9 @@ pub struct Config { pub execution_endpoint: Option, /// Endpoint urls for services providing the builder api. pub builder_url: Option, + /// The timeout value used when making a request to fetch a block header + /// from the builder api. + pub builder_header_timeout: Option, /// User agent to send with requests to the builder API. pub builder_user_agent: Option, /// JWT secret for the above endpoint running the engine api. 
@@ -400,6 +403,7 @@ impl ExecutionLayer { execution_endpoint: url, builder_url, builder_user_agent, + builder_header_timeout, secret_file, suggested_fee_recipient, jwt_id, @@ -469,7 +473,7 @@ impl ExecutionLayer { }; if let Some(builder_url) = builder_url { - el.set_builder_url(builder_url, builder_user_agent)?; + el.set_builder_url(builder_url, builder_user_agent, builder_header_timeout)?; } Ok(el) @@ -491,9 +495,14 @@ impl ExecutionLayer { &self, builder_url: SensitiveUrl, builder_user_agent: Option, + builder_header_timeout: Option, ) -> Result<(), Error> { - let builder_client = BuilderHttpClient::new(builder_url.clone(), builder_user_agent) - .map_err(Error::Builder)?; + let builder_client = BuilderHttpClient::new( + builder_url.clone(), + builder_user_agent, + builder_header_timeout, + ) + .map_err(Error::Builder)?; info!( self.log(), "Using external block builder"; @@ -1985,6 +1994,22 @@ impl ExecutionLayer { .collect(), ) .map_err(ApiError::DeserializeWithdrawals)?; + let deposit_requests = VariableList::new( + electra_block + .deposit_requests + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeDepositRequests)?; + let withdrawal_requests = VariableList::new( + electra_block + .withdrawal_requests + .into_iter() + .map(Into::into) + .collect(), + ) + .map_err(ApiError::DeserializeWithdrawalRequests)?; ExecutionPayload::Electra(ExecutionPayloadElectra { parent_hash: electra_block.parent_hash, fee_recipient: electra_block.fee_recipient, @@ -2003,11 +2028,8 @@ impl ExecutionLayer { withdrawals, blob_gas_used: electra_block.blob_gas_used, excess_blob_gas: electra_block.excess_blob_gas, - // TODO(electra) - // deposit_receipts: electra_block.deposit_receipts, - // withdrawal_requests: electra_block.withdrawal_requests, - deposit_receipts: <_>::default(), - withdrawal_requests: <_>::default(), + deposit_requests, + withdrawal_requests, }) } }; @@ -2169,7 +2191,7 @@ fn verify_builder_bid( // Avoid logging values that we can't represent with our Prometheus library. let payload_value_gwei = bid.data.message.value() / 1_000_000_000; - if payload_value_gwei <= Uint256::from(i64::max_value()) { + if payload_value_gwei <= Uint256::from(i64::MAX) { metrics::set_gauge_vec( &metrics::EXECUTION_LAYER_PAYLOAD_BIDS, &[metrics::BUILDER], @@ -2182,7 +2204,7 @@ fn verify_builder_bid( .ok() .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); - let payload_withdrawals_root = header.withdrawals_root().ok().copied(); + let payload_withdrawals_root = header.withdrawals_root().ok(); if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 3ed99ca6068..6aaada3dff4 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -81,7 +81,7 @@ lazy_static::lazy_static! { ); pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result = try_create_int_gauge_vec( "execution_layer_payload_bids", - "The gwei bid value of payloads received by local EEs or builders. Only shows values up to i64::max_value.", + "The gwei bid value of payloads received by local EEs or builders. 
Only shows values up to i64::MAX.", &["source"] ); } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index e80c6b23705..8619e24a238 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -659,7 +659,7 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().into(), blob_gas_used: 0, excess_blob_gas: 0, - deposit_receipts: vec![].into(), + deposit_requests: vec![].into(), withdrawal_requests: vec![].into(), }), _ => unreachable!(), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 1dc8f0ab83e..0dc7a7759c5 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -98,7 +98,10 @@ pub async fn handle_rpc( .unwrap()) } } - ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 => { + ENGINE_NEW_PAYLOAD_V1 + | ENGINE_NEW_PAYLOAD_V2 + | ENGINE_NEW_PAYLOAD_V3 + | ENGINE_NEW_PAYLOAD_V4 => { let request = match method { ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( get_param::>(params, 0) @@ -111,20 +114,14 @@ pub async fn handle_rpc( .map(|jep| JsonExecutionPayload::V1(jep)) }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, - ENGINE_NEW_PAYLOAD_V3 => get_param::>(params, 0) + // From v3 onwards, we use the newPayload version only for the corresponding + // ExecutionPayload version. So we return an error instead of falling back to + // older versions of newPayload + ENGINE_NEW_PAYLOAD_V3 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V3(jep)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ENGINE_NEW_PAYLOAD_V4 => get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V4(jep)) - .or_else(|_| { - get_param::>(params, 0) - .map(|jep| JsonExecutionPayload::V3(jep)) - .or_else(|_| { - get_param::>(params, 0) - .map(|jep| JsonExecutionPayload::V2(jep)) - .or_else(|_| { - get_param::>(params, 0) - .map(|jep| JsonExecutionPayload::V1(jep)) - }) - }) - }) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, _ => unreachable!(), }; @@ -190,7 +187,10 @@ pub async fn handle_rpc( } } ForkName::Electra => { - if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 { + if method == ENGINE_NEW_PAYLOAD_V1 + || method == ENGINE_NEW_PAYLOAD_V2 + || method == ENGINE_NEW_PAYLOAD_V3 + { return Err(( format!("{} called after Electra fork!", method), GENERIC_ERROR_CODE, @@ -259,7 +259,10 @@ pub async fn handle_rpc( Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap()) } - ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 => { + ENGINE_GET_PAYLOAD_V1 + | ENGINE_GET_PAYLOAD_V2 + | ENGINE_GET_PAYLOAD_V3 + | ENGINE_GET_PAYLOAD_V4 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -309,7 +312,9 @@ pub async fn handle_rpc( .read() .get_fork_at_timestamp(response.timestamp()) == ForkName::Electra - && method == ENGINE_GET_PAYLOAD_V1 + && (method == ENGINE_GET_PAYLOAD_V1 + || method == ENGINE_GET_PAYLOAD_V2 + || method == ENGINE_GET_PAYLOAD_V3) { return Err(( format!("{} called after Electra fork!", method), @@ -338,6 +343,9 @@ pub async fn handle_rpc( } _ => unreachable!(), }), + // From v3 onwards, we use the getPayload version only for the corresponding + // ExecutionPayload version. 
So we return an error if the ExecutionPayload version + // we get does not correspond to the getPayload version. ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) { JsonExecutionPayload::V3(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV3 { @@ -353,6 +361,9 @@ pub async fn handle_rpc( }) .unwrap() } + _ => unreachable!(), + }), + ENGINE_GET_PAYLOAD_V4 => Ok(match JsonExecutionPayload::from(response) { JsonExecutionPayload::V4(execution_payload) => { serde_json::to_value(JsonGetPayloadResponseV4 { execution_payload, @@ -578,6 +589,14 @@ pub async fn handle_rpc( .withdrawals() .ok() .map(|withdrawals| VariableList::from(withdrawals.clone())), + deposit_requests: block.deposit_requests().ok().map( + |deposit_requests| VariableList::from(deposit_requests.clone()), + ), + withdrawal_requests: block.withdrawal_requests().ok().map( + |withdrawal_requests| { + VariableList::from(withdrawal_requests.clone()) + }, + ), })); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index a6d47995af8..7b00ca9fbc3 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -43,6 +43,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v1: true, new_payload_v2: true, new_payload_v3: true, + new_payload_v4: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, forkchoice_updated_v3: true, @@ -51,6 +52,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, + get_payload_v4: true, get_client_version_v1: true, }; diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs index 37bd35646d9..9bf87596b41 100644 --- a/beacon_node/execution_layer/src/versioned_hashes.rs +++ b/beacon_node/execution_layer/src/versioned_hashes.rs @@ -1,5 +1,3 @@ -extern crate alloy_consensus; -extern crate alloy_rlp; use alloy_consensus::TxEnvelope; use alloy_rlp::Decodable; use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash}; diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 0ede74ba754..70157050278 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -432,8 +432,14 @@ impl Eth1GenesisService { // Such an optimization would only be useful in a scenario where `MIN_GENESIS_TIME` // is reached _prior_ to `MIN_ACTIVE_VALIDATOR_COUNT`. I suspect this won't be the // case for mainnet, so we defer this optimization. + let Deposit { proof, data } = deposit; + let proof = if PROOF_VERIFICATION { + Some(proof) + } else { + None + }; - apply_deposit(&mut state, &deposit, spec, PROOF_VERIFICATION) + apply_deposit(&mut state, data, proof, true, spec) .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) })?; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index f99fcb55bf1..1252e0100b6 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -1,7 +1,3 @@ -//! NOTE: These tests will not pass unless an anvil is running on `ENDPOINT` (see below). -//! -//! You can start a suitable instance using the `anvil_test_node.sh` script in the `scripts` -//! dir in the root of the `lighthouse` repo. 
#![cfg(test)] use environment::{Environment, EnvironmentBuilder}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index f105fdf0a7d..66c71872786 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -10,8 +10,8 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; use types::{ - BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, - Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, + AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, + Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, }; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; @@ -111,22 +111,45 @@ impl PackingEfficiencyHandler { let attestations = block_body.attestations(); let mut attestations_in_block = HashMap::new(); - for attestation in attestations.iter() { - for (position, voted) in attestation.aggregation_bits.iter().enumerate() { - if voted { - let unique_attestation = UniqueAttestation { - slot: attestation.data.slot, - committee_index: attestation.data.index, - committee_position: position, - }; - let inclusion_distance: u64 = block - .slot() - .as_u64() - .checked_sub(attestation.data.slot.as_u64()) - .ok_or(PackingEfficiencyError::InvalidAttestationError)?; - - self.available_attestations.remove(&unique_attestation); - attestations_in_block.insert(unique_attestation, inclusion_distance); + for attestation in attestations { + match attestation { + AttestationRef::Base(attn) => { + for (position, voted) in attn.aggregation_bits.iter().enumerate() { + if voted { + let unique_attestation = UniqueAttestation { + slot: attn.data.slot, + committee_index: attn.data.index, + committee_position: position, + }; + let inclusion_distance: u64 = block + .slot() + .as_u64() + .checked_sub(attn.data.slot.as_u64()) + .ok_or(PackingEfficiencyError::InvalidAttestationError)?; + + self.available_attestations.remove(&unique_attestation); + attestations_in_block.insert(unique_attestation, inclusion_distance); + } + } + } + AttestationRef::Electra(attn) => { + for (position, voted) in attn.aggregation_bits.iter().enumerate() { + if voted { + let unique_attestation = UniqueAttestation { + slot: attn.data.slot, + committee_index: attn.data.index, + committee_position: position, + }; + let inclusion_distance: u64 = block + .slot() + .as_u64() + .checked_sub(attn.data.slot.as_u64()) + .ok_or(PackingEfficiencyError::InvalidAttestationError)?; + + self.available_attestations.remove(&unique_attestation); + attestations_in_block.insert(unique_attestation, inclusion_distance); + } + } } } } diff --git a/beacon_node/http_api/src/builder_states.rs b/beacon_node/http_api/src/builder_states.rs index a540113ab43..54f2c0efa8d 100644 --- a/beacon_node/http_api/src/builder_states.rs +++ b/beacon_node/http_api/src/builder_states.rs @@ -33,7 +33,7 @@ pub fn get_next_withdrawals( } match get_expected_withdrawals(&state, &chain.spec) { - Ok(withdrawals) => Ok(withdrawals), + Ok((withdrawals, _)) => Ok(withdrawals), Err(e) => Err(warp_utils::reject::custom_server_error(format!( "failed to get expected withdrawal: {:?}", e diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5f4620589eb..2d50dc6c635 100644 --- 
a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -31,6 +31,7 @@ mod validators; mod version; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::version::fork_versioned_response; use beacon_chain::{ attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, @@ -96,7 +97,7 @@ use warp::hyper::Body; use warp::sse::Event; use warp::Reply; use warp::{http::Response, Filter, Rejection}; -use warp_utils::{query::multi_key_query, uor::UnifyingOrFilter}; +use warp_utils::{query::multi_key_query, reject::convert_rejection, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -256,12 +257,15 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( ); // GET beacon/blocks/{block_id}/attestations - let get_beacon_block_attestations = beacon_blocks_path_v1 + let get_beacon_block_attestations = beacon_blocks_path_any .clone() .and(warp::path("attestations")) .and(warp::path::end()) .then( - |block_id: BlockId, + |endpoint_version: EndpointVersion, + block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { + task_spawner.blocking_response_task(Priority::P1, move || { let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from( - block.message().body().attestations().clone(), - ) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let atts = block + .message() + .body() + .attestations() + .map(|att| att.clone_as_attestation()) + .collect::>(); + let res = execution_optimistic_finalized_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + finalized, + &atts, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) }) }, ); @@ -1745,8 +1766,14 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()); + let beacon_pool_path_any = any_version + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + // POST beacon/pool/attestations - let post_beacon_pool_attestations = beacon_pool_path + let post_beacon_pool_attestations = beacon_pool_path_any .clone() .and(warp::path("attestations")) .and(warp::path::end()) @@ -1755,7 +1782,11 @@ pub fn serve( .and(reprocess_send_filter) .and(log_filter.clone()) .then( - |task_spawner: TaskSpawner, + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, network_tx: UnboundedSender>, @@ -1771,21 +1802,22 @@ pub fn serve( ) .await .map(|()| warp::reply::json(&())); - task_spawner::convert_rejection(result).await + convert_rejection(result).await }, ); // GET beacon/pool/attestations?committee_index,slot - let get_beacon_pool_attestations = beacon_pool_path + let get_beacon_pool_attestations = beacon_pool_path_any .clone() .and(warp::path("attestations")) .and(warp::path::end()) .and(warp::query::()) .then( - |task_spawner: TaskSpawner, + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, chain: Arc>, query: api_types::AttestationPoolQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { + task_spawner.blocking_response_task(Priority::P1, move || { let query_filter = |data: &AttestationData| { query.slot.map_or(true, |slot| slot == data.slot) && query @@ -1799,23 +1831,51 @@ pub fn serve( .naive_aggregation_pool .read() .iter() - .filter(|&att| query_filter(&att.data)) + .filter(|&att| query_filter(att.data())) .cloned(), ); - Ok(api_types::GenericResponse::from(attestations)) + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. + let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let attestations = attestations + .into_iter() + .filter(|att| { + (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(att, Attestation::Base(_))) + }) + .collect::>(); + + let res = fork_versioned_response(endpoint_version, fork_name, &attestations)?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) }) }, ); // POST beacon/pool/attester_slashings - let post_beacon_pool_attester_slashings = beacon_pool_path + let post_beacon_pool_attester_slashings = beacon_pool_path_any .clone() .and(warp::path("attester_slashings")) .and(warp::path::end()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) .then( - |task_spawner: TaskSpawner, + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, chain: Arc>, slashing: AttesterSlashing, network_tx: UnboundedSender>| { @@ -1833,7 +1893,7 @@ pub fn serve( chain .validator_monitor .read() - .register_api_attester_slashing(&slashing); + .register_api_attester_slashing(slashing.to_ref()); if let ObservationOutcome::New(slashing) = outcome { publish_pubsub_message( @@ -1852,18 +1912,45 @@ pub fn serve( ); // GET beacon/pool/attester_slashings - let get_beacon_pool_attester_slashings = beacon_pool_path - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_attester_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); + let get_beacon_pool_attester_slashings = + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let slashings = chain.op_pool.get_all_attester_slashings(); + + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. + let current_slot = chain.slot_clock.now().ok_or( + warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ), + )?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let slashings = slashings + .into_iter() + .filter(|slashing| { + (fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Base(_))) + }) + .collect::>(); + + let res = fork_versioned_response(endpoint_version, fork_name, &slashings)?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ); // POST beacon/pool/proposer_slashings let post_beacon_pool_proposer_slashings = beacon_pool_path @@ -2121,14 +2208,7 @@ pub fn serve( task_spawner: TaskSpawner, eth1_service: eth1::Service| { task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Json) | None => { - let snapshot = eth1_service.get_deposit_snapshot(); - Ok( - warp::reply::json(&api_types::GenericResponse::from(snapshot)) - .into_response(), - ) - } - _ => eth1_service + Some(api_types::Accept::Ssz) => eth1_service .get_deposit_snapshot() .map(|snapshot| { Response::builder() @@ -2154,6 +2234,13 @@ pub fn serve( )) }) }), + _ => { + let snapshot = eth1_service.get_deposit_snapshot(); + Ok( + warp::reply::json(&api_types::GenericResponse::from(snapshot)) + .into_response(), + ) + } }) }, ); @@ -3162,7 +3249,7 @@ pub fn serve( chain .produce_unaggregated_attestation(query.slot, query.committee_index) - .map(|attestation| attestation.data) + .map(|attestation| attestation.data().clone()) .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::beacon_chain_error) }) @@ -3170,7 +3257,7 @@ pub fn serve( ); // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = eth_v1 + let get_validator_aggregate_attestation = any_version .and(warp::path("validator")) .and(warp::path("aggregate_attestation")) .and(warp::path::end()) @@ -3179,29 +3266,45 @@ pub fn serve( 
.and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |query: api_types::ValidatorAggregateAttestationQuery, + |endpoint_version: EndpointVersion, + query: api_types::ValidatorAggregateAttestationQuery, not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - chain - .get_aggregated_attestation_by_slot_and_root( + let res = if endpoint_version == V2 { + let Some(committee_index) = query.committee_index else { + return Err(warp_utils::reject::custom_bad_request( + "missing committee index".to_string(), + )); + }; + chain.get_aggregated_attestation_electra( query.slot, &query.attestation_data_root, + committee_index, ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch aggregate: {:?}", - e - )) - })? - .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching aggregate found".to_string(), - ) - }) + } else if endpoint_version == V1 { + // Do nothing + chain.get_pre_electra_aggregated_attestation_by_slot_and_root( + query.slot, + &query.attestation_data_root, + ) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + res.map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? + .map(api_types::GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching aggregate found".to_string(), + ) + }) }) }, ); @@ -3297,7 +3400,7 @@ pub fn serve( ); // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = eth_v1 + let post_validator_aggregate_and_proofs = any_version .and(warp::path("validator")) .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) @@ -3308,7 +3411,11 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - |not_synced_filter: Result<(), Rejection>, + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>, aggregates: Vec>, @@ -3363,9 +3470,9 @@ pub fn serve( "Failure verifying aggregate and proofs"; "error" => format!("{:?}", e), "request_index" => index, - "aggregator_index" => aggregate.message.aggregator_index, - "attestation_index" => aggregate.message.aggregate.data.index, - "attestation_slot" => aggregate.message.aggregate.data.slot, + "aggregator_index" => aggregate.message().aggregator_index(), + "attestation_index" => aggregate.message().aggregate().committee_index(), + "attestation_slot" => aggregate.message().aggregate().data().slot, ); failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); } @@ -3384,9 +3491,9 @@ pub fn serve( "Failure applying verified aggregate attestation to fork choice"; "error" => format!("{:?}", e), "request_index" => index, - "aggregator_index" => verified_aggregate.aggregate().message.aggregator_index, - "attestation_index" => verified_aggregate.attestation().data.index, - "attestation_slot" => verified_aggregate.attestation().data.slot, + "aggregator_index" => verified_aggregate.aggregate().message().aggregator_index(), + "attestation_index" => verified_aggregate.attestation().committee_index(), + "attestation_slot" => verified_aggregate.attestation().data().slot, ); failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); } @@ -3710,12 +3817,12 @@ pub fn serve( .await; if initial_result.is_err() { - return task_spawner::convert_rejection(initial_result).await; + return convert_rejection(initial_result).await; } // Await a response from the builder without blocking a // `BeaconProcessor` worker. - task_spawner::convert_rejection(rx.await.unwrap_or_else(|_| { + convert_rejection(rx.await.unwrap_or_else(|_| { Ok(warp::reply::with_status( warp::reply::json(&"No response from channel"), eth2::StatusCode::INTERNAL_SERVER_ERROR, @@ -4358,6 +4465,18 @@ pub fn serve( api_types::EventTopic::BlockReward => { event_handler.subscribe_block_reward() } + api_types::EventTopic::AttesterSlashing => { + event_handler.subscribe_attester_slashing() + } + api_types::EventTopic::ProposerSlashing => { + event_handler.subscribe_proposer_slashing() + } + api_types::EventTopic::BlsToExecutionChange => { + event_handler.subscribe_bls_to_execution_change() + } + api_types::EventTopic::BlockGossip => { + event_handler.subscribe_block_gossip() + } }; receivers.push( diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 26ee183c83f..3eada3a3d46 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -31,7 +31,13 @@ lazy_static::lazy_static! 
{ ); pub static ref HTTP_API_BLOCK_BROADCAST_DELAY_TIMES: Result = try_create_histogram_vec( "http_api_block_broadcast_delay_times", - "Time between start of the slot and when the block was broadcast", + "Time between start of the slot and when the block completed broadcast and processing", + &["provenance"] + ); + pub static ref HTTP_API_BLOCK_GOSSIP_TIMES: Result = try_create_histogram_vec_with_buckets( + "http_api_block_gossip_times", + "Time between receiving the block on HTTP and publishing it on gossip", + decimal_buckets(-2, 2), &["provenance"] ); pub static ref HTTP_API_BLOCK_PUBLISHED_LATE_TOTAL: Result = try_create_int_counter( diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index ed7f1ed17c9..00654765325 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -87,7 +87,7 @@ fn verify_and_publish_attestation( .send(NetworkMessage::Publish { messages: vec![PubsubMessage::Attestation(Box::new(( attestation.subnet_id(), - attestation.attestation().clone(), + attestation.attestation().clone_as_attestation(), )))], }) .map_err(|_| Error::Publication)?; @@ -141,7 +141,7 @@ pub async fn publish_attestations( // move the `attestations` vec into the blocking task, so this small overhead is unavoidable. let attestation_metadata = attestations .iter() - .map(|att| (att.data.slot, att.data.index)) + .map(|att| (att.data().slot, att.committee_index())) .collect::>(); // Gossip validate and publish attestations that can be immediately processed. diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 0d176e6a53a..10d000ef6f8 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -19,8 +19,8 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, - ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, BlockImportSource, EthSpec, ExecPayload, + ExecutionBlockHash, ForkName, FullPayload, FullPayloadBellatrix, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, VariableList, }; use warp::http::StatusCode; @@ -60,6 +60,11 @@ pub async fn publish_block (block_contents, true), ProvenancedBlock::Builder(block_contents, _) => (block_contents, false), }; + let provenance = if is_locally_built_block { + "local" + } else { + "builder" + }; let block = block_contents.inner_block().clone(); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); debug!(log, "Signed block received in HTTP API"; "slot" => block.slot()); @@ -75,7 +80,18 @@ pub async fn publish_block block.slot(), "publish_delay" => ?publish_delay); + metrics::observe_timer_vec( + &metrics::HTTP_API_BLOCK_GOSSIP_TIMES, + &[provenance], + publish_delay, + ); + + info!( + log, + "Signed block published to network via HTTP API"; + "slot" => block.slot(), + "publish_delay_ms" => publish_delay.as_millis() + ); match block.as_ref() { SignedBeaconBlock::Base(_) @@ -214,6 +230,7 @@ pub async fn publish_block { beacon_processor_send: Option>, } -/// Convert a warp `Rejection` into a `Response`. -/// -/// This function should *always* be used to convert rejections into responses. This prevents warp -/// from trying to backtrack in strange ways. 
See: https://github.com/sigp/lighthouse/issues/3404 -pub async fn convert_rejection(res: Result) -> Response { - match res { - Ok(response) => response.into_response(), - Err(e) => match warp_utils::reject::handle_rejection(e).await { - Ok(reply) => reply.into_response(), - Err(_) => warp::reply::with_status( - warp::reply::json(&"unhandled error"), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response(), - }, - } -} - impl TaskSpawner { pub fn new(beacon_processor_send: Option>) -> Self { Self { diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index c1313168bcd..88112de10b6 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -3,7 +3,9 @@ use beacon_chain::{ test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; -use beacon_processor::{BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig}; +use beacon_processor::{ + BeaconProcessor, BeaconProcessorChannels, BeaconProcessorConfig, BeaconProcessorQueueLengths, +}; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; use lighthouse_network::{ @@ -206,6 +208,11 @@ pub async fn create_api_server( None, chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &chain.canonical_head.cached_head().snapshot.beacon_state, + &chain.spec, + ) + .unwrap(), ) .unwrap(); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 6a3f7947e6b..78f9c819888 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -7,7 +7,6 @@ use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; use std::sync::Arc; -use tree_hash::TreeHash; use types::{Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -353,13 +352,20 @@ pub async fn consensus_partial_pass_only_consensus() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, _), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; - let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; + let ((block_a, _), mut state_after_a) = + tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), mut state_after_b) = tester.harness.make_block(state_a, slot_b).await; let block_b_root = block_b.canonical_root(); /* check for `make_block` curios */ - assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); - assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_eq!( + block_a.state_root(), + state_after_a.canonical_root().unwrap() + ); + assert_eq!( + block_b.state_root(), + state_after_b.canonical_root().unwrap() + ); assert_ne!(block_a.state_root(), block_b.state_root()); let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) @@ -516,13 +522,19 @@ pub async fn equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), state_after_a) = + let ((block_a, blobs_a), mut state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; - let ((block_b, blobs_b), 
state_after_b) = tester.harness.make_block(state_a, slot_b).await; + let ((block_b, blobs_b), mut state_after_b) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ - assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); - assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_eq!( + block_a.state_root(), + state_after_a.canonical_root().unwrap() + ); + assert_eq!( + block_b.state_root(), + state_after_b.canonical_root().unwrap() + ); assert_ne!(block_a.state_root(), block_b.state_root()); /* submit `block_a` as valid */ @@ -642,13 +654,19 @@ pub async fn equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), state_after_a) = + let ((block_a, blobs_a), mut state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; - let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; + let ((block_b, blobs_b), mut state_after_b) = tester.harness.make_block(state_a, slot_b).await; /* check for `make_block` curios */ - assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); - assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_eq!( + block_a.state_root(), + state_after_a.canonical_root().unwrap() + ); + assert_eq!( + block_b.state_root(), + state_after_b.canonical_root().unwrap() + ); assert_ne!(block_a.state_root(), block_b.state_root()); let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) @@ -1135,15 +1153,21 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a) = tester + let (block_a, mut state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, mut state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ - assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); - assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_eq!( + block_a.state_root(), + state_after_a.canonical_root().unwrap() + ); + assert_eq!( + block_b.state_root(), + state_after_b.canonical_root().unwrap() + ); assert_ne!(block_a.state_root(), block_b.state_root()); /* submit `block_a` as valid */ @@ -1259,16 +1283,22 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_a, state_after_a) = tester + let (block_a, mut state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, mut state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; let block_b = Arc::new(block_b); /* check for `make_blinded_block` curios */ - assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); - assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); + assert_eq!( + block_a.state_root(), + state_after_a.canonical_root().unwrap() + ); + assert_eq!( + block_b.state_root(), + state_after_b.canonical_root().unwrap() + ); assert_ne!(block_a.state_root(), block_b.state_root()); let unblinded_block_a = reconstruct_block( diff --git a/beacon_node/http_api/tests/fork_tests.rs 
b/beacon_node/http_api/tests/fork_tests.rs index ad32ff1d579..db8a0ab2b54 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -55,7 +55,7 @@ async fn sync_committee_duties_across_fork() { // though the head state hasn't transitioned yet. let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); - let (_, state) = harness + let (_, mut state) = harness .add_attested_block_at_slot( fork_slot - 1, genesis_state, @@ -76,7 +76,7 @@ async fn sync_committee_duties_across_fork() { assert_eq!(sync_duties.len(), E::sync_committee_size()); // After applying a block at the fork slot the duties should remain unchanged. - let state_root = state.canonical_root(); + let state_root = state.canonical_root().unwrap(); harness .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) .await @@ -150,8 +150,13 @@ async fn attestations_across_fork_with_skip_slots() { .collect::>(); assert!(!unaggregated_attestations.is_empty()); + let fork_name = harness.spec.fork_name_at_slot::(fork_slot); client - .post_beacon_pool_attestations(&unaggregated_attestations) + .post_beacon_pool_attestations_v1(&unaggregated_attestations) + .await + .unwrap(); + client + .post_beacon_pool_attestations_v2(&unaggregated_attestations, fork_name) .await .unwrap(); @@ -162,7 +167,11 @@ async fn attestations_across_fork_with_skip_slots() { assert!(!signed_aggregates.is_empty()); client - .post_validator_aggregate_and_proof(&signed_aggregates) + .post_validator_aggregate_and_proof_v1(&signed_aggregates) + .await + .unwrap(); + client + .post_validator_aggregate_and_proof_v2(&signed_aggregates, fork_name) .await .unwrap(); } @@ -257,7 +266,7 @@ async fn sync_committee_indices_across_fork() { // applied. let fork_slot = fork_epoch.start_slot(E::slots_per_epoch()); let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); - let (_, state) = harness + let (_, mut state) = harness .add_attested_block_at_slot( fork_slot - 1, genesis_state, @@ -295,7 +304,7 @@ async fn sync_committee_indices_across_fork() { // Once the head is updated it should be useable for requests, including in the next sync // committee period. - let state_root = state.canonical_root(); + let state_root = state.canonical_root().unwrap(); harness .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) .await diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 529dc852e98..2f417cf7ba5 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -17,7 +17,6 @@ use state_processing::{ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use tree_hash::TreeHash; use types::{ Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, @@ -515,16 +514,17 @@ pub async fn proposer_boost_re_org_test( } harness.advance_slot(); - let (block_a_root, block_a, state_a) = harness + let (block_a_root, block_a, mut state_a) = harness .add_block_at_slot(slot_a, harness.get_current_state()) .await .unwrap(); + let state_a_root = state_a.canonical_root().unwrap(); // Attest to block A during slot A. 
let (block_a_parent_votes, _) = harness.make_attestations_with_limit( &all_validators, &state_a, - state_a.canonical_root(), + state_a_root, block_a_root, slot_a, num_parent_votes, @@ -538,7 +538,7 @@ pub async fn proposer_boost_re_org_test( let (block_a_empty_votes, block_a_attesters) = harness.make_attestations_with_limit( &all_validators, &state_a, - state_a.canonical_root(), + state_a_root, block_a_root, slot_b, num_empty_votes, @@ -553,6 +553,7 @@ pub async fn proposer_boost_re_org_test( // Produce block B and process it halfway through the slot. let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; + let state_b_root = state_b.canonical_root().unwrap(); let block_b_root = block_b.0.canonical_root(); let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2; @@ -570,7 +571,7 @@ pub async fn proposer_boost_re_org_test( let (block_b_head_votes, _) = harness.make_attestations_with_limit( &remaining_attesters, &state_b, - state_b.canonical_root(), + state_b_root, block_b_root.into(), slot_b, num_head_votes, @@ -610,6 +611,7 @@ pub async fn proposer_boost_re_org_test( assert_eq!(state_b.slot(), slot_b); let pre_advance_withdrawals = get_expected_withdrawals(&state_b, &harness.chain.spec) .unwrap() + .0 .to_vec(); complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap(); @@ -696,6 +698,7 @@ pub async fn proposer_boost_re_org_test( get_expected_withdrawals(&state_b, &harness.chain.spec) } .unwrap() + .0 .to_vec(); let payload_attribs_withdrawals = payload_attribs.withdrawals().unwrap(); assert_eq!(expected_withdrawals, *payload_attribs_withdrawals); @@ -772,32 +775,34 @@ pub async fn fork_choice_before_proposal() { let slot_d = slot_a + 3; let state_a = harness.get_current_state(); - let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; + let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; let block_root_b = harness .process_block(slot_b, block_b.0.canonical_root(), block_b) .await .unwrap(); + let state_root_b = state_b.canonical_root().unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. let attestations_b = harness.make_attestations( &all_validators, &state_b, - state_b.tree_hash_root(), + state_root_b, block_root_b, slot_b, ); - let (block_c, state_c) = harness.make_block(state_a, slot_c).await; + let (block_c, mut state_c) = harness.make_block(state_a, slot_c).await; let block_root_c = harness .process_block(slot_c, block_c.0.canonical_root(), block_c.clone()) .await .unwrap(); + let state_root_c = state_c.canonical_root().unwrap(); // Create attestations to C from a small number of validators and process them immediately. 
let attestations_c = harness.make_attestations( &all_validators[..validator_count / 2], &state_c, - state_c.tree_hash_root(), + state_root_c, block_root_c, slot_c, ); @@ -888,9 +893,10 @@ async fn queue_attestations_from_http() { .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) .collect::>(); + let fork_name = tester.harness.spec.fork_name_at_slot::(attestation_slot); let attestation_future = tokio::spawn(async move { client - .post_beacon_pool_attestations(&attestations) + .post_beacon_pool_attestations_v2(&attestations, fork_name) .await .expect("attestations should be processed successfully") }); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d44b9a688ce..633baaf6f40 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -10,7 +10,9 @@ use eth2::{ types::{ BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, }, - BeaconNodeHttpClient, Error, StatusCode, Timeouts, + BeaconNodeHttpClient, Error, + Error::ServerMessage, + StatusCode, Timeouts, }; use execution_layer::test_utils::{ MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, @@ -35,8 +37,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, - MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, + attestation::AttestationBase, AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, + Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; type E = MainnetEthSpec; @@ -70,6 +72,7 @@ struct ApiTester { attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, + bls_to_execution_change: SignedBlsToExecutionChange, network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, @@ -128,6 +131,7 @@ impl ApiTester { }) .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer_with_config() .build(); @@ -223,6 +227,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -289,6 +294,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, local_enr, external_peer_id, @@ -301,6 +307,7 @@ impl ApiTester { BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .deterministic_keypairs(VALIDATOR_COUNT) + .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .build(), ); @@ -336,6 +343,7 @@ impl ApiTester { let attester_slashing = harness.make_attester_slashing(vec![0, 1]); let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); + let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); @@ -373,6 +381,7 @@ impl ApiTester { attester_slashing, proposer_slashing, voluntary_exit, + bls_to_execution_change, network_rx, local_enr, external_peer_id, @@ 
-800,6 +809,39 @@ impl ApiTester { self } + pub async fn post_beacon_states_validator_balances_unsupported_media_failure(self) -> Self { + for state_id in self.interesting_state_ids() { + for validator_indices in self.interesting_validator_indices() { + let validator_index_ids = validator_indices + .iter() + .cloned() + .map(|i| ValidatorId::Index(i)) + .collect::>(); + + let unsupported_media_response = self + .client + .post_beacon_states_validator_balances_with_ssz_header( + state_id.0, + validator_index_ids, + ) + .await; + + if let Err(unsupported_media_response) = unsupported_media_response { + match unsupported_media_response { + ServerMessage(error) => { + assert_eq!(error.code, 415) + } + _ => panic!("Should error with unsupported media response"), + } + } else { + panic!("Should error with unsupported media response"); + } + } + } + + self + } + pub async fn test_beacon_states_validator_balances(self) -> Self { for state_id in self.interesting_state_ids() { for validator_indices in self.interesting_validator_indices() { @@ -1626,14 +1668,20 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_attestations(block_id.0) + .get_beacon_blocks_attestations_v2(block_id.0) .await .unwrap() .map(|res| res.data); let expected = block_id.full_block(&self.chain).await.ok().map( |(block, _execution_optimistic, _finalized)| { - block.message().body().attestations().clone().into() + block + .message() + .body() + .attestations() + .map(|att| att.clone_as_attestation()) + .collect::>() + .into() }, ); @@ -1651,9 +1699,9 @@ impl ApiTester { self } - pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { + pub async fn test_post_beacon_pool_attestations_valid_v1(mut self) -> Self { self.client - .post_beacon_pool_attestations(self.attestations.as_slice()) + .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); @@ -1665,11 +1713,29 @@ impl ApiTester { self } - pub async fn test_post_beacon_pool_attestations_invalid(mut self) -> Self { + pub async fn test_post_beacon_pool_attestations_valid_v2(mut self) -> Self { + let fork_name = self + .attestations + .first() + .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .unwrap(); + self.client + .post_beacon_pool_attestations_v2(self.attestations.as_slice(), fork_name) + .await + .unwrap(); + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid attestation should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_attestations_invalid_v1(mut self) -> Self { let mut attestations = Vec::new(); for attestation in &self.attestations { let mut invalid_attestation = attestation.clone(); - invalid_attestation.data.slot += 1; + invalid_attestation.data_mut().slot += 1; // add both to ensure we only fail on invalid attestations attestations.push(attestation.clone()); @@ -1678,7 +1744,7 @@ impl ApiTester { let err = self .client - .post_beacon_pool_attestations(attestations.as_slice()) + .post_beacon_pool_attestations_v1(attestations.as_slice()) .await .unwrap_err(); @@ -1701,6 +1767,48 @@ impl ApiTester { self } + pub async fn test_post_beacon_pool_attestations_invalid_v2(mut self) -> Self { + let mut attestations = Vec::new(); + for attestation in &self.attestations { + let mut invalid_attestation = attestation.clone(); + invalid_attestation.data_mut().slot += 1; + + // add both to ensure we only fail on invalid attestations + attestations.push(attestation.clone()); + 
attestations.push(invalid_attestation); + } + + let fork_name = self + .attestations + .first() + .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .unwrap(); + + let err_v2 = self + .client + .post_beacon_pool_attestations_v2(attestations.as_slice(), fork_name) + .await + .unwrap_err(); + + match err_v2 { + Error::ServerIndexedMessage(IndexedErrorMessage { + code, + message: _, + failures, + }) => { + assert_eq!(code, 400); + assert_eq!(failures.len(), self.attestations.len()); + } + _ => panic!("query did not fail correctly"), + } + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "if some attestations are valid, we should send them to the network" + ); + + self + } pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); @@ -1764,7 +1872,7 @@ impl ApiTester { pub async fn test_get_beacon_pool_attestations(self) -> Self { let result = self .client - .get_beacon_pool_attestations(None, None) + .get_beacon_pool_attestations_v1(None, None) .await .unwrap() .data; @@ -1774,12 +1882,38 @@ impl ApiTester { assert_eq!(result, expected); + let result = self + .client + .get_beacon_pool_attestations_v2(None, None) + .await + .unwrap() + .data; + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_valid_v1(mut self) -> Self { + self.client + .post_beacon_pool_attester_slashings_v1(&self.attester_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.network_recv.recv().await.is_some(), + "valid attester slashing should be sent to network" + ); + self } - pub async fn test_post_beacon_pool_attester_slashings_valid(mut self) -> Self { + pub async fn test_post_beacon_pool_attester_slashings_valid_v2(mut self) -> Self { + let fork_name = self + .chain + .spec + .fork_name_at_slot::(self.attester_slashing.attestation_1().data().slot); self.client - .post_beacon_pool_attester_slashings(&self.attester_slashing) + .post_beacon_pool_attester_slashings_v2(&self.attester_slashing, fork_name) .await .unwrap(); @@ -1791,12 +1925,47 @@ impl ApiTester { self } - pub async fn test_post_beacon_pool_attester_slashings_invalid(mut self) -> Self { + pub async fn test_post_beacon_pool_attester_slashings_invalid_v1(mut self) -> Self { let mut slashing = self.attester_slashing.clone(); - slashing.attestation_1.data.slot += 1; + match &mut slashing { + AttesterSlashing::Base(ref mut slashing) => { + slashing.attestation_1.data.slot += 1; + } + AttesterSlashing::Electra(ref mut slashing) => { + slashing.attestation_1.data.slot += 1; + } + } self.client - .post_beacon_pool_attester_slashings(&slashing) + .post_beacon_pool_attester_slashings_v1(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.network_recv.recv().now_or_never().is_none(), + "invalid attester slashing should not be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_invalid_v2(mut self) -> Self { + let mut slashing = self.attester_slashing.clone(); + match &mut slashing { + AttesterSlashing::Base(ref mut slashing) => { + slashing.attestation_1.data.slot += 1; + } + AttesterSlashing::Electra(ref mut slashing) => { + slashing.attestation_1.data.slot += 1; + } + } + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(self.attester_slashing.attestation_1().data().slot); + self.client + .post_beacon_pool_attester_slashings_v2(&slashing, fork_name) .await .unwrap_err(); @@ -1811,7 +1980,7 @@ impl ApiTester { pub async fn 
test_get_beacon_pool_attester_slashings(self) -> Self { let result = self .client - .get_beacon_pool_attester_slashings() + .get_beacon_pool_attester_slashings_v1() .await .unwrap() .data; @@ -1820,6 +1989,14 @@ impl ApiTester { assert_eq!(result, expected); + let result = self + .client + .get_beacon_pool_attester_slashings_v2() + .await + .unwrap() + .data; + assert_eq!(result, expected); + self } @@ -2259,9 +2436,9 @@ impl ApiTester { vec![validator_count], vec![validator_count, 1], vec![validator_count, 1, 3], - vec![u64::max_value()], - vec![u64::max_value(), 1], - vec![u64::max_value(), 1, 3], + vec![u64::MAX], + vec![u64::MAX, 1], + vec![u64::MAX, 1, 3], ]; interesting.push((0..validator_count).collect()); @@ -3168,7 +3345,8 @@ impl ApiTester { .chain .produce_unaggregated_attestation(slot, index) .unwrap() - .data; + .data() + .clone(); assert_eq!(result, expected); } @@ -3177,28 +3355,52 @@ impl ApiTester { } pub async fn test_get_validator_aggregate_attestation(self) -> Self { - let attestation = self + if self .chain - .head_beacon_block() - .message() - .body() - .attestations()[0] - .clone(); - - let result = self - .client - .get_validator_aggregate_attestation( - attestation.data.slot, - attestation.data.tree_hash_root(), - ) - .await - .unwrap() - .unwrap() - .data; + .spec + .fork_name_at_slot::(self.chain.slot().unwrap()) + .electra_enabled() + { + for attestation in self.chain.naive_aggregation_pool.read().iter() { + let result = self + .client + .get_validator_aggregate_attestation_v2( + attestation.data().slot, + attestation.data().tree_hash_root(), + attestation.committee_index().unwrap(), + ) + .await + .unwrap() + .unwrap() + .data; + let expected = attestation; - let expected = attestation; + assert_eq!(&result, expected); + } + } else { + let attestation = self + .chain + .head_beacon_block() + .message() + .body() + .attestations() + .next() + .unwrap() + .clone_as_attestation(); + let result = self + .client + .get_validator_aggregate_attestation_v1( + attestation.data().slot, + attestation.data().tree_hash_root(), + ) + .await + .unwrap() + .unwrap() + .data; + let expected = attestation; - assert_eq!(result, expected); + assert_eq!(result, expected); + } self } @@ -3269,11 +3471,12 @@ impl ApiTester { .unwrap() .data; - let mut attestation = Attestation { + // TODO(electra) make fork-agnostic + let mut attestation = Attestation::Base(AttestationBase { aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(), data: attestation_data, signature: AggregateSignature::infinity(), - }; + }); attestation .sign( @@ -3287,7 +3490,7 @@ impl ApiTester { SignedAggregateAndProof::from_aggregate( i as u64, - attestation, + attestation.to_ref(), Some(proof), &kp.sk, &fork, @@ -3296,11 +3499,11 @@ impl ApiTester { ) } - pub async fn test_get_validator_aggregate_and_proofs_valid(mut self) -> Self { + pub async fn test_get_validator_aggregate_and_proofs_valid_v1(mut self) -> Self { let aggregate = self.get_aggregate().await; self.client - .post_validator_aggregate_and_proof::(&[aggregate]) + .post_validator_aggregate_and_proof_v1::(&[aggregate]) .await .unwrap(); @@ -3309,13 +3512,19 @@ impl ApiTester { self } - pub async fn test_get_validator_aggregate_and_proofs_invalid(mut self) -> Self { + pub async fn test_get_validator_aggregate_and_proofs_invalid_v1(mut self) -> Self { let mut aggregate = self.get_aggregate().await; - - aggregate.message.aggregate.data.slot += 1; + match &mut aggregate { + SignedAggregateAndProof::Base(ref mut aggregate) => { + 
aggregate.message.aggregate.data.slot += 1; + } + SignedAggregateAndProof::Electra(ref mut aggregate) => { + aggregate.message.aggregate.data.slot += 1; + } + } self.client - .post_validator_aggregate_and_proof::(&[aggregate]) + .post_validator_aggregate_and_proof_v1::(&[aggregate.clone()]) .await .unwrap_err(); @@ -3324,6 +3533,46 @@ impl ApiTester { self } + pub async fn test_get_validator_aggregate_and_proofs_valid_v2(mut self) -> Self { + let aggregate = self.get_aggregate().await; + let fork_name = self + .chain + .spec + .fork_name_at_slot::(aggregate.message().aggregate().data().slot); + self.client + .post_validator_aggregate_and_proof_v2::(&[aggregate], fork_name) + .await + .unwrap(); + + assert!(self.network_rx.network_recv.recv().await.is_some()); + + self + } + + pub async fn test_get_validator_aggregate_and_proofs_invalid_v2(mut self) -> Self { + let mut aggregate = self.get_aggregate().await; + match &mut aggregate { + SignedAggregateAndProof::Base(ref mut aggregate) => { + aggregate.message.aggregate.data.slot += 1; + } + SignedAggregateAndProof::Electra(ref mut aggregate) => { + aggregate.message.aggregate.data.slot += 1; + } + } + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(aggregate.message().aggregate().data().slot); + self.client + .post_validator_aggregate_and_proof_v2::(&[aggregate], fork_name) + .await + .unwrap_err(); + assert!(self.network_rx.network_recv.recv().now_or_never().is_none()); + + self + } + pub async fn test_get_validator_beacon_committee_subscriptions(mut self) -> Self { let subscription = BeaconCommitteeSubscription { validator_index: 0, @@ -3419,7 +3668,7 @@ impl ApiTester { pub async fn test_post_validator_register_validator_slashed(self) -> Self { // slash a validator self.client - .post_beacon_pool_attester_slashings(&self.attester_slashing) + .post_beacon_pool_attester_slashings_v1(&self.attester_slashing) .await .unwrap(); @@ -3532,7 +3781,7 @@ impl ApiTester { // Attest to the current slot self.client - .post_beacon_pool_attestations(self.attestations.as_slice()) + .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); @@ -5172,7 +5421,7 @@ impl ApiTester { // Attest to the current slot self.client - .post_beacon_pool_attestations(self.attestations.as_slice()) + .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); @@ -5212,8 +5461,12 @@ impl ApiTester { EventTopic::Attestation, EventTopic::VoluntaryExit, EventTopic::Block, + EventTopic::BlockGossip, EventTopic::Head, EventTopic::FinalizedCheckpoint, + EventTopic::AttesterSlashing, + EventTopic::ProposerSlashing, + EventTopic::BlsToExecutionChange, ]; let mut events_future = self .client @@ -5224,7 +5477,7 @@ impl ApiTester { let expected_attestation_len = self.attestations.len(); self.client - .post_beacon_pool_attestations(self.attestations.as_slice()) + .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); @@ -5256,6 +5509,20 @@ impl ApiTester { &[EventKind::VoluntaryExit(self.voluntary_exit.clone())] ); + // Produce a BLS to execution change event + self.client + .post_beacon_pool_bls_to_execution_changes(&[self.bls_to_execution_change.clone()]) + .await + .unwrap(); + + let bls_events = poll_events(&mut events_future, 1, Duration::from_millis(10000)).await; + assert_eq!( + bls_events.as_slice(), + &[EventKind::BlsToExecutionChange(Box::new( + self.bls_to_execution_change.clone() + ))] + ); + // Submit the next block, which is on an epoch boundary, so this will produce a finalized // 
checkpoint event, head event, and block event let block_root = self.next_block.signed_block().canonical_root(); @@ -5310,10 +5577,20 @@ impl ApiTester { .await .unwrap(); - let block_events = poll_events(&mut events_future, 3, Duration::from_millis(10000)).await; + let expected_gossip = EventKind::BlockGossip(Box::new(BlockGossip { + slot: next_slot, + block: block_root, + })); + + let block_events = poll_events(&mut events_future, 4, Duration::from_millis(10000)).await; assert_eq!( block_events.as_slice(), - &[expected_block, expected_head, expected_finalized] + &[ + expected_gossip, + expected_block, + expected_head, + expected_finalized + ] ); // Test a reorg event @@ -5353,6 +5630,42 @@ impl ApiTester { .await; assert_eq!(reorg_event.as_slice(), &[expected_reorg]); + // Test attester slashing event + let mut attester_slashing_event_future = self + .client + .get_events::(&[EventTopic::AttesterSlashing]) + .await + .unwrap(); + + self.harness.add_attester_slashing(vec![1, 2, 3]).unwrap(); + + let attester_slashing_event = poll_events( + &mut attester_slashing_event_future, + 1, + Duration::from_millis(10000), + ) + .await; + + assert!(attester_slashing_event.len() == 1); + + // Test proposer slashing event + let mut proposer_slashing_event_future = self + .client + .get_events::(&[EventTopic::ProposerSlashing]) + .await + .unwrap(); + + self.harness.add_proposer_slashing(1).unwrap(); + + let proposer_slashing_event = poll_events( + &mut proposer_slashing_event_future, + 1, + Duration::from_millis(10000), + ) + .await; + + assert!(proposer_slashing_event.len() == 1); + self } @@ -5388,7 +5701,9 @@ impl ApiTester { &self.chain.spec, ); } - let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec).unwrap(); + let expected_withdrawals = get_expected_withdrawals(&state, &self.chain.spec) + .unwrap() + .0; // fetch expected withdrawals from the client let result = self.client.get_expected_withdrawals(&state_id).await; @@ -5575,6 +5890,14 @@ async fn get_events_from_genesis() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_unsupported_media_response() { + ApiTester::new() + .await + .post_beacon_states_validator_balances_unsupported_media_failure() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn beacon_get() { ApiTester::new() @@ -5673,34 +5996,66 @@ async fn post_beacon_blocks_duplicate() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attestations_valid() { +async fn beacon_pools_post_attestations_valid_v1() { ApiTester::new() .await - .test_post_beacon_pool_attestations_valid() + .test_post_beacon_pool_attestations_valid_v1() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attestations_invalid() { +async fn beacon_pools_post_attestations_invalid_v1() { ApiTester::new() .await - .test_post_beacon_pool_attestations_invalid() + .test_post_beacon_pool_attestations_invalid_v1() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attester_slashings_valid() { +async fn beacon_pools_post_attestations_valid_v2() { ApiTester::new() .await - .test_post_beacon_pool_attester_slashings_valid() + .test_post_beacon_pool_attestations_valid_v2() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attester_slashings_invalid() { +async fn beacon_pools_post_attestations_invalid_v2() { ApiTester::new() .await - 
.test_post_beacon_pool_attester_slashings_invalid() + .test_post_beacon_pool_attestations_invalid_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_attester_slashings_valid_v1() { + ApiTester::new() + .await + .test_post_beacon_pool_attester_slashings_valid_v1() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_attester_slashings_invalid_v1() { + ApiTester::new() + .await + .test_post_beacon_pool_attester_slashings_invalid_v1() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_attester_slashings_valid_v2() { + ApiTester::new() + .await + .test_post_beacon_pool_attester_slashings_valid_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_pools_post_attester_slashings_invalid_v2() { + ApiTester::new() + .await + .test_post_beacon_pool_attester_slashings_invalid_v2() .await; } @@ -6028,36 +6383,70 @@ async fn get_validator_aggregate_attestation_with_skip_slots() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_validator_aggregate_and_proofs_valid() { +async fn get_validator_aggregate_and_proofs_valid_v1() { + ApiTester::new() + .await + .test_get_validator_aggregate_and_proofs_valid_v1() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid_with_skip_slots_v1() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_valid_v1() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid_v2() { + ApiTester::new() + .await + .test_get_validator_aggregate_and_proofs_valid_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_aggregate_and_proofs_valid_with_skip_slots_v2() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_aggregate_and_proofs_valid_v2() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_aggregate_and_proofs_invalid_v1() { ApiTester::new() .await - .test_get_validator_aggregate_and_proofs_valid() + .test_get_validator_aggregate_and_proofs_invalid_v1() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() { +async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots_v1() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_get_validator_aggregate_and_proofs_valid() + .test_get_validator_aggregate_and_proofs_invalid_v1() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_validator_aggregate_and_proofs_invalid() { +async fn get_validator_aggregate_and_proofs_invalid_v2() { ApiTester::new() .await - .test_get_validator_aggregate_and_proofs_invalid() + .test_get_validator_aggregate_and_proofs_invalid_v2() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() { +async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots_v2() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_get_validator_aggregate_and_proofs_invalid() + .test_get_validator_aggregate_and_proofs_invalid_v2() .await; } diff --git a/beacon_node/lighthouse_network/Cargo.toml 
b/beacon_node/lighthouse_network/Cargo.toml index 1617c0bd6c9..56a8fe99c70 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -7,14 +7,12 @@ edition = { workspace = true } [dependencies] discv5 = { workspace = true } gossipsub = { workspace = true } -unsigned-varint = { version = "0.6", features = ["codec"] } +unsigned-varint = { version = "0.8", features = ["codec"] } ssz_types = { workspace = true } types = { workspace = true } serde = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } slog = { workspace = true } lighthouse_version = { workspace = true } tokio = { workspace = true } @@ -43,18 +41,11 @@ superstruct = { workspace = true } prometheus-client = "0.22.0" unused_port = { workspace = true } delay_map = { workspace = true } -tracing = { workspace = true } -byteorder = { workspace = true } bytes = { workspace = true } either = { workspace = true } # Local dependencies -futures-ticker = "0.0.3" -getrandom = "0.2.11" -hex_fmt = "0.3.0" -instant = "0.1.12" void = "1.0.2" -base64 = "0.21.5" libp2p-mplex = "0.41" [dependencies.libp2p] @@ -69,7 +60,7 @@ tempfile = { workspace = true } quickcheck = { workspace = true } quickcheck_macros = { workspace = true } async-channel = { workspace = true } +logging = { workspace = true } [features] libp2p-websocket = [] - diff --git a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md index 448e224cb6b..7ec10af741e 100644 --- a/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md +++ b/beacon_node/lighthouse_network/gossipsub/CHANGELOG.md @@ -1,5 +1,8 @@ ## 0.5 Sigma Prime fork +- Implement IDONTWANT messages as per [spec](https://github.com/libp2p/specs/pull/548). + See [PR 5422](https://github.com/sigp/lighthouse/pull/5422) + - Attempt to publish to at least mesh_n peers when publishing a message when flood publish is disabled. See [PR 5357](https://github.com/sigp/lighthouse/pull/5357). - Drop `Publish` and `Forward` gossipsub stale messages when polling ConnectionHandler. 
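Note: the IDONTWANT changelog entry above is the headline change in the gossipsub hunks that follow. On receiving a full message, a node now tells its gossipsub v1.2 mesh peers (and peers it has outstanding IWANTs to) that it already has it, and it records IDONTWANT notices from peers so it can skip forwarding duplicates to them. A minimal, self-contained sketch of that bookkeeping, using simplified stand-in types rather than the real `Behaviour` internals shown later in this diff:

```rust
// Hedged sketch only: `MessageId` is a stand-in type, and the constants mirror
// (but are not) the IDONTWANT_CAP / IDONTWANT_TIMEOUT values added to behaviour.rs.
use std::collections::HashMap;
use std::time::{Duration, Instant};

type MessageId = Vec<u8>;

const IDONTWANT_CAP: usize = 10_000;
const IDONTWANT_TIMEOUT: Duration = Duration::from_secs(3);

#[derive(Default)]
struct PeerState {
    /// Message ids this peer told us it does not want, with the time we learned it.
    dont_send: HashMap<MessageId, Instant>,
}

impl PeerState {
    /// Record an incoming IDONTWANT, evicting an arbitrary entry once the cap is hit.
    fn on_idontwant(&mut self, ids: Vec<MessageId>) {
        for id in ids {
            self.dont_send.insert(id, Instant::now());
            if self.dont_send.len() > IDONTWANT_CAP {
                if let Some(key) = self.dont_send.keys().next().cloned() {
                    self.dont_send.remove(&key);
                }
            }
        }
    }

    /// Never forward (or answer an IWANT for) a message the peer said it doesn't want.
    fn should_send(&self, id: &MessageId) -> bool {
        !self.dont_send.contains_key(id)
    }

    /// Called from the heartbeat: drop entries older than the timeout.
    fn flush_stale(&mut self) {
        self.dont_send
            .retain(|_, received| received.elapsed() < IDONTWANT_TIMEOUT);
    }
}
```

The real implementation keeps these entries in a `LinkedHashMap` per connected peer so eviction is oldest-first; the plain `HashMap` above is only for brevity.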
diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 871955c0591..56c42d29920 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] -wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] +wasm-bindgen = ["getrandom/js"] [dependencies] async-channel = { workspace = true } @@ -24,8 +24,8 @@ futures = "0.3.30" futures-ticker = "0.0.3" futures-timer = "3.0.2" getrandom = "0.2.12" +hashlink.workspace = true hex_fmt = "0.3.0" -instant = "0.1.12" libp2p = { version = "0.53", default-features = false } quick-protobuf = "0.8" quick-protobuf-codec = "0.3" @@ -33,11 +33,10 @@ rand = "0.8" regex = "1.10.3" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" -smallvec = "1.13.1" tracing = "0.1.37" void = "1.0.2" - prometheus-client = "0.22.0" +web-time = "1.1.0" [dev-dependencies] quickcheck = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 2567a3691e0..f83a24baafe 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -20,13 +20,13 @@ //! Data structure for efficiently storing known back-off's when pruning peers. use crate::topic::TopicHash; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{ hash_map::{Entry, HashMap}, HashSet, }; use std::time::Duration; +use web_time::Instant; #[derive(Copy, Clone)] struct HeartbeatIndex(usize); diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index ce0437342e3..0a3b7a9f529 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -31,10 +31,10 @@ use std::{ use futures::StreamExt; use futures_ticker::Ticker; +use hashlink::LinkedHashMap; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; -use instant::Instant; use libp2p::core::{multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; use libp2p::identity::Keypair; use libp2p::identity::PeerId; @@ -44,6 +44,9 @@ use libp2p::swarm::{ ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use web_time::{Instant, SystemTime}; + +use crate::types::IDontWant; use super::gossip_promises::GossipPromises; use super::handler::{Handler, HandlerEvent, HandlerIn}; @@ -67,13 +70,18 @@ use super::{ types::RpcOut, }; use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError}; -use instant::SystemTime; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; #[cfg(test)] mod tests; +/// IDONTWANT cache capacity. +const IDONTWANT_CAP: usize = 10_000; + +/// IDONTWANT timeout before removal. +const IDONTWANT_TIMEOUT: Duration = Duration::new(3, 0); + /// Determines if published messages should be signed or not. /// /// Without signing, a number of privacy preserving modes can be selected. @@ -305,9 +313,8 @@ pub struct Behaviour { /// discovery and not by PX). outbound_peers: HashSet, - /// Stores optional peer score data together with thresholds, decay interval and gossip - /// promises. 
- peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + /// Stores optional peer score data together with thresholds and decay interval. + peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. count_received_ihave: HashMap, @@ -332,6 +339,9 @@ pub struct Behaviour { /// Tracks the numbers of failed messages per peer-id. failed_messages: HashMap, + + /// Tracks recently sent `IWANT` messages and checks if peers respond to them. + gossip_promises: GossipPromises, } impl Behaviour @@ -468,6 +478,7 @@ where subscription_filter, data_transform, failed_messages: Default::default(), + gossip_promises: Default::default(), }) } } @@ -920,7 +931,7 @@ where let interval = Ticker::new(params.decay_interval); let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback); - self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default())); + self.peer_score = Some((peer_score, threshold, interval)); Ok(()) } @@ -1188,7 +1199,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -1249,10 +1260,7 @@ where return false; } - self.peer_score - .as_ref() - .map(|(_, _, _, promises)| !promises.contains(id)) - .unwrap_or(true) + !self.gossip_promises.contains(id) }; for (topic, ids) in ihave_msgs { @@ -1299,13 +1307,11 @@ where iwant_ids_vec.truncate(iask); *iasked += iask; - if let Some((_, _, _, gossip_promises)) = &mut self.peer_score { - gossip_promises.add_promise( - *peer_id, - &iwant_ids_vec, - Instant::now() + self.config.iwant_followup_time(), - ); - } + self.gossip_promises.add_promise( + *peer_id, + &iwant_ids_vec, + Instant::now() + self.config.iwant_followup_time(), + ); if let Some(peer) = &mut self.connected_peers.get_mut(peer_id) { tracing::trace!( @@ -1370,6 +1376,11 @@ where "IWANT: Peer has asked for message too many times; ignoring request" ); } else if let Some(peer) = &mut self.connected_peers.get_mut(peer_id) { + if peer.dont_send.get(&id).is_some() { + tracing::debug!(%peer_id, message=%id, "Peer already sent IDONTWANT for this message"); + continue; + } + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); if peer .sender @@ -1707,14 +1718,15 @@ where peer=%propagation_source, "Rejecting message from blacklisted peer" ); - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + self.gossip_promises + .reject_message(msg_id, &RejectReason::BlackListedPeer); + if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.reject_message( propagation_source, msg_id, &raw_message.topic, RejectReason::BlackListedPeer, ); - gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer); } return false; } @@ -1796,6 +1808,9 @@ where // Calculate the message id on the transformed data. let msg_id = self.config.message_id(&message); + // Broadcast IDONTWANT messages. + self.send_idontwant(&raw_message, &msg_id, propagation_source); + // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. 
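Note: a second thread in the behaviour.rs hunks above is that IWANT promise tracking moves out of the optional peer-score tuple into a dedicated `gossip_promises` field. Promises are now recorded unconditionally (the new IDONTWANT broadcast also consults them via `peers_for_message`), while scoring-specific penalties stay behind the `peer_score` option. An outline of the shape of that refactor, with the tuple element types reduced to unit stand-ins:

```rust
// Shape-only sketch of the field change in Behaviour; the element types here are
// empty stand-ins, not the real gossipsub types.
struct PeerScore;
struct PeerScoreThresholds;
struct Ticker;
#[derive(Default)]
struct GossipPromises;

struct BehaviourBefore {
    // Promises only existed when scoring was configured.
    peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>,
}

struct BehaviourAfter {
    // Scoring stays optional...
    peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker)>,
    // ...but IWANT promises are now always tracked.
    gossip_promises: GossipPromises,
}
```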
@@ -1821,11 +1836,12 @@ where metrics.msg_recvd(&message.topic); } - // Tells score that message arrived (but is maybe not fully validated yet). // Consider the message as delivered for gossip promises. - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + self.gossip_promises.message_delivered(&msg_id); + + // Tells score that message arrived (but is maybe not fully validated yet). + if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.validate_message(propagation_source, &msg_id, &message.topic); - gossip_promises.message_delivered(&msg_id); } // Add the message to our memcache @@ -1872,7 +1888,7 @@ where raw_message: &RawMessage, reject_reason: RejectReason, ) { - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { + if let Some((peer_score, ..)) = &mut self.peer_score { if let Some(metrics) = self.metrics.as_mut() { metrics.register_invalid_message(&raw_message.topic); } @@ -1887,7 +1903,8 @@ where reject_reason, ); - gossip_promises.reject_message(&message_id, &reject_reason); + self.gossip_promises + .reject_message(&message_id, &reject_reason); } else { // The message is invalid, we reject it ignoring any gossip promises. If a peer is // advertising this message via an IHAVE and it's invalid it will be double @@ -1960,7 +1977,7 @@ where } // if the mesh needs peers add the peer to the mesh if !self.explicit_peers.contains(propagation_source) - && matches!(peer.kind, PeerKind::Gossipsubv1_1 | PeerKind::Gossipsub) + && peer.kind.is_gossipsub() && !Self::score_below_threshold_from_scores( &self.peer_score, propagation_source, @@ -2067,8 +2084,8 @@ where /// Applies penalties to peers that did not respond to our IWANT requests. fn apply_iwant_penalties(&mut self) { - if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { - for (peer, count) in gossip_promises.get_broken_promises() { + if let Some((peer_score, ..)) = &mut self.peer_score { + for (peer, count) in self.gossip_promises.get_broken_promises() { peer_score.add_penalty(&peer, count); if let Some(metrics) = self.metrics.as_mut() { metrics.register_score_penalty(Penalty::BrokenPromise); @@ -2289,7 +2306,7 @@ where && peers.len() > 1 && self.peer_score.is_some() { - if let Some((_, thresholds, _, _)) = &self.peer_score { + if let Some((_, thresholds, _)) = &self.peer_score { // Opportunistic grafting works as follows: we check the median score of peers // in the mesh; if this score is below the opportunisticGraftThreshold, we // select a few peers at random with score over the median. @@ -2382,7 +2399,7 @@ where for (topic_hash, peers) in self.fanout.iter_mut() { let mut to_remove_peers = Vec::new(); let publish_threshold = match &self.peer_score { - Some((_, thresholds, _, _)) => thresholds.publish_threshold, + Some((_, thresholds, _)) => thresholds.publish_threshold, _ => 0.0, }; for peer_id in peers.iter() { @@ -2475,6 +2492,17 @@ where } self.failed_messages.shrink_to_fit(); + // Flush stale IDONTWANTs. + for peer in self.connected_peers.values_mut() { + while let Some((_front, instant)) = peer.dont_send.front() { + if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { + break; + } else { + peer.dont_send.pop_front(); + } + } + } + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); @@ -2656,6 +2684,59 @@ where } } + /// Helper function which sends an IDONTWANT message to mesh\[topic\] peers. 
+ fn send_idontwant( + &mut self, + message: &RawMessage, + msg_id: &MessageId, + propagation_source: &PeerId, + ) { + let Some(mesh_peers) = self.mesh.get(&message.topic) else { + return; + }; + + let iwant_peers = self.gossip_promises.peers_for_message(msg_id); + + let recipient_peers = mesh_peers + .iter() + .chain(iwant_peers.iter()) + .filter(|peer_id| { + *peer_id != propagation_source && Some(*peer_id) != message.source.as_ref() + }); + + for peer_id in recipient_peers { + let Some(peer) = self.connected_peers.get_mut(peer_id) else { + tracing::error!(peer = %peer_id, + "Could not IDONTWANT, peer doesn't exist in connected peer list"); + continue; + }; + + // Only gossipsub 1.2 peers support IDONTWANT. + if peer.kind != PeerKind::Gossipsubv1_2_beta { + continue; + } + + if peer + .sender + .idontwant(IDontWant { + message_ids: vec![msg_id.clone()], + }) + .is_err() + { + tracing::warn!(peer=%peer_id, "Send Queue full. Could not send IDONTWANT"); + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.failed_message_slow_peer(peer_id); + } + // Increment failed message count + self.failed_messages + .entry(*peer_id) + .or_default() + .non_priority += 1; + } + } + } + /// Helper function which forwards a message to mesh\[topic\] peers. /// /// Returns true if at least one peer was messaged. @@ -2709,6 +2790,11 @@ where if !recipient_peers.is_empty() { for peer_id in recipient_peers.iter() { if let Some(peer) = self.connected_peers.get_mut(peer_id) { + if peer.dont_send.get(msg_id).is_some() { + tracing::debug!(%peer_id, message=%msg_id, "Peer doesn't want message"); + continue; + } + tracing::debug!(%peer_id, message=%msg_id, "Sending message to peer"); if peer .sender @@ -3058,6 +3144,7 @@ where connections: vec![], sender: RpcSender::new(self.config.connection_handler_queue_len()), topics: Default::default(), + dont_send: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3088,6 +3175,7 @@ where connections: vec![], sender: RpcSender::new(self.config.connection_handler_queue_len()), topics: Default::default(), + dont_send: LinkedHashMap::new(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3137,7 +3225,7 @@ where } HandlerEvent::MessageDropped(rpc) => { // Account for this in the scoring logic - if let Some((peer_score, _, _, _)) = &mut self.peer_score { + if let Some((peer_score, _, _)) = &mut self.peer_score { peer_score.failed_message_slow_peer(&propagation_source); } @@ -3246,6 +3334,24 @@ where peers, backoff, }) => prune_msgs.push((topic_hash, peers, backoff)), + ControlAction::IDontWant(IDontWant { message_ids }) => { + let Some(peer) = self.connected_peers.get_mut(&propagation_source) + else { + tracing::error!(peer = %propagation_source, + "Could not handle IDONTWANT, peer doesn't exist in connected peer list"); + continue; + }; + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_idontwant(message_ids.len()); + } + for message_id in message_ids { + peer.dont_send.insert(message_id, Instant::now()); + // Don't exceed capacity. 
+ if peer.dont_send.len() > IDONTWANT_CAP { + peer.dont_send.pop_front(); + } + } + } } } if !ihave_msgs.is_empty() { @@ -3271,7 +3377,7 @@ where } // update scores - if let Some((peer_score, _, interval, _)) = &mut self.peer_score { + if let Some((peer_score, _, interval)) = &mut self.peer_score { while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) { peer_score.refresh_scores(); } @@ -3396,7 +3502,7 @@ fn get_random_peers_dynamic( .iter() .filter(|(_, p)| p.topics.contains(topic_hash)) .filter(|(peer_id, _)| f(peer_id)) - .filter(|(_, p)| p.kind == PeerKind::Gossipsub || p.kind == PeerKind::Gossipsubv1_1) + .filter(|(_, p)| p.kind.is_gossipsub()) .map(|(peer_id, _)| *peer_id) .collect::>(); diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 2af0199ec93..a378198be33 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -31,13 +31,7 @@ use std::net::Ipv4Addr; use std::thread::sleep; #[derive(Default, Debug)] -struct InjectNodes -// TODO: remove trait bound Default when this issue is fixed: -// https://github.com/colin-kiegel/rust-derive-builder/issues/93 -where - D: DataTransform + Default + Clone + Send + 'static, - F: TopicSubscriptionFilter + Clone + Default + Send + 'static, -{ +struct InjectNodes { peer_no: usize, topics: Vec, to_subscribe: bool, @@ -47,6 +41,7 @@ where scoring: Option<(PeerScoreParams, PeerScoreThresholds)>, data_transform: D, subscription_filter: F, + peer_kind: Option, } impl InjectNodes @@ -94,7 +89,7 @@ where let empty = vec![]; for i in 0..self.peer_no { - let (peer, receiver) = add_peer( + let (peer, receiver) = add_peer_with_addr_and_kind( &mut gs, if self.to_subscribe { &topic_hashes @@ -103,6 +98,8 @@ where }, i < self.outbound, i < self.explicit, + Multiaddr::empty(), + self.peer_kind.clone().or(Some(PeerKind::Gossipsubv1_1)), ); peers.push(peer); receivers.insert(peer, receiver); @@ -151,6 +148,11 @@ where self.subscription_filter = subscription_filter; self } + + fn peer_kind(mut self, peer_kind: PeerKind) -> Self { + self.peer_kind = Some(peer_kind); + self + } } fn inject_nodes() -> InjectNodes @@ -235,6 +237,7 @@ where kind: kind.clone().unwrap_or(PeerKind::Floodsub), connections: vec![connection_id], topics: Default::default(), + dont_send: LinkedHashMap::new(), sender, }, ); @@ -620,6 +623,7 @@ fn test_join() { kind: PeerKind::Floodsub, connections: vec![connection_id], topics: Default::default(), + dont_send: LinkedHashMap::new(), sender, }, ); @@ -1015,6 +1019,7 @@ fn test_get_random_peers() { connections: vec![ConnectionId::new_unchecked(0)], topics: topics.clone(), sender: RpcSender::new(gs.config.connection_handler_queue_len()), + dont_send: LinkedHashMap::new(), }, ); } @@ -4580,9 +4585,9 @@ fn test_ignore_too_many_messages_in_ihave() { let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + //peer has 30 messages let mut seq = 0; - let message_ids: Vec<_> = (0..20) + let message_ids: Vec<_> = (0..30) .map(|_| random_message(&mut seq, &topics)) .map(|msg| gs.data_transform.inbound_transform(msg).unwrap()) .map(|msg| config.message_id(&msg)) @@ -4624,7 +4629,7 @@ fn test_ignore_too_many_messages_in_ihave() { gs.heartbeat(); gs.handle_ihave( &peer, - vec![(topics[0].clone(), message_ids[10..20].to_vec())], + vec![(topics[0].clone(), message_ids[20..30].to_vec())], ); //we sent 
10 iwant messages ids via a IWANT rpc. @@ -5236,3 +5241,191 @@ fn test_graft_without_subscribe() { // We unsubscribe from the topic. let _ = gs.unsubscribe(&Topic::new(topic)); } + +/// Test that a node sends IDONTWANT messages to the mesh peers +/// that run Gossipsub v1.2. +#[test] +fn sends_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2_beta) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 3, + "IDONTWANT was not sent" + ); +} + +/// Test that a node doesn't send IDONTWANT messages to the mesh peers +/// that don't run Gossipsub v1.2. +#[test] +fn doesnt_send_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_1) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if matches!(non_priority.try_recv(), Ok(RpcOut::IDontWant(_)) if peer_id != peers[1]) { + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT were sent" + ); +} + +/// Test that a node doesn't forward a messages to the mesh peers +/// that sent IDONTWANT. +#[test] +fn doesnt_forward_idontwant() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2_beta) + .create_network(); + + let local_id = PeerId::random(); + + let raw_message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + let message = gs + .data_transform + .inbound_transform(raw_message.clone()) + .unwrap(); + let message_id = gs.config.message_id(&message); + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + peer.dont_send.insert(message_id, Instant::now()); + + gs.handle_received_message(raw_message.clone(), &local_id); + assert_eq!( + receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::Forward { .. 
}) = non_priority.try_recv() { + assert_ne!(peer_id, peers[2]); + fwds += 1; + } + } + fwds + }), + 2, + "IDONTWANT was not sent" + ); +} + +/// Test that a node parses an +/// IDONTWANT message to the respective peer. +#[test] +fn parses_idontwant() { + let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + .peer_no(2) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2_beta) + .create_network(); + + let message_id = MessageId::new(&[0, 1, 2, 3]); + let rpc = Rpc { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![ControlAction::IDontWant(IDontWant { + message_ids: vec![message_id.clone()], + })], + }; + gs.on_connection_handler_event( + peers[1], + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc, + invalid_messages: vec![], + }, + ); + let peer = gs.connected_peers.get_mut(&peers[1]).unwrap(); + assert!(peer.dont_send.get(&message_id).is_some()); +} + +/// Test that a node clears stale IDONTWANT messages. +#[test] +fn clear_stale_idontwant() { + let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + .peer_no(4) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2_beta) + .create_network(); + + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + peer.dont_send + .insert(MessageId::new(&[1, 2, 3, 4]), Instant::now()); + std::thread::sleep(Duration::from_secs(3)); + gs.heartbeat(); + let peer = gs.connected_peers.get_mut(&peers[2]).unwrap(); + assert!(peer.dont_send.is_empty()); +} diff --git a/beacon_node/lighthouse_network/gossipsub/src/generated/gossipsub/pb.rs b/beacon_node/lighthouse_network/gossipsub/src/generated/gossipsub/pb.rs index 9a074fd61fc..24ac80d2755 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/generated/gossipsub/pb.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/generated/gossipsub/pb.rs @@ -154,6 +154,7 @@ pub struct ControlMessage { pub iwant: Vec, pub graft: Vec, pub prune: Vec, + pub idontwant: Vec, } impl<'a> MessageRead<'a> for ControlMessage { @@ -165,6 +166,7 @@ impl<'a> MessageRead<'a> for ControlMessage { Ok(18) => msg.iwant.push(r.read_message::(bytes)?), Ok(26) => msg.graft.push(r.read_message::(bytes)?), Ok(34) => msg.prune.push(r.read_message::(bytes)?), + Ok(42) => msg.idontwant.push(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -180,6 +182,7 @@ impl MessageWrite for ControlMessage { + self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.idontwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() } fn write_message(&self, w: &mut Writer) -> Result<()> { @@ -187,6 +190,7 @@ impl MessageWrite for ControlMessage { for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; } for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } + for s in &self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } Ok(()) } } @@ -331,6 +335,38 @@ impl MessageWrite for ControlPrune { } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlIDontWant { + pub message_ids: Vec>, +} + +impl<'a> MessageRead<'a> for ControlIDontWant { + fn 
from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlIDontWant { + fn get_size(&self) -> usize { + 0 + + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] pub struct PeerInfo { diff --git a/beacon_node/lighthouse_network/gossipsub/src/generated/rpc.proto b/beacon_node/lighthouse_network/gossipsub/src/generated/rpc.proto index 2ce12f3f37f..e3b5888d2c0 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/generated/rpc.proto +++ b/beacon_node/lighthouse_network/gossipsub/src/generated/rpc.proto @@ -28,6 +28,7 @@ message ControlMessage { repeated ControlIWant iwant = 2; repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; } message ControlIHave { @@ -49,6 +50,10 @@ message ControlPrune { optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds) } +message ControlIDontWant { + repeated bytes message_ids = 1; +} + message PeerInfo { optional bytes peer_id = 1; optional bytes signed_peer_record = 2; diff --git a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs index 43ca178556b..3f72709245f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs @@ -21,9 +21,9 @@ use super::peer_score::RejectReason; use super::MessageId; use super::ValidationError; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::HashMap; +use web_time::Instant; /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] @@ -41,6 +41,14 @@ impl GossipPromises { self.promises.contains_key(message) } + ///Get the peers we sent IWANT the input message id. + pub(crate) fn peers_for_message(&self, message_id: &MessageId) -> Vec { + self.promises + .get(message_id) + .map(|peers| peers.keys().copied().collect()) + .unwrap_or_default() + } + /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting. pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) { for message_id in messages { diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index 298570955fc..359bf8da428 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -26,7 +26,6 @@ use asynchronous_codec::Framed; use futures::future::Either; use futures::prelude::*; use futures::StreamExt; -use instant::Instant; use libp2p::core::upgrade::DeniedUpgrade; use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, @@ -37,6 +36,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use web_time::Instant; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. 
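// --- Illustrative sketch, not part of the patch ------------------------------
// Shows how an IDONTWANT control message maps between the behaviour-level
// `IDontWant`/`MessageId` types and the `proto::ControlIDontWant` wire type
// introduced above. The function name and flat paths used here are assumptions
// for illustration only; the real conversions live in `From<RpcOut> for
// proto::RPC` and in `GossipsubCodec::decode`.
fn idontwant_wire_round_trip(ids: Vec<MessageId>) -> Vec<MessageId> {
    // Encode: each message id is sent as raw bytes on the wire.
    let wire = proto::ControlIDontWant {
        message_ids: ids.into_iter().map(|id| id.0).collect(),
    };
    // Decode: raw bytes become `MessageId`s again, exactly as the decoder does
    // for incoming RPCs.
    wire.message_ids.into_iter().map(MessageId::from).collect()
}
// ------------------------------------------------------------------------------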
diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index 91bcd5f54bc..7e1cdac18ba 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -179,6 +179,12 @@ pub(crate) struct Metrics { /// topic. A very high metric might indicate an underperforming network. topic_iwant_msgs: Family, + /// The number of times we have received an IDONTWANT control message. + idontwant_msgs: Counter, + + /// The number of msg_id's we have received in every IDONTWANT control message. + idontwant_msgs_ids: Counter, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -311,6 +317,27 @@ impl Metrics { "topic_iwant_msgs", "Number of times we have decided an IWANT is required for this topic" ); + + let idontwant_msgs = { + let metric = Counter::default(); + registry.register( + "idontwant_msgs", + "The number of times we have received an IDONTWANT control message", + metric.clone(), + ); + metric + }; + + let idontwant_msgs_ids = { + let metric = Counter::default(); + registry.register( + "idontwant_msgs_ids", + "The number of msg_id's we have received in every IDONTWANT control message.", + metric.clone(), + ); + metric + }; + let memcache_misses = { let metric = Counter::default(); registry.register( @@ -362,6 +389,8 @@ impl Metrics { heartbeat_duration, memcache_misses, topic_iwant_msgs, + idontwant_msgs, + idontwant_msgs_ids, priority_queue_size, non_priority_queue_size, } @@ -560,6 +589,12 @@ impl Metrics { } } + /// Register receiving an IDONTWANT msg for this topic. + pub(crate) fn register_idontwant(&mut self, msgs: usize) { + self.idontwant_msgs.inc(); + self.idontwant_msgs_ids.inc_by(msgs as u64); + } + /// Observes a heartbeat duration. 
pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) { self.heartbeat_duration.observe(millis as f64); diff --git a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs index 4d609434f13..fa02f06f69d 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/peer_score.rs @@ -24,11 +24,11 @@ use super::metrics::{Metrics, Penalty}; use super::time_cache::TimeCache; use super::{MessageId, TopicHash}; -use instant::Instant; use libp2p::identity::PeerId; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; +use web_time::Instant; mod params; use super::ValidationError; diff --git a/beacon_node/lighthouse_network/gossipsub/src/protocol.rs b/beacon_node/lighthouse_network/gossipsub/src/protocol.rs index ba84ae0aa7a..5611ae32c91 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/protocol.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/protocol.rs @@ -23,8 +23,8 @@ use super::handler::HandlerEvent; use super::rpc_proto::proto; use super::topic::TopicHash; use super::types::{ - ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, - Subscription, SubscriptionAction, + ControlAction, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, + RawMessage, Rpc, Subscription, SubscriptionAction, }; use super::ValidationError; use asynchronous_codec::{Decoder, Encoder, Framed}; @@ -40,6 +40,10 @@ use void::Void; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; +pub(crate) const GOSSIPSUB_1_2_0_BETA_PROTOCOL: ProtocolId = ProtocolId { + protocol: StreamProtocol::new("/meshsub/1.2.0"), + kind: PeerKind::Gossipsubv1_2_beta, +}; pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId { protocol: StreamProtocol::new("/meshsub/1.1.0"), kind: PeerKind::Gossipsubv1_1, @@ -69,7 +73,11 @@ impl Default for ProtocolConfig { Self { max_transmit_size: 65536, validation_mode: ValidationMode::Strict, - protocol_ids: vec![GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL], + protocol_ids: vec![ + GOSSIPSUB_1_2_0_BETA_PROTOCOL, + GOSSIPSUB_1_1_0_PROTOCOL, + GOSSIPSUB_1_0_0_PROTOCOL, + ], } } } @@ -476,10 +484,25 @@ impl Decoder for GossipsubCodec { })); } + let idontwant_msgs: Vec = rpc_control + .idontwant + .into_iter() + .map(|idontwant| { + ControlAction::IDontWant(IDontWant { + message_ids: idontwant + .message_ids + .into_iter() + .map(MessageId::from) + .collect::>(), + }) + }) + .collect(); + control_msgs.extend(ihave_msgs); control_msgs.extend(iwant_msgs); control_msgs.extend(graft_msgs); control_msgs.extend(prune_msgs); + control_msgs.extend(idontwant_msgs); } Ok(Some(HandlerEvent::Message { diff --git a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs index 89fd4afee09..a3e5c01ac4c 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/time_cache.rs @@ -21,13 +21,13 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. 
use fnv::FnvHashMap; -use instant::Instant; use std::collections::hash_map::{ self, Entry::{Occupied, Vacant}, }; use std::collections::VecDeque; use std::time::Duration; +use web_time::Instant; struct ExpiringElement { /// The element that expires diff --git a/beacon_node/lighthouse_network/gossipsub/src/types.rs b/beacon_node/lighthouse_network/gossipsub/src/types.rs index 712698b42ac..8df307d470b 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/types.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/types.rs @@ -25,7 +25,7 @@ use async_channel::{Receiver, Sender}; use futures::stream::Peekable; use futures::{Future, Stream, StreamExt}; use futures_timer::Delay; -use instant::Duration; +use hashlink::LinkedHashMap; use libp2p::identity::PeerId; use libp2p::swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; @@ -35,7 +35,9 @@ use std::fmt::Debug; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; +use std::time::Instant; use std::{fmt, pin::Pin}; +use web_time::Duration; use crate::rpc_proto::proto; #[cfg(feature = "serde")] @@ -121,11 +123,16 @@ pub(crate) struct PeerConnections { pub(crate) sender: RpcSender, /// Subscribed topics. pub(crate) topics: BTreeSet, + /// Don't send messages. + pub(crate) dont_send: LinkedHashMap, } /// Describes the types of peers that can exist in the gossipsub context. #[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)] +#[allow(non_camel_case_types)] pub enum PeerKind { + /// A gossipsub 1.2 peer. + Gossipsubv1_2_beta, /// A gossipsub 1.1 peer. Gossipsubv1_1, /// A gossipsub 1.0 peer. @@ -136,6 +143,16 @@ pub enum PeerKind { NotSupported, } +impl PeerKind { + /// Returns true if peer speaks any gossipsub version. + pub(crate) fn is_gossipsub(&self) -> bool { + matches!( + self, + Self::Gossipsubv1_2_beta | Self::Gossipsubv1_1 | Self::Gossipsub + ) + } +} + /// A message received by the gossipsub system and stored locally in caches.. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct RawMessage { @@ -257,6 +274,8 @@ pub enum ControlAction { Graft(Graft), /// The node has been removed from the mesh - Prune control message. Prune(Prune), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant control message. + IDontWant(IDontWant), } /// Node broadcasts known messages per topic - IHave control message. @@ -293,6 +312,13 @@ pub struct Prune { pub(crate) backoff: Option, } +/// The node requests us to not forward message ids - IDontWant control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IDontWant { + /// A list of known message ids. + pub(crate) message_ids: Vec, +} + /// A Gossipsub RPC message sent. #[derive(Debug)] pub enum RpcOut { @@ -314,6 +340,8 @@ pub enum RpcOut { IHave(IHave), /// Send a IWant control message. IWant(IWant), + /// Send a IDontWant control message. 
+ IDontWant(IDontWant), } impl RpcOut { @@ -374,6 +402,7 @@ impl From for proto::RPC { iwant: vec![], graft: vec![], prune: vec![], + idontwant: vec![], }), }, RpcOut::IWant(IWant { message_ids }) => proto::RPC { @@ -386,6 +415,7 @@ impl From for proto::RPC { }], graft: vec![], prune: vec![], + idontwant: vec![], }), }, RpcOut::Graft(Graft { topic_hash }) => proto::RPC { @@ -398,6 +428,7 @@ impl From for proto::RPC { topic_id: Some(topic_hash.into_string()), }], prune: vec![], + idontwant: vec![], }), }, RpcOut::Prune(Prune { @@ -424,9 +455,23 @@ impl From for proto::RPC { .collect(), backoff, }], + idontwant: vec![], }), } } + RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + }), + }, } } } @@ -485,6 +530,7 @@ impl From for proto::RPC { iwant: Vec::new(), graft: Vec::new(), prune: Vec::new(), + idontwant: Vec::new(), }; let empty_control_msg = rpc.control_msgs.is_empty(); @@ -533,6 +579,12 @@ impl From for proto::RPC { }; control.prune.push(rpc_prune); } + ControlAction::IDontWant(IDontWant { message_ids }) => { + let rpc_idontwant = proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }; + control.idontwant.push(rpc_idontwant); + } } } @@ -571,6 +623,7 @@ impl PeerKind { Self::Floodsub => "Floodsub", Self::Gossipsub => "Gossipsub v1.0", Self::Gossipsubv1_1 => "Gossipsub v1.1", + Self::Gossipsubv1_2_beta => "Gossipsub v1.2-beta", } } } @@ -657,6 +710,15 @@ impl RpcSender { .map_err(|err| err.into_inner()) } + /// Send a `RpcOut::IWant` message to the `RpcReceiver` + /// this is low priority, if the queue is full an Err is returned. + #[allow(clippy::result_large_err)] + pub(crate) fn idontwant(&mut self, idontwant: IDontWant) -> Result<(), RpcOut> { + self.non_priority_sender + .try_send(RpcOut::IDontWant(idontwant)) + .map_err(|err| err.into_inner()) + } + /// Send a `RpcOut::Subscribe` message to the `RpcReceiver` /// this is high priority. pub(crate) fn subscribe(&mut self, topic: TopicHash) { diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 8cc2ea86c0c..73f51c001a7 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -398,16 +398,32 @@ impl Discovery { /// automatically update the external address. /// /// If the external address needs to be modified, use `update_enr_udp_socket. - pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> { + /// + /// This returns Ok(true) if the ENR was updated, otherwise Ok(false) if nothing was done. 
+ pub fn update_enr_tcp_port(&mut self, port: u16, v6: bool) -> Result { + let enr_field = if v6 { + if self.discv5.external_enr().read().tcp6() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "tcp6" + } else { + if self.discv5.external_enr().read().tcp4() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "tcp" + }; + self.discv5 - .enr_insert("tcp", &port) + .enr_insert(enr_field, &port) .map_err(|e| format!("{:?}", e))?; // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); - Ok(()) + Ok(true) } // TODO: Group these functions here once the ENR is shared across discv5 and lighthouse and @@ -415,16 +431,35 @@ impl Discovery { // This currently doesn't support ipv6. All of these functions should be removed and // addressed properly in the following issue. // https://github.com/sigp/lighthouse/issues/4706 - pub fn update_enr_quic_port(&mut self, port: u16) -> Result<(), String> { + pub fn update_enr_quic_port(&mut self, port: u16, v6: bool) -> Result { + let enr_field = if v6 { + if self.discv5.external_enr().read().quic6() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "quic6" + } else { + if self.discv5.external_enr().read().quic4() == Some(port) { + // The field is already set to the same value, nothing to do + return Ok(false); + } + "quic" + }; + let current_field = self.discv5.external_enr().read().quic4(); + if current_field == Some(port) { + // The current field is already set, no need to update. + return Ok(false); + } + self.discv5 - .enr_insert("quic", &port) + .enr_insert(enr_field, &port) .map_err(|e| format!("{:?}", e))?; // replace the global version *self.network_globals.local_enr.write() = self.discv5.local_enr(); // persist modified enr to disk enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr(), &self.log); - Ok(()) + Ok(true) } /// Updates the local ENR UDP socket. @@ -526,8 +561,8 @@ impl Discovery { /// Updates the `eth2` field of our local ENR. 
pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) { // to avoid having a reference to the spec constant, for the logging we assume - // FAR_FUTURE_EPOCH is u64::max_value() - let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::max_value() { + // FAR_FUTURE_EPOCH is u64::MAX + let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::MAX { String::from("No other fork") } else { format!("{:?}", enr_fork_id.next_fork_epoch) @@ -1057,7 +1092,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_tcp_port(port) + self.update_enr_tcp_port(port, false) } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic4 { @@ -1065,7 +1100,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_quic_port(port) + self.update_enr_quic_port(port, false) } _ => { debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); @@ -1079,7 +1114,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_tcp_port(port) + self.update_enr_tcp_port(port, true) } (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { if !self.update_ports.quic6 { @@ -1087,7 +1122,7 @@ impl NetworkBehaviour for Discovery { return; } - self.update_enr_quic_port(port) + self.update_enr_quic_port(port, true) } _ => { debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); @@ -1103,9 +1138,10 @@ impl NetworkBehaviour for Discovery { let local_enr: Enr = self.discv5.local_enr(); match attempt_enr_update { - Ok(_) => { + Ok(true) => { info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) } + Ok(false) => {} // Nothing to do, ENR already configured Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), } } diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 264795844a0..0b827164fc4 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -2,9 +2,6 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. -#[macro_use] -extern crate lazy_static; - mod config; pub mod service; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index fc441f25339..8efed44eb4f 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,5 +1,7 @@ pub use lighthouse_metrics::*; +use lazy_static::lazy_static; + lazy_static! 
{ pub static ref NAT_OPEN: Result = try_create_int_gauge_vec( "nat_open", diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index b776347ad08..3858a2a5392 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -14,7 +14,6 @@ use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; -use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; @@ -201,7 +200,7 @@ impl NetworkBehaviour for PeerManager { ) -> Result, ConnectionDenied> { trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); // We already checked if the peer was banned on `handle_pending_inbound_connection`. - if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + if self.ban_status(&peer_id).is_some() { return Err(ConnectionDenied::new( "Connection to peer rejected: peer has a bad score", )); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index ba9bd314722..8187dc4ba4e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -6,6 +6,7 @@ //! //! The scoring algorithms are currently experimental. use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; +use lazy_static::lazy_static; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 08b81c7eae5..d17fa112a1b 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -103,8 +103,13 @@ impl RateLimiterConfig { pub const DEFAULT_GOODBYE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(1024, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(768, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); + // `BlocksByRange` and `BlobsByRange` are sent together during range sync. + // It makes sense for blocks and blobs quotas to be equivalent in terms of the number of blocks: + // 1024 blocks * 6 max blobs per block. + // This doesn't necessarily mean that we are sending this many blobs, because the quotas are + // measured against the maximum request size. 
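    // Worked example of that sizing (sketch, using the figures assumed in the
    // comment above): BlocksByRange allows 1024 blocks per 10 s window, so
    // BlobsByRange allows 1024 * 6 max blobs per block = 6144 blobs per 10 s.
    // Likewise BlocksByRoot allows 128 roots per 10 s, giving 128 * 6 = 768
    // for BlobsByRoot.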
+ pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index df5bbba99c8..6f338ebc8be 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -15,7 +15,7 @@ use libp2p::swarm::handler::{ FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; use libp2p::swarm::Stream; -use slog::{crit, debug, trace, warn}; +use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, VecDeque}, @@ -389,70 +389,44 @@ where } // purge expired inbound substreams and send an error - loop { - match self.inbound_substreams_delay.poll_expired(cx) { - Poll::Ready(Some(Ok(inbound_id))) => { - // handle a stream timeout for various states - if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) { - // the delay has been removed - info.delay_key = None; - self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { - error: RPCError::StreamTimeout, - proto: info.protocol, - id: *inbound_id.get_ref(), - })); - - if info.pending_items.back().map(|l| l.close_after()) == Some(false) { - // if the last chunk does not close the stream, append an error - info.pending_items.push_back(RPCCodedResponse::Error( - RPCResponseErrorCode::ServerError, - "Request timed out".into(), - )); - } - } - } - Poll::Ready(Some(Err(e))) => { - warn!(self.log, "Inbound substream poll failed"; "error" => ?e); - // drops the peer if we cannot read the delay queue - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::Close(RPCError::InternalError( - "Could not poll inbound stream timer", - )), + + while let Poll::Ready(Some(inbound_id)) = self.inbound_substreams_delay.poll_expired(cx) { + // handle a stream timeout for various states + if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) { + // the delay has been removed + info.delay_key = None; + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + error: RPCError::StreamTimeout, + proto: info.protocol, + id: *inbound_id.get_ref(), + })); + + if info.pending_items.back().map(|l| l.close_after()) == Some(false) { + // if the last chunk does not close the stream, append an error + info.pending_items.push_back(RPCCodedResponse::Error( + RPCResponseErrorCode::ServerError, + "Request timed out".into(), )); } - Poll::Pending | Poll::Ready(None) => break, } } // purge expired outbound substreams - loop { - match self.outbound_substreams_delay.poll_expired(cx) { - Poll::Ready(Some(Ok(outbound_id))) => { - if let Some(OutboundInfo { proto, req_id, .. 
}) = - self.outbound_substreams.remove(outbound_id.get_ref()) - { - let outbound_err = HandlerErr::Outbound { - id: req_id, - proto, - error: RPCError::StreamTimeout, - }; - // notify the user - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::Err(outbound_err), - )); - } else { - crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); - } - } - Poll::Ready(Some(Err(e))) => { - warn!(self.log, "Outbound substream poll failed"; "error" => ?e); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::Close(RPCError::InternalError( - "Could not poll outbound stream timer", - )), - )); - } - Poll::Pending | Poll::Ready(None) => break, + while let Poll::Ready(Some(outbound_id)) = self.outbound_substreams_delay.poll_expired(cx) { + if let Some(OutboundInfo { proto, req_id, .. }) = + self.outbound_substreams.remove(outbound_id.get_ref()) + { + let outbound_err = HandlerErr::Outbound { + id: req_id, + proto, + error: RPCError::StreamTimeout, + }; + // notify the user + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::Err( + outbound_err, + ))); + } else { + crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref()); } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index a91c9e44b2b..027af89edfa 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -10,7 +10,7 @@ use libp2p::swarm::{ handler::ConnectionHandler, CloseConnection, ConnectionId, NetworkBehaviour, NotifyHandler, ToSwarm, }; -use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent}; +use libp2p::swarm::{ConnectionClosed, FromSwarm, SubstreamProtocol, THandlerInEvent}; use libp2p::PeerId; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use slog::{crit, debug, o}; @@ -283,9 +283,61 @@ where Ok(handler) } - fn on_swarm_event(&mut self, _event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release notes more // than compiler feedback + // The self rate limiter holds on to requests and attempts to process them within our rate + // limits. If a peer disconnects whilst we are self-rate limiting, we want to terminate any + // pending requests and return an error response to the application. + + if let FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + remaining_established, + connection_id, + .. + }) = event + { + // If there are still connections remaining, do nothing. + if remaining_established > 0 { + return; + } + // Get a list of pending requests from the self rate limiter + if let Some(limiter) = self.self_limiter.as_mut() { + for (id, proto) in limiter.peer_disconnected(peer_id) { + let error_msg = ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id: connection_id, + event: HandlerEvent::Err(HandlerErr::Outbound { + id, + proto, + error: RPCError::Disconnected, + }), + }); + self.events.push(error_msg); + } + } + + // Replace the pending Requests to the disconnected peer + // with reports of failed requests. + self.events.iter_mut().for_each(|event| match &event { + ToSwarm::NotifyHandler { + peer_id: p, + event: RPCSend::Request(request_id, req), + .. 
+ } if *p == peer_id => { + *event = ToSwarm::GenerateEvent(RPCMessage { + peer_id, + conn_id: connection_id, + event: HandlerEvent::Err(HandlerErr::Outbound { + id: *request_id, + proto: req.versioned_protocol().protocol(), + error: RPCError::Disconnected, + }), + }); + } + _ => {} + }); + } } fn on_connection_handler_event( diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 12a7f09338e..bfaaef9b3bd 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -3,6 +3,7 @@ use crate::rpc::codec::{base::BaseInboundCodec, ssz_snappy::SSZSnappyInboundCode use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; +use lazy_static::lazy_static; use libp2p::core::{InboundUpgrade, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e845a775cbb..77caecb16df 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -147,7 +147,7 @@ impl SelfRateLimiter { Err((rate_limited_req, wait_time)) => { let key = (peer_id, protocol); self.next_peer_request.insert(key, wait_time); - queued_requests.push_back(rate_limited_req); + queued_requests.push_front(rate_limited_req); // If one fails just wait for the next window that allows sending requests. return; } @@ -158,13 +158,39 @@ impl SelfRateLimiter { entry.remove(); } } + // NOTE: There can be entries that have been removed due to peer disconnections, we simply + // ignore these messages here. + } + + /// Informs the limiter that a peer has disconnected. This removes any pending requests and + /// returns their IDs. + pub fn peer_disconnected(&mut self, peer_id: PeerId) -> Vec<(Id, Protocol)> { + // It's not ideal to iterate this map, but the key is (PeerId, Protocol) and this map + // should never really be large. So we iterate for simplicity + let mut failed_requests = Vec::new(); + self.delayed_requests + .retain(|(map_peer_id, protocol), queue| { + if map_peer_id == &peer_id { + // NOTE: Currently cannot remove entries from the DelayQueue, we will just let + // them expire and ignore them. + for message in queue { + failed_requests.push((message.request_id, *protocol)) + } + // Remove the entry + false + } else { + // Keep the entry + true + } + }); + failed_requests } pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { // First check the requests that were self rate limited, since those might add events to // the queue. Also do this this before rate limiter prunning to avoid removing and // immediately adding rate limiting keys. 
- if let Poll::Ready(Some(Ok(expired))) = self.next_peer_request.poll_expired(cx) { + if let Poll::Ready(Some(expired)) = self.next_peer_request.poll_expired(cx) { let (peer_id, protocol) = expired.into_inner(); self.next_peer_request_ready(peer_id, protocol); } @@ -179,3 +205,84 @@ impl SelfRateLimiter { Poll::Pending } } + +#[cfg(test)] +mod tests { + use crate::rpc::config::{OutboundRateLimiterConfig, RateLimiterConfig}; + use crate::rpc::rate_limiter::Quota; + use crate::rpc::self_limiter::SelfRateLimiter; + use crate::rpc::{OutboundRequest, Ping, Protocol}; + use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; + use libp2p::PeerId; + use std::time::Duration; + use types::MainnetEthSpec; + + /// Test that `next_peer_request_ready` correctly maintains the queue. + #[tokio::test] + async fn test_next_peer_request_ready() { + let log = logging::test_logger(); + let config = OutboundRateLimiterConfig(RateLimiterConfig { + ping_quota: Quota::n_every(1, 2), + ..Default::default() + }); + let mut limiter: SelfRateLimiter = + SelfRateLimiter::new(config, log).unwrap(); + let peer_id = PeerId::random(); + + for i in 1..=5u32 { + let _ = limiter.allows( + peer_id, + RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { + id: i, + })), + OutboundRequest::Ping(Ping { data: i as u64 }), + ); + } + + { + let queue = limiter + .delayed_requests + .get(&(peer_id, Protocol::Ping)) + .unwrap(); + assert_eq!(4, queue.len()); + + // Check that requests in the queue are ordered in the sequence 2, 3, 4, 5. + let mut iter = queue.iter(); + for i in 2..=5u32 { + assert!(matches!( + iter.next().unwrap().request_id, + RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { + id, + })) if id == i + )); + } + + assert_eq!(limiter.ready_requests.len(), 0); + } + + // Wait until the tokens have been regenerated, then run `next_peer_request_ready`. + tokio::time::sleep(Duration::from_secs(3)).await; + limiter.next_peer_request_ready(peer_id, Protocol::Ping); + + { + let queue = limiter + .delayed_requests + .get(&(peer_id, Protocol::Ping)) + .unwrap(); + assert_eq!(3, queue.len()); + + // Check that requests in the queue are ordered in the sequence 3, 4, 5. + let mut iter = queue.iter(); + for i in 3..=5 { + assert!(matches!( + iter.next().unwrap().request_id, + RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { + id + })) if id == i + )); + } + + assert_eq!(limiter.ready_requests.len(), 1); + } + } +} diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 2ea41502489..376ac34dee7 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -19,10 +19,36 @@ use crate::rpc::{ /// Identifier of requests sent by a peer. pub type PeerRequestId = (ConnectionId, SubstreamId); -/// Identifier of a request. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestId { - Application(AppReqId), +pub type Id = u32; + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct SingleLookupReqId { + pub lookup_id: Id, + pub req_id: Id, +} + +/// Id of rpc requests sent by sync to the network. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum SyncRequestId { + /// Request searching for a block given a hash. + SingleBlock { id: SingleLookupReqId }, + /// Request searching for a set of blobs given a hash. 
+ SingleBlob { id: SingleLookupReqId }, + /// Range request that is composed by both a block range request and a blob range request. + RangeBlockAndBlobs { id: Id }, +} + +/// Application level requests sent to the network. +#[derive(Debug, Clone, Copy)] +pub enum AppRequestId { + Sync(SyncRequestId), + Router, +} + +/// Global identifier of a request. +#[derive(Debug, Clone, Copy)] +pub enum RequestId { + Application(AppRequestId), Internal, } @@ -142,7 +168,7 @@ impl std::convert::From> for RPCCodedResponse { } } -impl slog::Value for RequestId { +impl slog::Value for RequestId { fn serialize( &self, record: &slog::Record, diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 90121ffbfbc..ab2e43630bb 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -1,6 +1,6 @@ use crate::discovery::Discovery; use crate::peer_manager::PeerManager; -use crate::rpc::{ReqId, RPC}; +use crate::rpc::RPC; use crate::types::SnappyTransform; use libp2p::identify; @@ -16,9 +16,8 @@ pub type SubscriptionFilter = pub type Gossipsub = gossipsub::Behaviour; #[derive(NetworkBehaviour)] -pub(crate) struct Behaviour +pub(crate) struct Behaviour where - AppReqId: ReqId, E: EthSpec, { /// Keep track of active and pending connections to enforce hard limits. @@ -26,7 +25,7 @@ where /// The peer manager that keeps track of peer's reputation and status. pub peer_manager: PeerManager, /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, E>, + pub eth2_rpc: RPC, /// Discv5 Discovery protocol. pub discovery: Discovery, /// Keep regular connection to peers and disconnect if absent. diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 225b4ef8dde..158c7a994a3 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -240,7 +240,7 @@ impl futures::stream::Stream for GossipCache { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { match self.expirations.poll_expired(cx) { - Poll::Ready(Some(Ok(expired))) => { + Poll::Ready(Some(expired)) => { let expected_key = expired.key(); let (topic, data) = expired.into_inner(); match self.topic_msgs.get_mut(&topic) { @@ -259,7 +259,6 @@ impl futures::stream::Stream for GossipCache { } Poll::Ready(Some(Ok(topic))) } - Poll::Ready(Some(Err(x))) => Poll::Ready(Some(Err(x.to_string()))), Poll::Ready(None) => Poll::Ready(None), Poll::Pending => Poll::Pending, } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f91a5b471ad..2868c616bdd 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -21,7 +21,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{PeerRequestId, Request, RequestId, Response}; +use api_types::{AppRequestId, PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -57,7 +57,7 @@ const MAX_IDENTIFY_ADDRESSES: usize = 10; /// The types of events than can be obtained from polling the behaviour. 
#[derive(Debug)] -pub enum NetworkEvent { +pub enum NetworkEvent { /// We have successfully dialed and connected to a peer. PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. @@ -67,7 +67,7 @@ pub enum NetworkEvent { /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. - id: AppReqId, + id: AppRequestId, /// The peer to which this request was sent. peer_id: PeerId, /// The error of the failed request. @@ -85,7 +85,7 @@ pub enum NetworkEvent { /// Peer that sent the response. peer_id: PeerId, /// Id of the request to which the peer is responding. - id: AppReqId, + id: AppRequestId, /// Response the peer sent. response: Response, }, @@ -108,8 +108,8 @@ pub enum NetworkEvent { /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. -pub struct Network { - swarm: libp2p::swarm::Swarm>, +pub struct Network { + swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ /// A collections of variables accessible outside the network service. network_globals: Arc>, @@ -132,7 +132,7 @@ pub struct Network { } /// Implements the combined behaviour for the libp2p service. -impl Network { +impl Network { pub async fn new( executor: task_executor::TaskExecutor, mut ctx: ServiceContext<'_>, @@ -592,7 +592,7 @@ impl Network { &mut self.swarm.behaviour_mut().gossipsub } /// The Eth2 RPC specified in the wire-0 protocol. - pub fn eth2_rpc_mut(&mut self) -> &mut RPC, E> { + pub fn eth2_rpc_mut(&mut self) -> &mut RPC { &mut self.swarm.behaviour_mut().eth2_rpc } /// Discv5 Discovery protocol. @@ -613,7 +613,7 @@ impl Network { &self.swarm.behaviour().gossipsub } /// The Eth2 RPC specified in the wire-0 protocol. - pub fn eth2_rpc(&self) -> &RPC, E> { + pub fn eth2_rpc(&self) -> &RPC { &self.swarm.behaviour().eth2_rpc } /// Discv5 Discovery protocol. @@ -917,12 +917,23 @@ impl Network { /* Eth2 RPC behaviour functions */ /// Send a request to a peer over RPC. - pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { + pub fn send_request( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + request: Request, + ) -> Result<(), (AppRequestId, RPCError)> { + // Check if the peer is connected before sending an RPC request + if !self.swarm.is_connected(&peer_id) { + return Err((request_id, RPCError::Disconnected)); + } + self.eth2_rpc_mut().send_request( peer_id, RequestId::Application(request_id), request.into(), - ) + ); + Ok(()) } /// Send a successful response to a peer over RPC. @@ -972,6 +983,12 @@ impl Network { .goodbye_peer(peer_id, reason, source); } + /// Hard (ungraceful) disconnect for testing purposes only + /// Use goodbye_peer for disconnections, do not use this function. + pub fn __hard_disconnect_testing_only(&mut self, peer_id: PeerId) { + let _ = self.swarm.disconnect_peer_id(peer_id); + } + /// Returns an iterator over all enr entries in the DHT. 
pub fn enr_entries(&self) -> Vec { self.discovery().table_entries_enr() @@ -1140,10 +1157,10 @@ impl Network { #[must_use = "return the response"] fn build_response( &mut self, - id: RequestId, + id: RequestId, peer_id: PeerId, response: Response, - ) -> Option> { + ) -> Option> { match id { RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { peer_id, @@ -1161,7 +1178,7 @@ impl Network { id: PeerRequestId, peer_id: PeerId, request: Request, - ) -> NetworkEvent { + ) -> NetworkEvent { // Increment metrics match &request { Request::Status(_) => { @@ -1227,7 +1244,7 @@ impl Network { /* Sub-behaviour event handling functions */ /// Handle a gossipsub event. - fn inject_gs_event(&mut self, event: gossipsub::Event) -> Option> { + fn inject_gs_event(&mut self, event: gossipsub::Event) -> Option> { match event { gossipsub::Event::Message { propagation_source, @@ -1366,13 +1383,14 @@ impl Network { } /// Handle an RPC event. - fn inject_rpc_event( - &mut self, - event: RPCMessage, E>, - ) -> Option> { + fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { let peer_id = event.peer_id; - if !self.peer_manager().is_connected(&peer_id) { + // Do not permit Inbound events from peers that are being disconnected, or RPC requests. + if !self.peer_manager().is_connected(&peer_id) + && (matches!(event.event, HandlerEvent::Err(HandlerErr::Inbound { .. })) + || matches!(event.event, HandlerEvent::Ok(RPCReceived::Request(..)))) + { debug!( self.log, "Ignoring rpc message of disconnecting peer"; @@ -1598,10 +1616,7 @@ impl Network { } /// Handle an identify event. - fn inject_identify_event( - &mut self, - event: identify::Event, - ) -> Option> { + fn inject_identify_event(&mut self, event: identify::Event) -> Option> { match event { identify::Event::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1622,7 +1637,7 @@ impl Network { } /// Handle a peer manager event. - fn inject_pm_event(&mut self, event: PeerManagerEvent) -> Option> { + fn inject_pm_event(&mut self, event: PeerManagerEvent) -> Option> { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { Some(NetworkEvent::PeerConnectedIncoming(peer_id)) @@ -1681,12 +1696,16 @@ impl Network { libp2p::upnp::Event::NewExternalAddr(addr) => { info!(self.log, "UPnP route established"; "addr" => %addr); let mut iter = addr.iter(); - // Skip Ip address. - iter.next(); + let is_ip6 = { + let addr = iter.next(); + matches!(addr, Some(MProtocol::Ip6(_))) + }; match iter.next() { Some(multiaddr::Protocol::Udp(udp_port)) => match iter.next() { Some(multiaddr::Protocol::QuicV1) => { - if let Err(e) = self.discovery_mut().update_enr_quic_port(udp_port) { + if let Err(e) = + self.discovery_mut().update_enr_quic_port(udp_port, is_ip6) + { warn!(self.log, "Failed to update ENR"; "error" => e); } } @@ -1695,7 +1714,7 @@ impl Network { } }, Some(multiaddr::Protocol::Tcp(tcp_port)) => { - if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port) { + if let Err(e) = self.discovery_mut().update_enr_tcp_port(tcp_port, is_ip6) { warn!(self.log, "Failed to update ENR"; "error" => e); } } @@ -1722,7 +1741,7 @@ impl Network { /// Poll the p2p networking stack. /// /// This will poll the swarm and do maintenance routines. 
- pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { + pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { @@ -1864,7 +1883,7 @@ impl Network { Poll::Pending } - pub async fn next_event(&mut self) -> NetworkEvent { + pub async fn next_event(&mut self) -> NetworkEvent { futures::future::poll_fn(|cx| self.poll_network(cx)).await } } diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index c6dbee1d2ed..80187efc103 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -5,6 +5,7 @@ use crate::types::{ }; use crate::{GossipTopic, NetworkConfig}; use futures::future::Either; +use gossipsub; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::identity::{secp256k1, Keypair}; use libp2p::{core, noise, yamux, PeerId, Transport}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index ed63ad014c9..b443ecd1b9b 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,12 +7,14 @@ use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName, + Attestation, AttestationBase, AttestationElectra, AttesterSlashing, AttesterSlashingBase, + AttesterSlashingElectra, BlobSidecar, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, - SignedBeaconBlockElectra, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -154,15 +156,55 @@ impl PubsubMessage { // the ssz decoders match gossip_topic.kind() { GossipKind::BeaconAggregateAndProof => { - let agg_and_proof = SignedAggregateAndProof::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; + let signed_aggregate_and_proof = + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(&fork_name) => { + if fork_name.electra_enabled() { + SignedAggregateAndProof::Electra( + SignedAggregateAndProofElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } else { + SignedAggregateAndProof::Base( + SignedAggregateAndProofBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } + } + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::AggregateAndProofAttestation(Box::new( - agg_and_proof, + signed_aggregate_and_proof, ))) } GossipKind::Attestation(subnet_id) => { let attestation = - 
Attestation::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?; + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(&fork_name) => { + if fork_name.electra_enabled() { + Attestation::Electra( + AttestationElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } else { + Attestation::Base( + AttestationBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } + } + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::Attestation(Box::new(( *subnet_id, attestation, @@ -239,8 +281,28 @@ impl PubsubMessage { Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing))) } GossipKind::AttesterSlashing => { - let attester_slashing = AttesterSlashing::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; + let attester_slashing = + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(&fork_name) => { + if fork_name.electra_enabled() { + AttesterSlashing::Electra( + AttesterSlashingElectra::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } else { + AttesterSlashing::Base( + AttesterSlashingBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ) + } + } + None => { + return Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )) + } + }; Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing))) } GossipKind::SignedContributionAndProof => { @@ -342,15 +404,17 @@ impl std::fmt::Display for PubsubMessage { ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, - "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", - att.message.aggregate.data.slot, - att.message.aggregate.data.index, - att.message.aggregator_index, + "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}", + att.message().aggregate().data().slot, + att.message().aggregate().committee_index(), + att.message().aggregator_index(), ), PubsubMessage::Attestation(data) => write!( f, - "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {}", - *data.0, data.1.data.slot, data.1.data.index, + "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {:?}", + *data.0, + data.1.data().slot, + data.1.committee_index(), ), PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 32e3a034666..25431226ca6 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -13,7 +13,6 @@ use types::{ }; type E = MinimalEthSpec; -type ReqId = usize; use tempfile::Builder as TempBuilder; @@ -44,14 +43,14 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { } pub struct Libp2pInstance( - LibP2PService, + LibP2PService, #[allow(dead_code)] // This field is managed for lifetime purposes may not be used directly, hence the `#[allow(dead_code)]` attribute. 
async_channel::Sender<()>, ); impl std::ops::Deref for Libp2pInstance { - type Target = LibP2PService; + type Target = LibP2PService; fn deref(&self) -> &Self::Target { &self.0 } @@ -125,7 +124,7 @@ pub async fn build_libp2p_instance( } #[allow(dead_code)] -pub fn get_enr(node: &LibP2PService) -> Enr { +pub fn get_enr(node: &LibP2PService) -> Enr { node.local_enr() } diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index a60af4db3db..12a1c593930 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -4,6 +4,7 @@ mod common; use common::Protocol; use lighthouse_network::rpc::methods::*; +use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; @@ -98,11 +99,13 @@ fn test_tcp_status_rpc() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, 10, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, - id: 10, + id: AppRequestId::Router, response, } => { // Should receive the RPC response @@ -194,7 +197,6 @@ fn test_tcp_blocks_by_range_chunked_rpc() { // keep count of the number of messages received let mut messages_received = 0; - let request_id = messages_to_send as usize; // build the sender future let sender_future = async { loop { @@ -202,7 +204,9 @@ fn test_tcp_blocks_by_range_chunked_rpc() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, request_id, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, @@ -319,7 +323,6 @@ fn test_blobs_by_range_chunked_rpc() { // keep count of the number of messages received let mut messages_received = 0; - let request_id = messages_to_send as usize; // build the sender future let sender_future = async { loop { @@ -327,7 +330,9 @@ fn test_blobs_by_range_chunked_rpc() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, request_id, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, @@ -427,7 +432,6 @@ fn test_tcp_blocks_by_range_over_limit() { let rpc_response_bellatrix_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let request_id = messages_to_send as usize; // build the sender future let sender_future = async { loop { @@ -435,11 +439,13 @@ fn test_tcp_blocks_by_range_over_limit() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, request_id, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE NetworkEvent::RPCFailed { id, .. 
} => { - assert_eq!(id, request_id); + assert!(matches!(id, AppRequestId::Router)); return; } _ => {} // Ignore other behaviour events @@ -520,7 +526,6 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // keep count of the number of messages received let mut messages_received: u64 = 0; - let request_id = messages_to_send as usize; // build the sender future let sender_future = async { loop { @@ -528,7 +533,9 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, request_id, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, @@ -657,11 +664,13 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, 10, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, - id: 10, + id: AppRequestId::Router, response, } => match response { Response::BlocksByRange(Some(_)) => { @@ -780,11 +789,13 @@ fn test_tcp_blocks_by_root_chunked_rpc() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, 6, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, - id: 6, + id: AppRequestId::Router, response, } => match response { Response::BlocksByRoot(Some(_)) => { @@ -911,11 +922,13 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.send_request(peer_id, 10, rpc_request.clone()); + sender + .send_request(peer_id, AppRequestId::Router, rpc_request.clone()) + .unwrap(); } NetworkEvent::ResponseReceived { peer_id: _, - id: 10, + id: AppRequestId::Router, response, } => { debug!(log, "Sender received a response"); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 406015360ef..0ad7f53ee70 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -39,19 +39,14 @@ logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" itertools = { workspace = true } -num_cpus = { workspace = true } lru_cache = { workspace = true } -lru = { workspace = true } strum = { workspace = true } -tokio-util = { workspace = true } derivative = { workspace = true } delay_map = { workspace = true } -ethereum-types = { workspace = true } operation_pool = { workspace = true } execution_layer = { workspace = true } beacon_processor = { workspace = true } parking_lot = { workspace = true } -environment = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index da64368b16d..1149e6e6e38 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate lazy_static; - /// This crate provides the network server for Lighthouse. 
pub mod error; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index d3804fbed8d..f0dba8d9655 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -5,6 +5,7 @@ use beacon_chain::{ sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; +use lazy_static::lazy_static; pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, @@ -236,6 +237,36 @@ lazy_static! { "Number of Syncing chains in range, per range type", &["range_type"] ); + pub static ref SYNCING_CHAINS_REMOVED: Result = try_create_int_counter_vec( + "sync_range_removed_chains_total", + "Total count of range syncing chains removed per range type", + &["range_type"] + ); + pub static ref SYNCING_CHAINS_ADDED: Result = try_create_int_counter_vec( + "sync_range_added_chains_total", + "Total count of range syncing chains added per range type", + &["range_type"] + ); + pub static ref SYNCING_CHAINS_DROPPED_BLOCKS: Result = try_create_int_counter_vec( + "sync_range_chains_dropped_blocks_total", + "Total count of dropped blocks when removing a syncing chain per range type", + &["range_type"] + ); + pub static ref SYNCING_CHAINS_IGNORED_BLOCKS: Result = try_create_int_counter_vec( + "sync_range_chains_ignored_blocks_total", + "Total count of ignored blocks when processing a syncing chain batch per chain type", + &["chain_type"] + ); + pub static ref SYNCING_CHAINS_PROCESSED_BATCHES: Result = try_create_int_counter_vec( + "sync_range_chains_processed_batches_total", + "Total count of processed batches in a syncing chain batch per chain type", + &["chain_type"] + ); + pub static ref SYNCING_CHAIN_BATCH_AWAITING_PROCESSING: Result = try_create_histogram_with_buckets( + "sync_range_chain_batch_awaiting_processing_seconds", + "Time range sync batches spend in AwaitingProcessing state", + Ok(vec![0.01,0.02,0.05,0.1,0.2,0.5,1.0,2.0,5.0,10.0,20.0]) + ); pub static ref SYNC_SINGLE_BLOCK_LOOKUPS: Result = try_create_int_gauge( "sync_single_block_lookups", "Number of single block lookups underway" @@ -244,6 +275,33 @@ lazy_static! 
{ "sync_parent_block_lookups", "Number of parent block lookups underway" ); + pub static ref SYNC_LOOKUP_CREATED: Result = try_create_int_counter( + "sync_lookups_created_total", + "Total count of sync lookups created", + ); + pub static ref SYNC_LOOKUP_DROPPED: Result = try_create_int_counter_vec( + "sync_lookups_dropped_total", + "Total count of sync lookups dropped by reason", + &["reason"] + ); + pub static ref SYNC_LOOKUP_COMPLETED: Result = try_create_int_counter( + "sync_lookups_completed_total", + "Total count of sync lookups completed", + ); + pub static ref SYNC_LOOKUPS_STUCK: Result = try_create_int_counter( + "sync_lookups_stuck_total", + "Total count of sync lookups that are stuck and dropped", + ); + pub static ref SYNC_ACTIVE_NETWORK_REQUESTS: Result = try_create_int_gauge_vec( + "sync_active_network_requests", + "Current count of active network requests from sync", + &["type"], + ); + pub static ref SYNC_UNKNOWN_NETWORK_REQUESTS: Result = try_create_int_counter_vec( + "sync_unknwon_network_request", + "Total count of network messages received for unknown active requests", + &["type"], + ); /* * Block Delay Metrics diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index af7f3a53e56..ab250532587 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -31,9 +31,9 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + beacon_block::BlockImportSource, Attestation, AttestationRef, AttesterSlashing, BlobSidecar, + EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; @@ -62,8 +62,8 @@ struct VerifiedUnaggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. impl VerifiedAttestation for VerifiedUnaggregate { - fn attestation(&self) -> &Attestation { - &self.attestation + fn attestation(&self) -> AttestationRef { + self.attestation.to_ref() } fn indexed_attestation(&self) -> &IndexedAttestation { @@ -72,7 +72,7 @@ impl VerifiedAttestation for VerifiedUnaggregate { fn into_attestation_and_indices(self) -> (Attestation, Vec) { let attestation = *self.attestation; - let attesting_indices = self.indexed_attestation.attesting_indices.into(); + let attesting_indices = self.indexed_attestation.attesting_indices_to_vec(); (attestation, attesting_indices) } } @@ -95,8 +95,8 @@ struct VerifiedAggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. 
impl VerifiedAttestation for VerifiedAggregate { - fn attestation(&self) -> &Attestation { - &self.signed_aggregate.message.aggregate + fn attestation(&self) -> AttestationRef { + self.signed_aggregate.message().aggregate() } fn indexed_attestation(&self) -> &IndexedAttestation { @@ -105,8 +105,8 @@ impl VerifiedAttestation for VerifiedAggregate { /// Efficient clone-free implementation that moves out of the `Box`. fn into_attestation_and_indices(self) -> (Attestation, Vec) { - let attestation = self.signed_aggregate.message.aggregate; - let attesting_indices = self.indexed_attestation.attesting_indices.into(); + let attestation = self.signed_aggregate.into_attestation(); + let attesting_indices = self.indexed_attestation.attesting_indices_to_vec(); (attestation, attesting_indices) } } @@ -133,7 +133,7 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation().data.beacon_block_root + &self.attestation().data().beacon_block_root } pub fn kind(&self) -> &'static str { @@ -143,10 +143,10 @@ impl FailedAtt { } } - pub fn attestation(&self) -> &Attestation { + pub fn attestation(&self) -> AttestationRef { match self { - FailedAtt::Unaggregate { attestation, .. } => attestation, - FailedAtt::Aggregate { attestation, .. } => &attestation.message.aggregate, + FailedAtt::Unaggregate { attestation, .. } => attestation.to_ref(), + FailedAtt::Aggregate { attestation, .. } => attestation.message().aggregate(), } } } @@ -309,7 +309,7 @@ impl NetworkBeaconProcessor { match result { Ok(verified_attestation) => { let indexed_attestation = &verified_attestation.indexed_attestation; - let beacon_block_root = indexed_attestation.data.beacon_block_root; + let beacon_block_root = indexed_attestation.data().beacon_block_root; // Register the attestation with any monitored validators. self.chain @@ -412,7 +412,7 @@ impl NetworkBeaconProcessor { reprocess_tx: Option>, seen_timestamp: Duration, ) { - let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; + let beacon_block_root = aggregate.message().aggregate().data().beacon_block_root; let result = match self .chain @@ -690,7 +690,6 @@ impl NetworkBeaconProcessor { | GossipBlobError::InvalidSubnet { .. } | GossipBlobError::InvalidInclusionProof | GossipBlobError::KzgError(_) - | GossipBlobError::InclusionProof(_) | GossipBlobError::NotFinalizedDescendant { .. } => { warn!( self.log, @@ -701,7 +700,7 @@ impl NetworkBeaconProcessor { "index" => %index, "commitment" => %commitment, ); - // Prevent recurring behaviour by penalizing the peer slightly. + // Prevent recurring behaviour by penalizing the peer. self.gossip_penalize_peer( peer_id, PeerAction::LowToleranceError, @@ -713,10 +712,8 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } - GossipBlobError::FutureSlot { .. } - | GossipBlobError::RepeatBlob { .. } - | GossipBlobError::PastFinalizedSlot { .. } => { - warn!( + GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => { + debug!( self.log, "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; "error" => ?err, @@ -737,6 +734,30 @@ impl NetworkBeaconProcessor { MessageAcceptance::Ignore, ); } + GossipBlobError::PastFinalizedSlot { .. } => { + debug!( + self.log, + "Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; + "error" => ?err, + "slot" => %slot, + "root" => %root, + "index" => %index, + "commitment" => %commitment, + ); + // Prevent recurring behaviour by penalizing the peer. 
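Several call sites above move from `&Attestation` with direct field access (`.data.slot`) to an `AttestationRef` with accessor methods (`.data().slot`). A simplified stand-in for that pattern; the real Lighthouse types are richer, these are illustrative only:

```rust
struct AttestationData { slot: u64 }

struct AttestationBase { data: AttestationData }
struct AttestationElectra { data: AttestationData }

enum Attestation { Base(AttestationBase), Electra(AttestationElectra) }

/// A cheap, copyable view over either fork's attestation.
#[derive(Clone, Copy)]
enum AttestationRef<'a> {
    Base(&'a AttestationBase),
    Electra(&'a AttestationElectra),
}

impl Attestation {
    fn to_ref(&self) -> AttestationRef<'_> {
        match self {
            Attestation::Base(a) => AttestationRef::Base(a),
            Attestation::Electra(a) => AttestationRef::Electra(a),
        }
    }
}

impl<'a> AttestationRef<'a> {
    // Accessor instead of direct field access, so callers stay fork-agnostic.
    fn data(self) -> &'a AttestationData {
        match self {
            AttestationRef::Base(a) => &a.data,
            AttestationRef::Electra(a) => &a.data,
        }
    }
}

fn main() {
    let att = Attestation::Base(AttestationBase { data: AttestationData { slot: 7 } });
    assert_eq!(att.to_ref().data().slot, 7);
}
```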
A low-tolerance + // error is fine because there's no reason for peers to be propagating old + // blobs on gossip, even if their view of finality is lagging. + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_blob_low", + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } } } } @@ -753,7 +774,9 @@ impl NetworkBeaconProcessor { let blob_slot = verified_blob.slot(); let blob_index = verified_blob.id().index; - match self.chain.process_gossip_blob(verified_blob).await { + let result = self.chain.process_gossip_blob(verified_blob).await; + + match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { // Note: Reusing block imported metric here metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -802,6 +825,16 @@ impl NetworkBeaconProcessor { ); } } + + // If a block is in the da_checker, sync maybe awaiting for an event when block is finally + // imported. A block can become imported both after processing a block or blob. If a + // importing a block results in `Imported`, notify. Do not notify of blob errors. + if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) { + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: true, + }); + } } /// Process the beacon block received from the gossip network and: @@ -874,17 +907,19 @@ impl NetworkBeaconProcessor { get_block_delay_ms(seen_duration, block.message(), &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. - metrics::set_gauge( - &metrics::BEACON_BLOCK_DELAY_GOSSIP, - block_delay.as_millis() as i64, - ); - let verification_result = self .chain .clone() .verify_block_for_gossip(block.clone()) .await; + if verification_result.is_ok() { + metrics::set_gauge( + &metrics::BEACON_BLOCK_DELAY_GOSSIP, + block_delay.as_millis() as i64, + ); + } + let block_root = if let Ok(verified_block) = &verification_result { verified_block.block_root } else { @@ -977,11 +1012,12 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } - Err(e @ BlockError::FutureSlot { .. }) - | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) - | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { - debug!(self.log, "Could not verify block for gossip. Ignoring the block"; - "error" => %e); + Err(e @ BlockError::FutureSlot { .. }) => { + debug!( + self.log, + "Could not verify block for gossip. Ignoring the block"; + "error" => %e + ); // Prevent recurring behaviour by penalizing the peer slightly. self.gossip_penalize_peer( peer_id, @@ -991,6 +1027,25 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } + Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) + | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { + debug!( + self.log, + "Could not verify block for gossip. Ignoring the block"; + "error" => %e + ); + // The spec says we must IGNORE these blocks but there's no reason for an honest + // and non-buggy client to be gossiping blocks that blatantly conflict with + // finalization. Old versions of Erigon/Caplin are known to gossip pre-finalization + // blocks and we want to isolate them to encourage an update. 
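The blob-processing change above keeps the processing `Result` so that sync is only notified with a `GossipBlockProcessResult` when the blob actually completes an import; blob errors are not reported. A minimal stand-in for that decision rule (types are illustrative, not the Lighthouse ones):

```rust
enum AvailabilityProcessingStatus {
    Imported([u8; 32]),
    MissingComponents,
}

struct GossipBlockProcessResult {
    block_root: [u8; 32],
    imported: bool,
}

fn notify_sync(
    block_root: [u8; 32],
    result: &Result<AvailabilityProcessingStatus, String>,
) -> Option<GossipBlockProcessResult> {
    // Only a successful import unblocks lookup sync; blob errors stay local.
    if matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))) {
        Some(GossipBlockProcessResult { block_root, imported: true })
    } else {
        None
    }
}

fn main() {
    let root = [0u8; 32];
    if let Some(msg) = notify_sync(root, &Ok(AvailabilityProcessingStatus::Imported(root))) {
        assert!(msg.imported);
        assert_eq!(msg.block_root, root);
    }
    assert!(notify_sync(root, &Ok(AvailabilityProcessingStatus::MissingComponents)).is_none());
    assert!(notify_sync(root, &Err("blob error".into())).is_none());
}
```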
+ self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_block_low", + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); @@ -1141,9 +1196,16 @@ impl NetworkBeaconProcessor { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; + // TODO(block source) + let result = self .chain - .process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes) + .process_block_with_early_caching( + block_root, + verified_block, + BlockImportSource::Gossip, + NotifyExecutionLayer::Yes, + ) .await; match &result { @@ -1187,19 +1249,18 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); } - Err(BlockError::ParentUnknown(block)) => { - // Inform the sync manager to find parents for this block - // This should not occur. It should be checked by `should_forward_block` + Err(BlockError::ParentUnknown(_)) => { + // This should not occur. It should be checked by `should_forward_block`. + // Do not send sync message UnknownParentBlock to prevent conflicts with the + // BlockComponentProcessed message below. If this error ever happens, lookup sync + // can recover by receiving another block / blob / attestation referencing the + // chain that includes this block. error!( self.log, "Block with unknown parent attempted to be processed"; + "block_root" => %block_root, "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownParentBlock( - peer_id, - block.clone(), - block_root, - )); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( @@ -1263,6 +1324,11 @@ impl NetworkBeaconProcessor { &self.log, ); } + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: matches!(result, Ok(AvailabilityProcessingStatus::Imported(_))), + }); } pub fn process_gossip_voluntary_exit( @@ -1432,7 +1498,7 @@ impl NetworkBeaconProcessor { self.chain .validator_monitor .read() - .register_gossip_attester_slashing(slashing.as_inner()); + .register_gossip_attester_slashing(slashing.as_inner().to_ref()); self.chain.import_attester_slashing(slashing); debug!(self.log, "Successfully imported attester slashing"); @@ -2033,6 +2099,27 @@ impl NetworkBeaconProcessor { "attn_val_index_too_high", ); } + AttnError::CommitteeIndexNonZero(index) => { + /* + * The validator index is not set to zero after Electra. + * + * The peer has published an invalid consensus message. + */ + debug!( + self.log, + "Committee index non zero"; + "peer_id" => %peer_id, + "block" => ?beacon_block_root, + "type" => ?attestation_type, + "committee_index" => index, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_comm_index_non_zero", + ); + } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( self.log, @@ -2188,6 +2275,19 @@ impl NetworkBeaconProcessor { "attn_too_many_agg_bits", ); } + AttnError::NotExactlyOneCommitteeBitSet(_) => { + /* + * The attestation doesn't have only one committee bit set. + * + * The peer has published an invalid consensus message. 
+ */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_too_many_comm_bits", + ); + } AttnError::AttestsToFutureBlock { .. } => { /* * The beacon_block_root is from a higher slot than the attestation. @@ -2286,7 +2386,7 @@ impl NetworkBeaconProcessor { self.log, "Ignored attestation to finalized block"; "block_root" => ?beacon_block_root, - "attestation_slot" => failed_att.attestation().data.slot, + "attestation_slot" => failed_att.attestation().data().slot, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2309,9 +2409,9 @@ impl NetworkBeaconProcessor { debug!( self.log, "Dropping attestation"; - "target_root" => ?failed_att.attestation().data.target.root, + "target_root" => ?failed_att.attestation().data().target.root, "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data.slot, + "slot" => ?failed_att.attestation().data().slot, "type" => ?attestation_type, "error" => ?e, "peer_id" => % peer_id @@ -2330,7 +2430,7 @@ impl NetworkBeaconProcessor { self.log, "Unable to validate attestation"; "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data.slot, + "slot" => ?failed_att.attestation().data().slot, "type" => ?attestation_type, "peer_id" => %peer_id, "error" => ?e, @@ -2727,7 +2827,7 @@ impl NetworkBeaconProcessor { /// timely), propagate it on gossip. Otherwise, ignore it. fn propagate_attestation_if_timely( &self, - attestation: &Attestation, + attestation: AttestationRef, message_id: MessageId, peer_id: PeerId, ) { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index f10646c7414..ccdbb10720c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,7 +1,5 @@ -use crate::{ - service::NetworkMessage, - sync::{manager::BlockProcessType, SyncMessage}, -}; +use crate::sync::manager::BlockProcessType; +use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; @@ -144,7 +142,7 @@ impl NetworkBeaconProcessor { processor.process_gossip_aggregate_batch(aggregates, Some(reprocess_tx)) }; - let beacon_block_root = aggregate.message.aggregate.data.beacon_block_root; + let beacon_block_root = aggregate.message().aggregate().data().beacon_block_root; self.try_send(BeaconWorkEvent { drop_during_sync: true, work: Work::GossipAggregate { diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 887974c6e0b..68bd6745144 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -24,6 +24,7 @@ use store::KzgCommitment; use tokio::sync::mpsc; use types::beacon_block_body::format_kzg_commitments; use types::blob_sidecar::FixedBlobSidecarList; +use types::BlockImportSource; use types::{Epoch, Hash256}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. @@ -33,8 +34,6 @@ pub enum ChainSegmentProcessId { RangeBatchId(ChainId, Epoch), /// Processing ID for a backfill syncing batch. 
BackSyncBatchId(Epoch), - /// Processing Id of the parent lookup of a block. - ParentLookup(Hash256), } /// Returned when a chain segment import fails. @@ -155,7 +154,12 @@ impl NetworkBeaconProcessor { let result = self .chain - .process_block_with_early_caching(block_root, block, NotifyExecutionLayer::Yes) + .process_block_with_early_caching( + block_root, + block, + BlockImportSource::Lookup, + NotifyExecutionLayer::Yes, + ) .await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -172,17 +176,15 @@ impl NetworkBeaconProcessor { if reprocess_tx.try_send(reprocess_msg).is_err() { error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %hash) }; - if matches!(process_type, BlockProcessType::SingleBlock { .. }) { - self.chain.block_times_cache.write().set_time_observed( - hash, - slot, - seen_timestamp, - None, - None, - ); + self.chain.block_times_cache.write().set_time_observed( + hash, + slot, + seen_timestamp, + None, + None, + ); - self.chain.recompute_head_at_current_slot().await; - } + self.chain.recompute_head_at_current_slot().await; } // Sync handles these results self.send_sync_message(SyncMessage::BlockComponentProcessed { @@ -324,7 +326,7 @@ impl NetworkBeaconProcessor { .process_blocks(downloaded_blocks.iter(), notify_execution_layer) .await { - (_, Ok(_)) => { + (imported_blocks, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, @@ -333,7 +335,8 @@ impl NetworkBeaconProcessor { "processed_blocks" => sent_blocks, "service"=> "sync"); BatchProcessResult::Success { - was_non_empty: sent_blocks > 0, + sent_blocks, + imported_blocks, } } (imported_blocks, Err(e)) => { @@ -347,7 +350,7 @@ impl NetworkBeaconProcessor { "service" => "sync"); match e.peer_action { Some(penalty) => BatchProcessResult::FaultyFailure { - imported_blocks: imported_blocks > 0, + imported_blocks, penalty, }, None => BatchProcessResult::NonFaultyFailure, @@ -366,7 +369,7 @@ impl NetworkBeaconProcessor { .sum::(); match self.process_backfill_blocks(downloaded_blocks) { - (_, Ok(_)) => { + (imported_blocks, Ok(_)) => { debug!(self.log, "Backfill batch processed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, @@ -375,7 +378,8 @@ impl NetworkBeaconProcessor { "processed_blobs" => n_blobs, "service"=> "sync"); BatchProcessResult::Success { - was_non_empty: sent_blocks > 0, + sent_blocks, + imported_blocks, } } (_, Err(e)) => { @@ -388,7 +392,7 @@ impl NetworkBeaconProcessor { "service" => "sync"); match e.peer_action { Some(penalty) => BatchProcessResult::FaultyFailure { - imported_blocks: false, + imported_blocks: 0, penalty, }, None => BatchProcessResult::NonFaultyFailure, @@ -396,41 +400,6 @@ impl NetworkBeaconProcessor { } } } - // this is a parent lookup request from the sync manager - ChainSegmentProcessId::ParentLookup(chain_head) => { - debug!( - self.log, "Processing parent lookup"; - "chain_hash" => %chain_head, - "blocks" => downloaded_blocks.len() - ); - // parent blocks are ordered from highest slot to lowest, so we need to process in - // reverse - match self - .process_blocks(downloaded_blocks.iter().rev(), notify_execution_layer) - .await - { - (imported_blocks, Err(e)) => { - debug!(self.log, "Parent lookup failed"; "error" => %e.message); - match e.peer_action { - Some(penalty) => BatchProcessResult::FaultyFailure { - imported_blocks: imported_blocks > 0, - penalty, - }, - None => BatchProcessResult::NonFaultyFailure, - } - } - (imported_blocks, Ok(_)) => { - debug!( - 
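Above, `BatchProcessResult::Success` now carries `sent_blocks` and `imported_blocks` counts instead of the old `was_non_empty` flag, and later hunks advance a syncing chain only when `imported_blocks > 0`. A sketch of that shape with illustrative types:

```rust
enum BatchProcessResult {
    Success { sent_blocks: usize, imported_blocks: usize },
    FaultyFailure { imported_blocks: usize },
    NonFaultyFailure,
}

/// Only imported blocks prove the earlier batches valid, so only they advance the chain.
fn should_advance_chain(result: &BatchProcessResult) -> bool {
    match result {
        BatchProcessResult::Success { imported_blocks, .. }
        | BatchProcessResult::FaultyFailure { imported_blocks } => *imported_blocks > 0,
        BatchProcessResult::NonFaultyFailure => false,
    }
}

fn main() {
    let ok = BatchProcessResult::Success { sent_blocks: 8, imported_blocks: 0 };
    if let BatchProcessResult::Success { sent_blocks, .. } = &ok {
        assert_eq!(*sent_blocks, 8);
    }
    // A batch that sent blocks but imported none does not validate previous batches.
    assert!(!should_advance_chain(&ok));
    assert!(should_advance_chain(&BatchProcessResult::FaultyFailure { imported_blocks: 3 }));
}
```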
self.log, "Parent lookup processed successfully"; - "chain_hash" => %chain_head, - "imported_blocks" => imported_blocks - ); - BatchProcessResult::Success { - was_non_empty: imported_blocks > 0, - } - } - } - } }; self.send_sync_message(SyncMessage::BatchProcessed { sync_type, result }); diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index dd58eb83555..a9b9f64a79d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -239,6 +239,11 @@ impl TestRig { Some(work_journal_tx), harness.chain.slot_clock.clone(), chain.spec.maximum_gossip_clock_disparity(), + BeaconProcessorQueueLengths::from_state( + &chain.canonical_head.cached_head().snapshot.beacon_state, + &chain.spec, + ) + .unwrap(), ); assert!(beacon_processor.is_ok()); @@ -311,9 +316,7 @@ impl TestRig { block_root, RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), - BlockProcessType::ParentLookup { - chain_hash: Hash256::random(), - }, + BlockProcessType::SingleBlock { id: 0 }, ) .unwrap(); } @@ -790,9 +793,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod let mut rig = TestRig::new(SMALL_CHAIN).await; // Empty the op pool. - rig.chain - .op_pool - .prune_attestations(u64::max_value().into()); + rig.chain.op_pool.prune_attestations(u64::MAX.into()); assert_eq!(rig.chain.op_pool.num_attestations(), 0); // Send the attestation but not the block, and check that it was not imported. diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 1937fc11cf9..e125c13f4c2 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -7,9 +7,8 @@ use crate::error; use crate::network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}; -use crate::service::{NetworkMessage, RequestId}; +use crate::service::NetworkMessage; use crate::status::status_message; -use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_processor::{ @@ -18,6 +17,7 @@ use beacon_processor::{ use futures::prelude::*; use lighthouse_network::rpc::*; use lighthouse_network::{ + service::api_types::{AppRequestId, SyncRequestId}, MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response, }; use logging::TimeLatch; @@ -61,13 +61,13 @@ pub enum RouterMessage { /// An RPC response has been received. RPCResponseReceived { peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, response: Response, }, /// An RPC request failed RPCFailed { peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, error: RPCError, }, /// A gossip message has been received. The fields are: message id, the peer that sent us this @@ -235,7 +235,7 @@ impl Router { fn handle_rpc_response( &mut self, peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, response: Response, ) { match response { @@ -448,9 +448,9 @@ impl Router { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. 
- pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: AppRequestId, error: RPCError) { // Check if the failed RPC belongs to sync - if let RequestId::Sync(request_id) = request_id { + if let AppRequestId::Sync(request_id) = request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, request_id, @@ -488,18 +488,18 @@ impl Router { pub fn on_blocks_by_range_response( &mut self, peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, beacon_block: Option>>, ) { let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - SyncId::SingleBlock { .. } | SyncId::SingleBlob { .. } => { + AppRequestId::Sync(sync_id) => match sync_id { + SyncRequestId::SingleBlock { .. } | SyncRequestId::SingleBlob { .. } => { crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); return; } - id @ SyncId::RangeBlockAndBlobs { .. } => id, + id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, }, - RequestId::Router => { + AppRequestId::Router => { crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); return; } @@ -522,7 +522,7 @@ impl Router { pub fn on_blobs_by_range_response( &mut self, peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, blob_sidecar: Option>>, ) { trace!( @@ -531,7 +531,7 @@ impl Router { "peer" => %peer_id, ); - if let RequestId::Sync(id) = request_id { + if let AppRequestId::Sync(id) = request_id { self.send_to_sync(SyncMessage::RpcBlob { peer_id, request_id: id, @@ -550,22 +550,22 @@ impl Router { pub fn on_blocks_by_root_response( &mut self, peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, beacon_block: Option>>, ) { let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - id @ SyncId::SingleBlock { .. } => id, - SyncId::RangeBlockAndBlobs { .. } => { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SingleBlock { .. } => id, + SyncRequestId::RangeBlockAndBlobs { .. } => { crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); return; } - SyncId::SingleBlob { .. } => { + SyncRequestId::SingleBlob { .. } => { crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); return; } }, - RequestId::Router => { + AppRequestId::Router => { crit!(self.log, "All BBRoot requests belong to sync"; "peer_id" => %peer_id); return; } @@ -588,22 +588,22 @@ impl Router { pub fn on_blobs_by_root_response( &mut self, peer_id: PeerId, - request_id: RequestId, + request_id: AppRequestId, blob_sidecar: Option>>, ) { let request_id = match request_id { - RequestId::Sync(sync_id) => match sync_id { - id @ SyncId::SingleBlob { .. } => id, - SyncId::SingleBlock { .. } => { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::SingleBlob { .. } => id, + SyncRequestId::SingleBlock { .. } => { crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); return; } - SyncId::RangeBlockAndBlobs { .. } => { + SyncRequestId::RangeBlockAndBlobs { .. 
} => { crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); return; } }, - RequestId::Router => { + AppRequestId::Router => { crit!(self.log, "All BlobsByRoot requests belong to sync"; "peer_id" => %peer_id); return; } @@ -667,7 +667,7 @@ impl HandlerNetworkContext { pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) { self.inform_network(NetworkMessage::SendRequest { peer_id, - request_id: RequestId::Router, + request_id: AppRequestId::Router, request, }) } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 34ed3edcf93..e522285a9e3 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,3 @@ -use super::sync::manager::RequestId as SyncId; use crate::nat; use crate::network_beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; @@ -23,6 +22,7 @@ use lighthouse_network::{ Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, }; use lighthouse_network::{ + service::api_types::AppRequestId, types::{core_topics_to_subscribe, GossipEncoding, GossipTopic}, MessageId, NetworkEvent, NetworkGlobals, PeerId, }; @@ -51,13 +51,6 @@ const UNSUBSCRIBE_DELAY_EPOCHS: u64 = 2; /// able to run tens of thousands of validators on one BN. const VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE: usize = 65_536; -/// Application level requests sent to the network. -#[derive(Debug, Clone, Copy)] -pub enum RequestId { - Sync(SyncId), - Router, -} - /// Types of messages that the network service can receive. #[derive(Debug, IntoStaticStr)] #[strum(serialize_all = "snake_case")] @@ -69,7 +62,7 @@ pub enum NetworkMessage { SendRequest { peer_id: PeerId, request: Request, - request_id: RequestId, + request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { @@ -168,7 +161,7 @@ pub struct NetworkService { /// A reference to the underlying beacon chain. beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. - libp2p: Network, + libp2p: Network, /// An attestation and subnet manager service. attestation_service: AttestationService, /// A sync committeee subnet manager service. @@ -499,7 +492,7 @@ impl NetworkService { /// Handle an event received from the network. async fn on_libp2p_event( &mut self, - ev: NetworkEvent, + ev: NetworkEvent, shutdown_sender: &mut Sender, ) { match ev { @@ -613,7 +606,15 @@ impl NetworkService { request, request_id, } => { - self.libp2p.send_request(peer_id, request_id, request); + if let Err((request_id, error)) = + self.libp2p.send_request(peer_id, request_id, request) + { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id, + error, + }); + } } NetworkMessage::SendResponse { peer_id, diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index ab9ffb95a6c..830c43cbb18 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -29,6 +29,10 @@ pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; /// Currently a whole slot ahead. const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; +/// The number of slots after an aggregator duty where we remove the entry from +/// `aggregate_validators_on_subnet` delay map. 
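The service change above makes `send_request` fallible: on failure the `(request_id, error)` pair comes back so the service can synthesize an `RPCFailed` router message instead of dropping the request silently. An illustrative sketch of that contract with stand-in types:

```rust
#[derive(Debug, Clone, Copy)]
enum AppRequestId { Router }

#[derive(Debug)]
enum RpcError { Disconnected }

fn send_request(
    connected: bool,
    request_id: AppRequestId,
) -> Result<(), (AppRequestId, RpcError)> {
    if connected {
        Ok(())
    } else {
        // Hand the id back so the failure can be routed to whoever made the request.
        Err((request_id, RpcError::Disconnected))
    }
}

fn main() {
    if let Err((id, error)) = send_request(false, AppRequestId::Router) {
        println!("report RPCFailed {{ id: {:?}, error: {:?} }}", id, error);
    }
    assert!(send_request(true, AppRequestId::Router).is_ok());
}
```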
+const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2; + #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub(crate) enum SubscriptionKind { /// Long lived subscriptions. @@ -389,7 +393,7 @@ impl AttestationService { .map(|tracked_vals| { tracked_vals.contains_key(&ExactSubnet { subnet_id: subnet, - slot: attestation.data.slot, + slot: attestation.data().slot, }) }) .unwrap_or(true) @@ -462,23 +466,27 @@ impl AttestationService { ) -> Result<(), &'static str> { let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain + .slot_clock + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. + // Calculate how long before we need to subscribe to the subnet. - let time_to_subscription_start = { - // The short time we schedule the subscription before it's actually required. This - // ensures we are subscribed on time, and allows consecutive subscriptions to the same - // subnet to overlap, reducing subnet churn. - let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; - // The time to the required slot. - let time_to_subscription_slot = self - .beacon_chain - .slot_clock - .duration_to_slot(slot) - .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. - time_to_subscription_slot.saturating_sub(advance_subscription_duration) - }; + let time_to_subscription_start = + time_to_subscription_slot.saturating_sub(advance_subscription_duration); + // The time after a duty slot where we no longer need it in the `aggregate_validators_on_subnet` + // delay map. + let time_to_unsubscribe = + time_to_subscription_slot + UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY * slot_duration; if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { - tracked_vals.insert(ExactSubnet { subnet_id, slot }); + tracked_vals.insert_at(ExactSubnet { subnet_id, slot }, time_to_unsubscribe); } // If the subscription should be done in the future, schedule it. Otherwise subscribe diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 9075bb15f08..dfb05da19bd 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -9,7 +9,7 @@ //! sync as failed, log an error and attempt to retry once a new peer joins the node. use crate::network_beacon_processor::ChainSegmentProcessId; -use crate::sync::manager::{BatchProcessResult, Id}; +use crate::sync::manager::BatchProcessResult; use crate::sync::network_context::RangeRequestId; use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{ @@ -17,6 +17,7 @@ use crate::sync::range_sync::{ }; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_network::service::api_types::Id; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; use rand::seq::SliceRandom; @@ -320,7 +321,7 @@ impl BackFillSync { } if let Some(batch_ids) = self.active_requests.remove(peer_id) { - // fail the batches + // fail the batches. 
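The subnet-subscription hunk above computes when to subscribe ahead of a duty slot and when the aggregator entry can leave the delay map. A small sketch of that `Duration` arithmetic; the constants mirror the diff (with `ADVANCE_SUBSCRIBE_SLOT_FRACTION = 1` the advance is a whole slot, as the comment says), while the helper itself is illustrative:

```rust
use std::time::Duration;

const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1;
const UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY: u32 = 2;

fn subscription_window(
    slot_duration: Duration,
    time_to_duty_slot: Duration,
) -> (Duration, Duration) {
    // Subscribe a little early so consecutive duties on the same subnet overlap.
    let advance = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION;
    let start_in = time_to_duty_slot.saturating_sub(advance);
    // Keep the aggregator entry in the delay map for a couple of slots after the duty.
    let unsubscribe_in = time_to_duty_slot + slot_duration * UNSUBSCRIBE_AFTER_AGGREGATOR_DUTY;
    (start_in, unsubscribe_in)
}

fn main() {
    let slot = Duration::from_secs(12);
    let (start_in, unsubscribe_in) = subscription_window(slot, Duration::from_secs(24));
    assert_eq!(start_in, Duration::from_secs(12)); // one full slot of advance subscription
    assert_eq!(unsubscribe_in, Duration::from_secs(48)); // duty slot + 2 slots of grace
}
```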
for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { match batch.download_failed(false) { @@ -335,7 +336,7 @@ impl BackFillSync { // If we have run out of peers in which to retry this batch, the backfill state // transitions to a paused state. // We still need to reset the state for all the affected batches, so we should not - // short circuit early + // short circuit early. if self.retry_batch_download(network, id).is_err() { debug!( self.log, @@ -403,7 +404,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + blocks: Vec>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -426,20 +427,14 @@ impl BackFillSync { } }; - if let Some(block) = beacon_block { - // This is not a stream termination, simply add the block to the request - if let Err(e) = batch.add_block(block) { - self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; - } - Ok(ProcessResult::Successful) - } else { + { // A stream termination has been sent. This batch has ended. Process a completed batch. // Remove the request from the peer's active batches self.active_requests .get_mut(peer_id) .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed() { + match batch.download_completed(blocks) { Ok(received) => { let awaiting_batches = self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; @@ -533,7 +528,7 @@ impl BackFillSync { // result callback. This is done, because an empty batch could end a chain and the logic // for removing chains and checking completion is in the callback. - let blocks = match batch.start_processing() { + let (blocks, _) = match batch.start_processing() { Err(e) => { return self .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) @@ -620,13 +615,15 @@ impl BackFillSync { "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); match result { - BatchProcessResult::Success { was_non_empty } => { + BatchProcessResult::Success { + imported_blocks, .. + } => { if let Err(e) = batch.processing_completed(BatchProcessingResult::Success) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } // If the processed batch was not empty, we can validate previous unvalidated // blocks. - if *was_non_empty { + if *imported_blocks > 0 { self.advance_chain(network, batch_id); } @@ -682,7 +679,7 @@ impl BackFillSync { Ok(BatchOperationOutcome::Continue) => { // chain can continue. Check if it can be progressed - if *imported_blocks { + if *imported_blocks > 0 { // At least one block was successfully verified and imported, then we can be sure all // previous batches are valid and we only need to download the current failed // batch. @@ -924,24 +921,22 @@ impl BackFillSync { // Find a peer to request the batch let failed_peers = batch.failed_peers(); - let new_peer = { - let mut priorized_peers = self - .network_globals - .peers - .read() - .synced_peers() - .map(|peer| { - ( - failed_peers.contains(peer), - self.active_requests.get(peer).map(|v| v.len()).unwrap_or(0), - *peer, - ) - }) - .collect::>(); + let new_peer = self + .network_globals + .peers + .read() + .synced_peers() + .map(|peer| { + ( + failed_peers.contains(peer), + self.active_requests.get(peer).map(|v| v.len()).unwrap_or(0), + rand::random::(), + *peer, + ) + }) // Sort peers prioritizing unrelated peers with less active requests. 
- priorized_peers.sort_unstable(); - priorized_peers.first().map(|&(_, _, peer)| peer) - }; + .min() + .map(|(_, _, _, peer)| peer); if let Some(peer) = new_peer { self.participating_peers.insert(peer); @@ -986,7 +981,7 @@ impl BackFillSync { Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => e, &batch); + "batch_id" => batch_id, "error" => ?e, &batch); // register the failed download and check if the batch can be retried if let Err(e) = batch.start_downloading_from_peer(peer, 1) { return self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)); diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 9d1af0cb813..e94e9589c0a 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -1,21 +1,17 @@ -use crate::sync::block_lookups::parent_lookup::PARENT_FAIL_TOLERANCE; use crate::sync::block_lookups::single_block_lookup::{ LookupRequestError, SingleBlockLookup, SingleLookupRequestState, }; -use crate::sync::block_lookups::{ - BlobRequestState, BlockLookups, BlockRequestState, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, -}; -use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId}; -use crate::sync::network_context::{ - BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest, SyncNetworkContext, -}; +use crate::sync::block_lookups::{BlobRequestState, BlockRequestState, PeerId}; +use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; use beacon_chain::BeaconChainTypes; +use lighthouse_network::service::api_types::Id; use std::sync::Arc; -use std::time::Duration; use types::blob_sidecar::FixedBlobSidecarList; -use types::{Hash256, SignedBeaconBlock}; +use types::SignedBeaconBlock; + +use super::single_block_lookup::DownloadResult; +use super::SingleLookupId; #[derive(Debug, Copy, Clone)] pub enum ResponseType { @@ -23,21 +19,6 @@ pub enum ResponseType { Blob, } -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub enum LookupType { - Current, - Parent, -} - -impl LookupType { - fn max_attempts(&self) -> u8 { - match self { - LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, - LookupType::Parent => PARENT_FAIL_TOLERANCE, - } - } -} - /// This trait unifies common single block lookup functionality across blocks and blobs. This /// includes making requests, verifying responses, and handling processing results. A /// `SingleBlockLookup` includes both a `BlockRequestState` and a `BlobRequestState`, this trait is @@ -47,121 +28,29 @@ impl LookupType { /// safety when handling a block/blob response ensuring we only mutate the correct corresponding /// state. pub trait RequestState { - /// The type of the request . - type RequestType; - /// The type created after validation. type VerifiedResponseType: Clone; - /// We convert a `VerifiedResponseType` to this type prior to sending it to the beacon processor. - type ReconstructedResponseType; - - /* Request building methods */ - - /// Construct a new request. - fn build_request( - &mut self, - lookup_type: LookupType, - ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { - // Verify and construct request. 
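The backfill peer-selection rewrite that ends above builds a sort key of (failed-before, active-requests, random tie-break, peer) and takes the minimum instead of sorting a collected Vec. An illustrative sketch of that key; the random tie-break is faked here to keep the example dependency-free:

```rust
fn pick_peer<'a>(peers: &[(&'a str, bool, usize)]) -> Option<&'a str> {
    peers
        .iter()
        .map(|(peer, failed, active)| {
            // Stand-in for a random value; prefers never-failed peers, then fewer
            // in-flight batches, then breaks remaining ties arbitrarily.
            let tiebreak = peer.len() as u32;
            (*failed, *active, tiebreak, *peer)
        })
        .min()
        .map(|(_, _, _, peer)| peer)
}

fn main() {
    let peers = [("peer_a", false, 2), ("peer_b", false, 0), ("peer_c", true, 0)];
    // peer_b has never failed this batch and has no active requests.
    assert_eq!(pick_peer(&peers), Some("peer_b"));
}
```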
- self.too_many_attempts(lookup_type)?; - let peer = self.get_peer()?; - let request = self.new_request(); - Ok((peer, request)) - } - - /// Construct a new request and send it. - fn build_request_and_send( - &mut self, - id: Id, - lookup_type: LookupType, - cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - // Check if request is necessary. - if !self.get_state().is_awaiting_download() { - return Ok(()); - } - - // Construct request. - let (peer_id, request) = self.build_request(lookup_type)?; - - // Update request state. - let req_counter = self.get_state_mut().on_download_start(peer_id); - - // Make request - let id = SingleLookupReqId { - id, - req_counter, - lookup_type, - }; - Self::make_request(id, peer_id, request, cx) - } - - /// Verify the current request has not exceeded the maximum number of attempts. - fn too_many_attempts(&self, lookup_type: LookupType) -> Result<(), LookupRequestError> { - let request_state = self.get_state(); - - if request_state.failed_attempts() >= lookup_type.max_attempts() { - let cannot_process = request_state.more_failed_processing_attempts(); - Err(LookupRequestError::TooManyAttempts { cannot_process }) - } else { - Ok(()) - } - } - - /// Get the next peer to request. Draws from the set of peers we think should have both the - /// block and blob first. If that fails, we draw from the set of peers that may have either. - fn get_peer(&mut self) -> Result { - self.get_state_mut() - .use_rand_available_peer() - .ok_or(LookupRequestError::NoPeers) - } - - /// Initialize `Self::RequestType`. - fn new_request(&self) -> Self::RequestType; - - /// Send the request to the network service. + /// Request the network context to prepare a request of a component of `block_root`. If the + /// request is not necessary because the component is already known / processed, return false. + /// Return true if it sent a request and we can expect an event back from the network. fn make_request( - id: SingleLookupReqId, + &self, + id: Id, peer_id: PeerId, - request: Self::RequestType, + downloaded_block_expected_blobs: Option, cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError>; + ) -> Result; /* Response handling methods */ - /// A getter for the parent root of the response. Returns an `Option` because we won't know - /// the blob parent if we don't end up getting any blobs in the response. - fn get_parent_root(verified_response: &Self::VerifiedResponseType) -> Option; - - /// Caches the verified response in the lookup if necessary. This is only necessary for lookups - /// triggered by `UnknownParent` errors. - fn add_to_child_components( - verified_response: Self::VerifiedResponseType, - components: &mut ChildComponents, - ); - - /// Convert a verified response to the type we send to the beacon processor. - fn verified_to_reconstructed( - block_root: Hash256, - verified: Self::VerifiedResponseType, - ) -> Self::ReconstructedResponseType; - /// Send the response to the beacon processor. - fn send_reconstructed_for_processing( + fn send_for_processing( id: Id, - bl: &BlockLookups, - block_root: Hash256, - verified: Self::ReconstructedResponseType, - duration: Duration, + result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError>; - /// Register a failure to process the block or blob. - fn register_failure_downloading(&mut self) { - self.get_state_mut().on_download_failure() - } - /* Utility methods */ /// Returns the `ResponseType` associated with this trait implementation. Useful in logging. 
@@ -171,64 +60,44 @@ pub trait RequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. - fn get_state(&self) -> &SingleLookupRequestState; + fn get_state(&self) -> &SingleLookupRequestState; /// A getter for a mutable reference to the SingleLookupRequestState associated with this trait. - fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; } -impl RequestState for BlockRequestState { - type RequestType = BlocksByRootSingleRequest; +impl RequestState for BlockRequestState { type VerifiedResponseType = Arc>; - type ReconstructedResponseType = RpcBlock; - - fn new_request(&self) -> Self::RequestType { - BlocksByRootSingleRequest(self.requested_block_root) - } fn make_request( - id: SingleLookupReqId, + &self, + id: SingleLookupId, peer_id: PeerId, - request: Self::RequestType, + _: Option, cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - cx.block_lookup_request(id, peer_id, request) - .map_err(LookupRequestError::SendFailed) + ) -> Result { + cx.block_lookup_request(id, peer_id, self.requested_block_root) + .map_err(LookupRequestError::SendFailedNetwork) } - fn get_parent_root(verified_response: &Arc>) -> Option { - Some(verified_response.parent_root()) - } - - fn add_to_child_components( - verified_response: Arc>, - components: &mut ChildComponents, - ) { - components.merge_block(verified_response); - } - - fn verified_to_reconstructed( - block_root: Hash256, - block: Arc>, - ) -> RpcBlock { - RpcBlock::new_without_blobs(Some(block_root), block) - } - - fn send_reconstructed_for_processing( - id: Id, - bl: &BlockLookups, - block_root: Hash256, - constructed: RpcBlock, - duration: Duration, + fn send_for_processing( + id: SingleLookupId, + download_result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError> { - bl.send_block_for_processing( + let DownloadResult { + value, block_root, - constructed, - duration, - BlockProcessType::SingleBlock { id }, - cx, + seen_timestamp, + peer_id: _, + } = download_result; + cx.send_block_for_processing( + id, + block_root, + RpcBlock::new_without_blobs(Some(block_root), value), + seen_timestamp, ) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { @@ -237,73 +106,46 @@ impl RequestState for BlockRequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.block_request_state } - fn get_state(&self) -> &SingleLookupRequestState { + fn get_state(&self) -> &SingleLookupRequestState { &self.state } - fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { &mut self.state } } -impl RequestState for BlobRequestState { - type RequestType = BlobsByRootSingleBlockRequest; +impl RequestState for BlobRequestState { type VerifiedResponseType = FixedBlobSidecarList; - type ReconstructedResponseType = FixedBlobSidecarList; - - fn new_request(&self) -> Self::RequestType { - BlobsByRootSingleBlockRequest { - block_root: self.block_root, - indices: self.requested_ids.indices(), - } - } fn make_request( - id: SingleLookupReqId, + &self, + id: Id, peer_id: PeerId, - request: Self::RequestType, + downloaded_block_expected_blobs: Option, cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - cx.blob_lookup_request(id, peer_id, request) - .map_err(LookupRequestError::SendFailed) - 
} - - fn get_parent_root(verified_response: &FixedBlobSidecarList) -> Option { - verified_response - .into_iter() - .filter_map(|blob| blob.as_ref()) - .map(|blob| blob.block_parent_root()) - .next() - } - - fn add_to_child_components( - verified_response: FixedBlobSidecarList, - components: &mut ChildComponents, - ) { - components.merge_blobs(verified_response); - } - - fn verified_to_reconstructed( - _block_root: Hash256, - blobs: FixedBlobSidecarList, - ) -> FixedBlobSidecarList { - blobs + ) -> Result { + cx.blob_lookup_request( + id, + peer_id, + self.block_root, + downloaded_block_expected_blobs, + ) + .map_err(LookupRequestError::SendFailedNetwork) } - fn send_reconstructed_for_processing( + fn send_for_processing( id: Id, - bl: &BlockLookups, - block_root: Hash256, - verified: FixedBlobSidecarList, - duration: Duration, + download_result: DownloadResult, cx: &SyncNetworkContext, ) -> Result<(), LookupRequestError> { - bl.send_blobs_for_processing( + let DownloadResult { + value, block_root, - verified, - duration, - BlockProcessType::SingleBlob { id }, - cx, - ) + seen_timestamp, + peer_id: _, + } = download_result; + cx.send_blobs_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { @@ -312,10 +154,10 @@ impl RequestState for BlobRequestState { fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.blob_request_state } - fn get_state(&self) -> &SingleLookupRequestState { + fn get_state(&self) -> &SingleLookupRequestState { &self.state } - fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { + fn get_state_mut(&mut self) -> &mut SingleLookupRequestState { &mut self.state } } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a2909b49dd1..0a44cf2fdf5 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,756 +1,521 @@ -use self::single_block_lookup::SingleBlockLookup; -use super::manager::BlockProcessingResult; -use super::network_context::{LookupFailure, LookupVerifyError}; -use super::BatchProcessResult; -use super::{manager::BlockProcessType, network_context::SyncNetworkContext}; +//! Implements block lookup sync. +//! +//! Block lookup sync is triggered when a peer claims to have imported a block we don't know about. +//! For example, a peer attesting to a head block root that is not in our fork-choice. Lookup sync +//! is recursive in nature, as we may discover that this attested head block root has a parent that +//! is also unknown to us. +//! +//! Block lookup is implemented as an event-driven state machine. It sends events to the network and +//! beacon processor, and expects some set of events back. A discrepancy in the expected event API +//! will result in lookups getting "stuck". A lookup becomes stuck when there is no future event +//! that will trigger the lookup to make progress. There's a fallback mechanism that drops lookups +//! that live for too long, logging the line "Notify the devs a sync lookup is stuck". +//! +//! The expected event API is documented in the code paths that are making assumptions with the +//! comment prefix "Lookup sync event safety:" +//! +//! Block lookup sync attempts to not re-download or re-process data that we already have. Block +//! components are cached temporarily in multiple places before they are imported into fork-choice. +//! 
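In the reworked trait above, `make_request` reports whether a network request was actually issued, so a lookup can skip components that are already downloaded or being processed. A simplified, self-contained sketch of that contract; the variant names are assumptions based on the surrounding `LookupRequestResult` mentions:

```rust
/// What the network context reports back when a lookup asks for a component.
enum LookupRequestResult {
    /// A request was sent; a network event for this id is expected later.
    RequestSent(u32),
    /// No request needed: the component is already downloaded or being processed.
    NoRequestNeeded,
}

/// Decide whether a blob request is necessary given what we already hold.
fn maybe_request_blobs(next_id: u32, already_have_all_blobs: bool) -> LookupRequestResult {
    if already_have_all_blobs {
        // The lookup must not wait for a network event that will never arrive.
        LookupRequestResult::NoRequestNeeded
    } else {
        LookupRequestResult::RequestSent(next_id)
    }
}

fn main() {
    assert!(matches!(
        maybe_request_blobs(1, false),
        LookupRequestResult::RequestSent(1)
    ));
    assert!(matches!(
        maybe_request_blobs(2, true),
        LookupRequestResult::NoRequestNeeded
    ));
}
```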
Therefore, block lookup sync must peek these caches correctly to decide when to skip a download +//! or consider a lookup complete. These caches are read from the `SyncNetworkContext` and its state +//! returned to this module as `LookupRequestResult` variants. + +use self::parent_chain::{compute_parent_chains, NodeChain}; +pub use self::single_block_lookup::DownloadResult; +use self::single_block_lookup::{LookupRequestError, LookupResult, SingleBlockLookup}; +use super::manager::{BlockProcessType, BlockProcessingResult, SLOT_IMPORT_TOLERANCE}; +use super::network_context::{RpcResponseResult, SyncNetworkContext}; use crate::metrics; -use crate::network_beacon_processor::ChainSegmentProcessId; -use crate::sync::block_lookups::common::LookupType; -use crate::sync::block_lookups::parent_lookup::{ParentLookup, RequestError}; -use crate::sync::block_lookups::single_block_lookup::{CachedChild, LookupRequestError}; -use crate::sync::manager::{Id, SingleLookupReqId}; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; -pub use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::data_availability_checker::{ - AvailabilityCheckErrorCategory, DataAvailabilityChecker, -}; -use beacon_chain::validator_monitor::timestamp_now; +use crate::sync::block_lookups::common::ResponseType; +use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; +use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::data_availability_checker::AvailabilityCheckErrorCategory; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; +use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; pub use single_block_lookup::{BlobRequestState, BlockRequestState}; -use slog::{debug, error, trace, warn, Logger}; -use smallvec::SmallVec; -use std::collections::{HashMap, VecDeque}; +use slog::{debug, error, warn, Logger}; +use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use store::Hash256; -use types::blob_sidecar::FixedBlobSidecarList; -use types::Slot; +use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; pub mod common; -mod parent_lookup; +pub mod parent_chain; mod single_block_lookup; #[cfg(test)] mod tests; -pub type DownloadedBlock = (Hash256, RpcBlock); +/// The maximum depth we will search for a parent block. In principle we should have sync'd any +/// canonical chain to its head once the peer connects. A chain should not appear where it's depth +/// is further back than the most recent head slot. +pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; -pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; +pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; + +/// Maximum time we allow a lookup to exist before assuming it is stuck and will never make +/// progress. Assume the worse case processing time per block component set * times max depth. +/// 15 * 2 * 32 = 16 minutes. +const LOOKUP_MAX_DURATION_STUCK_SECS: u64 = 15 * PARENT_DEPTH_TOLERANCE as u64; +/// The most common case of child-lookup without peers is receiving block components before the +/// attestation deadline when the node is lagging behind. Once peers start attesting for the child +/// lookup at most after 4 seconds, the lookup should gain peers. 
+const LOOKUP_MAX_DURATION_NO_PEERS_SECS: u64 = 10; + +/// Lookups contain untrusted data, including blocks that have not yet been validated. In case of +/// bugs or malicious activity we want to bound how much memory these lookups can consume. Aprox the +/// max size of a lookup is ~ 10 MB (current max size of gossip and RPC blocks). 200 lookups can +/// take at most 2 GB. 200 lookups allow 3 parallel chains of depth 64 (current maximum). +const MAX_LOOKUPS: usize = 200; + +pub enum BlockComponent { + Block(DownloadResult>>), + Blob(DownloadResult>>), +} + +impl BlockComponent { + fn parent_root(&self) -> Hash256 { + match self { + BlockComponent::Block(block) => block.value.parent_root(), + BlockComponent::Blob(blob) => blob.value.block_parent_root(), + } + } + fn get_type(&self) -> &'static str { + match self { + BlockComponent::Block(_) => "block", + BlockComponent::Blob(_) => "blob", + } + } +} + +pub type SingleLookupId = u32; enum Action { Retry, - ParentUnknown { parent_root: Hash256, slot: Slot }, + ParentUnknown { parent_root: Hash256 }, Drop, Continue, } pub struct BlockLookups { - /// Parent chain lookups being downloaded. - parent_lookups: SmallVec<[ParentLookup; 3]>, - - processing_parent_lookups: HashMap, SingleBlockLookup)>, - /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - single_block_lookups: FnvHashMap>, - - pub(crate) da_checker: Arc>, + // TODO: Why not index lookups by block_root? + single_block_lookups: FnvHashMap>, /// The logger for the import manager. log: Logger, } +#[cfg(test)] +use lighthouse_network::service::api_types::Id; + +#[cfg(test)] +/// Tuple of `SingleLookupId`, requested block root, awaiting parent block root (if any), +/// and list of peers that claim to have imported this set of block components. +pub(crate) type BlockLookupSummary = (Id, Hash256, Option, Vec); + impl BlockLookups { - pub fn new(da_checker: Arc>, log: Logger) -> Self { + pub fn new(log: Logger) -> Self { Self { - parent_lookups: Default::default(), - processing_parent_lookups: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), - da_checker, log, } } #[cfg(test)] - pub(crate) fn active_single_lookups(&self) -> Vec { - self.single_block_lookups.keys().cloned().collect() + pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) { + self.failed_chains.insert(block_root); } #[cfg(test)] - pub(crate) fn active_parent_lookups(&self) -> Vec { - self.parent_lookups - .iter() - .map(|r| r.chain_hash()) - .collect::>() + pub(crate) fn get_failed_chains(&mut self) -> Vec { + self.failed_chains.keys().cloned().collect() } #[cfg(test)] - pub(crate) fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.failed_chains.contains(chain_hash) + pub(crate) fn active_single_lookups(&self) -> Vec { + self.single_block_lookups + .iter() + .map(|(id, l)| { + ( + *id, + l.block_root(), + l.awaiting_parent(), + l.all_peers().copied().collect(), + ) + }) + .collect() + } + + /// Returns a vec of all parent lookup chains by tip, in descending slot order (tip first) + pub(crate) fn active_parent_lookups(&self) -> Vec { + compute_parent_chains( + &self + .single_block_lookups + .values() + .map(|lookup| lookup.into()) + .collect::>(), + ) } /* Lookup requests */ - /// Creates a lookup for the block with the given `block_root` and immediately triggers it. 
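The bounds encoded by the constants above work out as follows; a minimal standalone sketch, assuming `SLOT_IMPORT_TOLERANCE` is 32 (as implied by the arithmetic in the comment) and roughly 10 MB per lookup:

// Sketch only: sanity-check of the lookup bounds described above. The value of
// SLOT_IMPORT_TOLERANCE (32) and the ~10 MB per-lookup figure are assumptions
// taken from the surrounding comments, not constants defined in this file.
const ASSUMED_SLOT_IMPORT_TOLERANCE: u64 = 32;
const ASSUMED_PARENT_DEPTH_TOLERANCE: u64 = ASSUMED_SLOT_IMPORT_TOLERANCE * 2; // 64
const ASSUMED_MAX_LOOKUP_SIZE_MB: u64 = 10;
const ASSUMED_MAX_LOOKUPS: u64 = 200;

fn main() {
    // Worst-case ~15 s of processing per block component set, times the max chain depth.
    let stuck_secs = 15 * ASSUMED_PARENT_DEPTH_TOLERANCE;
    assert_eq!(stuck_secs, 960); // 16 minutes
    // Bounding the number of live lookups also bounds memory held for untrusted data.
    let max_memory_mb = ASSUMED_MAX_LOOKUPS * ASSUMED_MAX_LOOKUP_SIZE_MB;
    assert_eq!(max_memory_mb, 2000); // ~2 GB
}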
- pub fn search_block( + /// Creates a parent lookup for the block with the given `block_root` and immediately triggers it. + /// If a parent lookup exists or is triggered, a current lookup will be created. + pub fn search_child_and_parent( &mut self, block_root: Hash256, - peer_source: &[PeerId], + block_component: BlockComponent, + peer_id: PeerId, cx: &mut SyncNetworkContext, ) { - self.new_current_lookup(block_root, None, peer_source, cx) + let parent_root = block_component.parent_root(); + + let parent_lookup_exists = + self.search_parent_of_child(parent_root, block_root, &[peer_id], cx); + // Only create the child lookup if the parent exists + if parent_lookup_exists { + // `search_parent_of_child` ensures that the parent root is not a failed chain + self.new_current_lookup( + block_root, + Some(block_component), + Some(parent_root), + // On an `UnknownParentBlock` or `UnknownParentBlob` event the peer is not required + // to have the rest of the block components (refer to decoupled blob gossip). Create + // the lookup with zero peers to house the block components. + &[], + cx, + ); + } } - /// Creates a lookup for the block with the given `block_root`, while caching other block - /// components we've already received. The block components are cached here because we haven't - /// imported its parent and therefore can't fully validate it and store it in the data - /// availability cache. - /// - /// The request is immediately triggered. - pub fn search_child_block( + /// Search a block whose parent root is unknown. + /// Returns true if the lookup is created or already exists + pub fn search_unknown_block( &mut self, block_root: Hash256, - child_components: ChildComponents, peer_source: &[PeerId], cx: &mut SyncNetworkContext, ) { - self.new_current_lookup(block_root, Some(child_components), peer_source, cx) + self.new_current_lookup(block_root, None, None, peer_source, cx); } - /// Attempts to trigger the request matching the given `block_root`. - pub fn trigger_single_lookup( + /// A block or blob triggers the search of a parent. + /// Check if this new lookup extends a bad chain: + /// - Extending `child_block_root_trigger` would exceed the max depth + /// - `block_root_to_search` is a failed chain + /// Returns true if the lookup is created or already exists + pub fn search_parent_of_child( &mut self, - mut single_block_lookup: SingleBlockLookup, + block_root_to_search: Hash256, + child_block_root_trigger: Hash256, + peers: &[PeerId], cx: &mut SyncNetworkContext, - ) { - let block_root = single_block_lookup.block_root(); - match single_block_lookup.request_block_and_blobs(cx) { - Ok(()) => self.add_single_lookup(single_block_lookup), - Err(e) => { - debug!(self.log, "Single block lookup failed"; - "error" => ?e, - "block_root" => ?block_root, - ); + ) -> bool { + let parent_chains = self.active_parent_lookups(); + + for (chain_idx, parent_chain) in parent_chains.iter().enumerate() { + // `block_root_to_search` will trigger a new lookup, and it will extend a parent_chain + // beyond its max length + let block_would_extend_chain = parent_chain.ancestor() == child_block_root_trigger; + // `block_root_to_search` already has a lookup, and with the block trigger it extends + // the parent_chain beyond its length.
This can happen because when creating a lookup + // for a new root we don't do any parent chain length checks + let trigger_is_chain_tip = parent_chain.tip == child_block_root_trigger; + + if (block_would_extend_chain || trigger_is_chain_tip) + && parent_chain.len() >= PARENT_DEPTH_TOLERANCE + { + debug!(self.log, "Parent lookup chain too long"; "block_root" => ?block_root_to_search); + + // Searching for this parent would extend a parent chain over the max + // Insert the tip only to failed chains + self.failed_chains.insert(parent_chain.tip); + + // Note: Drop only the chain that's too long until it merges with another chain + // that's not too long. Consider this attack: there's a chain of valid unknown + // blocks A -> B. A malicious peer builds `PARENT_DEPTH_TOLERANCE` garbage + // blocks on top of A forming A -> C. The malicious peer forces us to fetch C + // from it, which will result in parent A hitting the chain_too_long error. Then + // the valid chain A -> B is dropped too. + if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + for &peer_id in lookup.all_peers() { + cx.report_peer( + peer_id, + PeerAction::LowToleranceError, + "chain_too_long", + ); + } + self.drop_lookup_and_children(*lookup_id); + } + } + + return false; } } - } - - /// Adds a lookup to the `single_block_lookups` map. - pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { - self.single_block_lookups - .insert(single_block_lookup.id, single_block_lookup); - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); + // `block_root_to_search` is a failed chain check happens inside new_current_lookup + self.new_current_lookup(block_root_to_search, None, None, peers, cx) } /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is /// constructed. - pub fn new_current_lookup( + /// Returns true if the lookup is created or already exists + fn new_current_lookup( &mut self, block_root: Hash256, - child_components: Option>, + block_component: Option>, + awaiting_parent: Option, peers: &[PeerId], cx: &mut SyncNetworkContext, - ) { + ) -> bool { + // If this block or it's parent is part of a known failed chain, ignore it. + if self.failed_chains.contains(&block_root) { + debug!(self.log, "Block is from a past failed chain. 
Dropping"; "block_root" => ?block_root); + for peer_id in peers { + cx.report_peer(*peer_id, PeerAction::MidToleranceError, "failed_chain"); + } + return false; + } + // Do not re-request a block that is already being requested - if let Some((_, lookup)) = self + if let Some((&lookup_id, lookup)) = self .single_block_lookups .iter_mut() .find(|(_id, lookup)| lookup.is_for_block(block_root)) { - lookup.add_peers(peers); - if let Some(components) = child_components { - lookup.add_child_components(components); + if let Some(block_component) = block_component { + let component_type = block_component.get_type(); + let imported = lookup.add_child_components(block_component); + if !imported { + debug!(self.log, "Lookup child component ignored"; "block_root" => ?block_root, "type" => component_type); + } } - return; + + if let Err(e) = self.add_peers_to_lookup_and_ancestors(lookup_id, peers, cx) { + warn!(self.log, "Error adding peers to ancestor lookup"; "error" => ?e); + } + + return true; } - if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { - parent_req.is_for_block(block_root) || parent_req.contains_block(&block_root) - }) { - parent_lookup.add_peers(peers); + // Ensure that awaiting parent exists, otherwise this lookup won't be able to make progress + if let Some(awaiting_parent) = awaiting_parent { + if !self + .single_block_lookups + .iter() + .any(|(_, lookup)| lookup.is_for_block(awaiting_parent)) + { + warn!(self.log, "Ignoring child lookup parent lookup not found"; "block_root" => ?awaiting_parent); + return false; + } + } - // If the block was already downloaded, or is being downloaded in this moment, do not - // request it. - trace!(self.log, "Already searching for block in a parent lookup request"; "block_root" => ?block_root); - return; + // Lookups contain untrusted data, bound the total count of lookups hold in memory to reduce + // the risk of OOM in case of bugs of malicious activity. + if self.single_block_lookups.len() > MAX_LOOKUPS { + warn!(self.log, "Dropping lookup reached max"; "block_root" => ?block_root); + return false; } - if self - .processing_parent_lookups - .values() - .any(|(hashes, _last_parent_request)| hashes.contains(&block_root)) - { - // we are already processing this block, ignore it. - trace!(self.log, "Already processing block in a parent request"; "block_root" => ?block_root); - return; + // If we know that this lookup has unknown parent (is awaiting a parent lookup to resolve), + // signal here to hold processing downloaded data. 
+ let mut lookup = SingleBlockLookup::new(block_root, peers, cx.next_id(), awaiting_parent); + + // Add block components to the new request + if let Some(block_component) = block_component { + lookup.add_child_components(block_component); } - let msg = if child_components.is_some() { - "Searching for components of a block with unknown parent" - } else { - "Searching for block components" + let id = lookup.id; + let lookup = match self.single_block_lookups.entry(id) { + Entry::Vacant(entry) => entry.insert(lookup), + Entry::Occupied(_) => { + // Should never happen + warn!(self.log, "Lookup exists with same id"; "id" => id); + return false; + } }; - let lookup = SingleBlockLookup::new( - block_root, - child_components, - peers, - self.da_checker.clone(), - cx.next_id(), - LookupType::Current, - ); - debug!( self.log, - "{}", msg; + "Created block lookup"; "peer_ids" => ?peers, - "block" => ?block_root, + "block_root" => ?block_root, + "awaiting_parent" => awaiting_parent.map(|root| root.to_string()).unwrap_or("none".to_owned()), + "id" => lookup.id, ); - self.trigger_single_lookup(lookup, cx); - } - - /// If a block is attempted to be processed but we do not know its parent, this function is - /// called in order to find the block's parent. - pub fn search_parent( - &mut self, - slot: Slot, - block_root: Hash256, - parent_root: Hash256, - peer_id: PeerId, - cx: &mut SyncNetworkContext, - ) { - // If this block or it's parent is part of a known failed chain, ignore it. - if self.failed_chains.contains(&parent_root) || self.failed_chains.contains(&block_root) { - debug!(self.log, "Block is from a past failed chain. Dropping"; - "block_root" => ?block_root, "block_slot" => slot); - return; - } + metrics::inc_counter(&metrics::SYNC_LOOKUP_CREATED); - // Make sure this block is not already downloaded, and that neither it or its parent is - // being searched for. - if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| { - parent_req.contains_block(&parent_root) || parent_req.is_for_block(parent_root) - }) { - parent_lookup.add_peer(peer_id); - // we are already searching for this block, ignore it - debug!(self.log, "Already searching for parent block"; - "block_root" => ?block_root, "parent_root" => ?parent_root); - return; - } - - if self - .processing_parent_lookups - .iter() - .any(|(chain_hash, (hashes, _peers))| { - chain_hash == &block_root - || hashes.contains(&block_root) - || hashes.contains(&parent_root) - }) - { - // we are already processing this block, ignore it. - debug!(self.log, "Already processing parent block"; - "block_root" => ?block_root, "parent_root" => ?parent_root); - return; + let result = lookup.continue_requests(cx); + if self.on_lookup_result(id, result, "new_current_lookup", cx) { + self.update_metrics(); + true + } else { + false } - let parent_lookup = ParentLookup::new( - block_root, - parent_root, - peer_id, - self.da_checker.clone(), - cx, - ); - - debug!(self.log, "Created new parent lookup"; "block_root" => ?block_root, "parent_root" => ?parent_root); - - self.request_parent(parent_lookup, cx); } /* Lookup responses */ - /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` - /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests - /// that have been retried are ignored. - fn get_single_lookup>( + /// Process a block or blob response received from a single lookup request. 
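Conceptually, each lookup component advances through a small event-driven state machine as the download and processing handlers below fire; a minimal sketch of that flow, where every state name beyond the `AwaitingDownload` and `AwaitingProcessing` states referenced in the comments is an assumption:

// Illustrative sketch of the per-component lookup flow. Each transition is
// triggered by exactly one external event; if an expected event never arrives,
// the lookup is "stuck" and only the pruning fallback can remove it.
#[derive(Debug, PartialEq)]
enum ComponentState {
    AwaitingDownload,   // may issue (or re-issue) a network request
    Downloading,        // waiting for an RPC response or an RPC failure
    AwaitingProcessing, // downloaded; may be held back until the parent resolves
    Processing,         // sent to the beacon processor
    Processed,          // terminal: imported or already known
}

fn on_download_result(state: ComponentState, success: bool) -> ComponentState {
    match (state, success) {
        (ComponentState::Downloading, true) => ComponentState::AwaitingProcessing,
        (ComponentState::Downloading, false) => ComponentState::AwaitingDownload, // retry
        (other, _) => other, // unexpected event for this state: ignore it
    }
}

fn main() {
    let state = on_download_result(ComponentState::Downloading, true);
    assert_eq!(state, ComponentState::AwaitingProcessing);
}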
+ pub fn on_download_response>( &mut self, id: SingleLookupReqId, - ) -> Option> { - let mut lookup = self.single_block_lookups.remove(&id.id)?; - - let request_state = R::request_state_mut(&mut lookup); - if request_state - .get_state() - .is_current_req_counter(id.req_counter) - { - Some(lookup) - } else { - // We don't want to drop the lookup, just ignore the old response. - self.single_block_lookups.insert(id.id, lookup); - None - } - } - - /// Checks whether a single block lookup is waiting for a parent lookup to complete. This is - /// necessary because we want to make sure all parents are processed before sending a child - /// for processing, otherwise the block will fail validation and will be returned to the network - /// layer with an `UnknownParent` error. - pub fn has_pending_parent_request(&self, block_root: Hash256) -> bool { - self.parent_lookups - .iter() - .any(|parent_lookup| parent_lookup.chain_hash() == block_root) + peer_id: PeerId, + response: RpcResponseResult, + cx: &mut SyncNetworkContext, + ) { + let result = self.on_download_response_inner::(id, peer_id, response, cx); + self.on_lookup_result(id.lookup_id, result, "download_response", cx); } /// Process a block or blob response received from a single lookup request. - pub fn single_lookup_response>( + pub fn on_download_response_inner>( &mut self, - lookup_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, + response: RpcResponseResult, cx: &mut SyncNetworkContext, - ) { - let id = lookup_id.id; - let response_type = R::response_type(); + ) -> Result { + // Note: no need to downscore peers here, already downscored on network context - let Some(mut lookup) = self.get_single_lookup::(lookup_id) else { + let response_type = R::response_type(); + let Some(lookup) = self.single_block_lookups.get_mut(&id.lookup_id) else { // We don't have the ability to cancel in-flight RPC requests. So this can happen // if we started this RPC request, and later saw the block/blobs via gossip. - debug!( - self.log, - "Block returned for single block lookup not present"; - "response_type" => ?response_type, - ); - return; + debug!(self.log, "Block returned for single block lookup not present"; "id" => ?id); + return Err(LookupRequestError::UnknownLookup); }; - let expected_block_root = lookup.block_root(); - debug!(self.log, - "Peer returned response for single lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?expected_block_root, - "response_type" => ?response_type, - ); + let block_root = lookup.block_root(); + let request_state = R::request_state_mut(lookup).get_state_mut(); - match self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::SingleBlock { id: lookup.id }, - response, - &mut lookup, - ) { - Ok(_) => { - self.single_block_lookups.insert(id, lookup); - } - Err(e) => { + match response { + Ok((response, seen_timestamp)) => { debug!(self.log, - "Single lookup request failed"; - "error" => ?e, - "block_root" => ?expected_block_root, + "Received lookup download success"; + "block_root" => ?block_root, + "id" => ?id, + "peer_id" => %peer_id, + "response_type" => ?response_type, ); - } - } - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); - } - - /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean - /// the lookup is dropped. 
- fn handle_verified_response>( - &self, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - process_type: BlockProcessType, - verified_response: R::VerifiedResponseType, - lookup: &mut SingleBlockLookup, - ) -> Result<(), LookupRequestError> { - let id = lookup.id; - let block_root = lookup.block_root(); - let cached_child = lookup.add_response::(verified_response.clone()); - match cached_child { - CachedChild::Ok(block) => { - // If we have an outstanding parent request for this block, delay sending the response until - // all parent blocks have been processed, otherwise we will fail validation with an - // `UnknownParent`. - let delay_send = match lookup.lookup_type { - LookupType::Parent => false, - LookupType::Current => self.has_pending_parent_request(lookup.block_root()), - }; - - if !delay_send { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - self.send_block_for_processing( + // Here we could check if response extends a parent chain beyond its max length. + // However we defer that check to the handling of a processing error ParentUnknown. + // + // Here we could check if there's already a lookup for parent_root of `response`. In + // that case we know that sending the response for processing will likely result in + // a `ParentUnknown` error. However, for simplicity we choose to not implement this + // optimization. + + // Register the download peer here. Once we have received some data over the wire we + // attribute it to this peer for scoring latter regardless of how the request was + // done. + request_state.on_download_success( + id.req_id, + DownloadResult { + value: response, block_root, - block, seen_timestamp, - process_type, - cx, - )? - } - } - CachedChild::DownloadIncomplete => { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - // If this was the result of a block request, we can't determine if the block peer - // did anything wrong. If we already had both a block and blobs response processed, - // we should penalize the blobs peer because they did not provide all blobs on the - // initial request. - if lookup.both_components_downloaded() { - lookup.penalize_blob_peer(cx); - lookup.blob_request_state.state.on_download_failure(); - } - lookup.request_block_and_blobs(cx)?; - } - CachedChild::NotRequired => { - R::request_state_mut(lookup) - .get_state_mut() - .on_download_success() - .map_err(LookupRequestError::BadState)?; - - R::send_reconstructed_for_processing( - id, - self, - block_root, - R::verified_to_reconstructed(block_root, verified_response), - seen_timestamp, - cx, - )? - } - CachedChild::Err(e) => { - warn!(self.log, "Consistency error in cached block"; - "error" => ?e, - "block_root" => ?block_root - ); - lookup.handle_consistency_failure(cx); - lookup.request_block_and_blobs(cx)?; - } - } - Ok(()) - } - - /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` - /// matches the current `req_counter` for the lookup. This any stale responses from requests - /// that have been retried are ignored. 
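The replacement code keeps the same staleness rule through per-request ids (`id.req_id` in the handlers above); a minimal sketch of the idea, with the type and field names invented for illustration:

// Sketch: a response is only accepted if it carries the id of the latest attempt.
// `ReqId` and `current_req_id` are illustrative names, not the real lookup state API.
type ReqId = u32;

struct Attempt {
    current_req_id: ReqId,
}

impl Attempt {
    fn retry(&mut self) {
        self.current_req_id += 1;
    }
    fn accepts(&self, response_req_id: ReqId) -> bool {
        self.current_req_id == response_req_id
    }
}

fn main() {
    let mut attempt = Attempt { current_req_id: 1 };
    attempt.retry(); // a retried request supersedes the first one
    assert!(!attempt.accepts(1)); // a late response from the first attempt is ignored
    assert!(attempt.accepts(2));
}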
- fn get_parent_lookup>( - &mut self, - id: SingleLookupReqId, - ) -> Option> { - let mut parent_lookup = if let Some(pos) = self - .parent_lookups - .iter() - .position(|request| request.current_parent_request.id == id.id) - { - self.parent_lookups.remove(pos) - } else { - return None; - }; - - if R::request_state_mut(&mut parent_lookup.current_parent_request) - .get_state() - .is_current_req_counter(id.req_counter) - { - Some(parent_lookup) - } else { - self.parent_lookups.push(parent_lookup); - None - } - } - - /// Process a response received from a parent lookup request. - pub fn parent_lookup_response>( - &mut self, - id: SingleLookupReqId, - peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - ) { - let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { - debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id); - return; - }; - - debug!(self.log, - "Peer returned response for parent lookup"; - "peer_id" => %peer_id , - "id" => ?id, - "block_root" => ?parent_lookup.current_parent_request.block_request_state.requested_block_root, - "response_type" => ?R::response_type(), - ); - - match self.parent_lookup_response_inner::( - peer_id, - response, - seen_timestamp, - cx, - &mut parent_lookup, - ) { - Ok(()) => { - self.parent_lookups.push(parent_lookup); + peer_id, + }, + )?; + // continue_request will send for processing as the request state is AwaitingProcessing } Err(e) => { - self.handle_parent_request_error(&mut parent_lookup, cx, e); - } - } - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// Consolidates error handling for `parent_lookup_response`. An `Err` here should always mean - /// the lookup is dropped. - fn parent_lookup_response_inner>( - &mut self, - peer_id: PeerId, - response: R::VerifiedResponseType, - seen_timestamp: Duration, - cx: &mut SyncNetworkContext, - parent_lookup: &mut ParentLookup, - ) -> Result<(), RequestError> { - // check if the parent of this block isn't in the failed cache. If it is, this chain should - // be dropped and the peer downscored. - if let Some(parent_root) = R::get_parent_root(&response) { - if self.failed_chains.contains(&parent_root) { - let request_state = R::request_state_mut(&mut parent_lookup.current_parent_request); - request_state.register_failure_downloading(); - debug!( - self.log, - "Parent chain ignored due to past failure"; - "block" => %parent_root, + debug!(self.log, + "Received lookup download failure"; + "block_root" => ?block_root, + "id" => ?id, + "peer_id" => %peer_id, + "response_type" => ?response_type, + "error" => %e, ); - // Add the root block to failed chains - self.failed_chains.insert(parent_lookup.chain_hash()); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "bbroot_failed_chains", - ); - return Ok(()); + request_state.on_download_failure(id.req_id)?; + // continue_request will retry a download as the request state is AwaitingDownload } } - self.handle_verified_response::( - seen_timestamp, - cx, - BlockProcessType::ParentLookup { - chain_hash: parent_lookup.chain_hash(), - }, - response, - &mut parent_lookup.current_parent_request, - )?; - - Ok(()) - } - - /// Handle logging and peer scoring for `RequestError`s during parent lookup requests. 
- fn handle_parent_request_error( - &mut self, - parent_lookup: &mut ParentLookup, - cx: &SyncNetworkContext, - e: RequestError, - ) { - debug!(self.log, "Failed to request parent"; "error" => e.as_static()); - match e { - RequestError::SendFailed(_) => { - // Probably shutting down, nothing to do here. Drop the request - } - RequestError::ChainTooLong => { - self.failed_chains.insert(parent_lookup.chain_hash()); - // This indicates faulty peers. - for &peer_id in parent_lookup.all_used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - RequestError::TooManyAttempts { cannot_process } => { - // We only consider the chain failed if we were unable to process it. - // We could have failed because one peer continually failed to send us - // bad blocks. We still allow other peers to send us this chain. Note - // that peers that do this, still get penalised. - if cannot_process { - self.failed_chains.insert(parent_lookup.chain_hash()); - } - // This indicates faulty peers. - for &peer_id in parent_lookup.all_used_peers() { - cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) - } - } - RequestError::NoPeers => { - // This happens if the peer disconnects while the block is being - // processed. Drop the request without extra penalty - } - RequestError::BadState(..) => { - warn!(self.log, "Failed to request parent"; "error" => e.as_static()); - } - } + lookup.continue_requests(cx) } /* Error responses */ - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { - /* Check disconnection for single lookups */ - self.single_block_lookups.retain(|_, req| { - let should_drop_lookup = - req.should_drop_lookup_on_disconnected_peer(peer_id, cx, &self.log); - - !should_drop_lookup - }); - - /* Check disconnection for parent lookups */ - while let Some(pos) = self - .parent_lookups - .iter_mut() - .position(|req| req.check_peer_disconnected(peer_id).is_err()) - { - let parent_lookup = self.parent_lookups.remove(pos); - debug!(self.log, "Dropping parent lookup after peer disconnected"; &parent_lookup); - self.request_parent(parent_lookup, cx); + pub fn peer_disconnected(&mut self, peer_id: &PeerId) { + for (_, lookup) in self.single_block_lookups.iter_mut() { + lookup.remove_peer(peer_id); } } - /// An RPC error has occurred during a parent lookup. This function handles this case. - pub fn parent_lookup_failed>( - &mut self, - id: SingleLookupReqId, - peer_id: &PeerId, - cx: &mut SyncNetworkContext, - error: LookupFailure, - ) { - // Only downscore lookup verify errors. RPC errors are downscored in the network handler. - if let LookupFailure::LookupVerifyError(e) = &error { - // Downscore peer even if lookup is not known - self.downscore_on_rpc_error(peer_id, e, cx); - } - - let Some(mut parent_lookup) = self.get_parent_lookup::(id) else { - debug!(self.log, - "RPC failure for a block parent lookup request that was not found"; - "peer_id" => %peer_id, - "error" => %error - ); - return; - }; - R::request_state_mut(&mut parent_lookup.current_parent_request) - .register_failure_downloading(); - debug!(self.log, "Parent lookup block request failed"; - "chain_hash" => %parent_lookup.chain_hash(), "id" => ?id, "error" => %error - ); - - self.request_parent(parent_lookup, cx); - - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } + /* Processing responses */ - /// An RPC error has occurred during a single lookup. 
This function handles this case.\ - pub fn single_block_lookup_failed>( + pub fn on_processing_result( &mut self, - id: SingleLookupReqId, - peer_id: &PeerId, + process_type: BlockProcessType, + result: BlockProcessingResult, cx: &mut SyncNetworkContext, - error: LookupFailure, ) { - // Only downscore lookup verify errors. RPC errors are downscored in the network handler. - if let LookupFailure::LookupVerifyError(e) = &error { - // Downscore peer even if lookup is not known - self.downscore_on_rpc_error(peer_id, e, cx); - } - - let log = self.log.clone(); - let Some(mut lookup) = self.get_single_lookup::(id) else { - debug!(log, "Error response to dropped lookup"; "error" => %error); - return; + let lookup_result = match process_type { + BlockProcessType::SingleBlock { id } => { + self.on_processing_result_inner::>(id, result, cx) + } + BlockProcessType::SingleBlob { id } => { + self.on_processing_result_inner::>(id, result, cx) + } }; - let block_root = lookup.block_root(); - let request_state = R::request_state_mut(&mut lookup); - let response_type = R::response_type(); - trace!(log, - "Single lookup failed"; - "block_root" => ?block_root, - "error" => %error, - "peer_id" => %peer_id, - "response_type" => ?response_type - ); - let id = id.id; - request_state.register_failure_downloading(); - if let Err(e) = lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Single lookup retry failed"; - "error" => ?e, - "block_root" => ?block_root, - ); - } else { - self.single_block_lookups.insert(id, lookup); - } - - metrics::set_gauge( - &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, - self.single_block_lookups.len() as i64, - ); + let id = match process_type { + BlockProcessType::SingleBlock { id } | BlockProcessType::SingleBlob { id } => id, + }; + self.on_lookup_result(id, lookup_result, "processing_result", cx); } - /* Processing responses */ - - pub fn single_block_component_processed>( + pub fn on_processing_result_inner>( &mut self, - target_id: Id, + lookup_id: SingleLookupId, result: BlockProcessingResult, cx: &mut SyncNetworkContext, - ) { - let Some(mut lookup) = self.single_block_lookups.remove(&target_id) else { - debug!(self.log, "Unknown single block lookup"; "target_id" => target_id); - return; + ) -> Result { + let Some(lookup) = self.single_block_lookups.get_mut(&lookup_id) else { + debug!(self.log, "Unknown single block lookup"; "id" => lookup_id); + return Err(LookupRequestError::UnknownLookup); }; let block_root = lookup.block_root(); - let request_state = R::request_state_mut(&mut lookup); + let request_state = R::request_state_mut(lookup).get_state_mut(); - let peer_id = match request_state.get_state().processing_peer() { - Ok(peer_id) => peer_id, - Err(e) => { - debug!(self.log, "Attempting to process single block lookup in bad state"; "id" => target_id, "response_type" => ?R::response_type(), "error" => e); - return; - } - }; debug!( self.log, - "Block component processed for lookup"; - "response_type" => ?R::response_type(), + "Received lookup processing result"; + "component" => ?R::response_type(), "block_root" => ?block_root, + "id" => lookup_id, "result" => ?result, - "id" => target_id, ); let action = match result { BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. 
}) => { + | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { // Successfully imported - trace!(self.log, "Single block processing succeeded"; "block" => %block_root); - Action::Drop + request_state.on_processing_success()?; + Action::Continue } BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( @@ -759,28 +524,17 @@ impl BlockLookups { )) => { // `on_processing_success` is called here to ensure the request state is updated prior to checking // if both components have been processed. - if R::request_state_mut(&mut lookup) - .get_state_mut() - .on_processing_success() - .is_err() - { - warn!( - self.log, - "Single block processing state incorrect"; - "action" => "dropping single block request" - ); - Action::Drop - // If this was the result of a block request, we can't determined if the block peer did anything - // wrong. If we already had both a block and blobs response processed, we should penalize the - // blobs peer because they did not provide all blobs on the initial request. - } else if lookup.both_components_processed() { - lookup.penalize_blob_peer(cx); - - // Try it again if possible. - lookup.blob_request_state.state.on_processing_failure(); - Action::Retry + request_state.on_processing_success()?; + + if lookup.both_components_processed() { + // We don't request for other block components until being sure that the block has + // data. If we request blobs / columns to a peer we are sure those must exist. + // Therefore if all components are processed and we still receive `MissingComponents` + // it indicates an internal bug. + return Err(LookupRequestError::MissingComponentsAfterAllProcessed); } else { - Action::Continue + // Continue request, potentially request blobs + Action::Retry } } BlockProcessingResult::Ignored => { @@ -788,25 +542,28 @@ impl BlockLookups { // This implies that the cpu is overloaded. Drop the request. warn!( self.log, - "Single block processing was ignored, cpu might be overloaded"; - "action" => "dropping single block request" + "Lookup component processing ignored, cpu might be overloaded"; + "component" => ?R::response_type(), ); Action::Drop } BlockProcessingResult::Err(e) => { - let root = lookup.block_root(); - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); match e { BlockError::BeaconChainError(e) => { // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + error!(self.log, "Beacon chain error processing lookup component"; "block_root" => %block_root, "error" => ?e); Action::Drop } BlockError::ParentUnknown(block) => { - let slot = block.slot(); - let parent_root = block.parent_root(); - lookup.add_child_components(block.into()); - Action::ParentUnknown { parent_root, slot } + // Reverts the status of this request to `AwaitingProcessing` holding the + // downloaded data. A future call to `continue_requests` will re-submit it + // once there are no pending parent requests. + // Note: `BlockError::ParentUnknown` is only returned when processing + // blocks, not blobs. + request_state.revert_to_awaiting_processing()?; + Action::ParentUnknown { + parent_root: block.parent_root(), + } } ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { // These errors indicate that the execution layer is offline @@ -814,35 +571,36 @@ impl BlockLookups { debug!( self.log, "Single block lookup failed. 
Execution layer is offline / unsynced / misconfigured"; - "root" => %root, + "block_root" => ?block_root, "error" => ?e ); Action::Drop } - BlockError::AvailabilityCheck(e) => match e.category() { - AvailabilityCheckErrorCategory::Internal => { - warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.block_request_state.state.on_download_failure(); - lookup.blob_request_state.state.on_download_failure(); - Action::Retry - } - AvailabilityCheckErrorCategory::Malicious => { - warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e); - lookup.handle_availability_check_failure(cx); - Action::Retry - } - }, + BlockError::AvailabilityCheck(e) + if e.category() == AvailabilityCheckErrorCategory::Internal => + { + // These errors indicate internal problems and should not downscore the peer + warn!(self.log, "Internal availability check failure"; "block_root" => ?block_root, "error" => ?e); + + // Here we choose *not* to call `on_processing_failure` because this could result in a bad + // lookup state transition. This error invalidates both blob and block requests, and we don't know the + // state of both requests. Blobs may have already been successfully processed, for example. + // We opt to drop the lookup instead. + Action::Drop + } other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - if let Ok(block_peer) = lookup.block_request_state.state.processing_peer() { - cx.report_peer( - block_peer, - PeerAction::MidToleranceError, - "single_block_failure", - ); + debug!(self.log, "Invalid lookup component"; "block_root" => ?block_root, "component" => ?R::response_type(), "error" => ?other); + + let peer_id = request_state.on_processing_failure()?; + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + match R::response_type() { + ResponseType::Block => "lookup_block_processing_failure", + ResponseType::Blob => "lookup_blobs_processing_failure", + }, + ); - lookup.block_request_state.state.on_processing_failure(); - } Action::Retry } } @@ -851,501 +609,308 @@ impl BlockLookups { match action { Action::Retry => { - if let Err(e) = lookup.request_block_and_blobs(cx) { - warn!(self.log, "Single block lookup failed"; "block_root" => %block_root, "error" => ?e); - // Failed with too many retries, drop with noop - self.update_metrics(); - } else { - self.single_block_lookups.insert(target_id, lookup); - } + // Trigger download for all components in case `MissingComponents` failed the blob + // request.
Also if blobs are `AwaitingProcessing` and need to be progressed + lookup.continue_requests(cx) } - Action::ParentUnknown { parent_root, slot } => { - // TODO: Consider including all peers from the lookup, claiming to know this block, not - // just the one that sent this specific block - self.search_parent(slot, block_root, parent_root, peer_id, cx); - self.single_block_lookups.insert(target_id, lookup); + Action::ParentUnknown { parent_root } => { + let peers = lookup.all_peers().copied().collect::>(); + lookup.set_awaiting_parent(parent_root); + debug!(self.log, "Marking lookup as awaiting parent"; "id" => lookup.id, "block_root" => ?block_root, "parent_root" => ?parent_root); + self.search_parent_of_child(parent_root, block_root, &peers, cx); + Ok(LookupResult::Pending) } Action::Drop => { - // drop with noop - self.update_metrics(); + // Drop with noop + Err(LookupRequestError::Failed) } Action::Continue => { - self.single_block_lookups.insert(target_id, lookup); + // Drop this completed lookup only + Ok(LookupResult::Completed) } } } - pub fn parent_block_processed( + pub fn on_external_processing_result( &mut self, - chain_hash: Hash256, - result: BlockProcessingResult, + block_root: Hash256, + imported: bool, cx: &mut SyncNetworkContext, ) { - let index = self - .parent_lookups - .iter() - .enumerate() - .find(|(_, lookup)| lookup.chain_hash() == chain_hash) - .map(|(index, _)| index); - - let Some(mut parent_lookup) = index.map(|index| self.parent_lookups.remove(index)) else { - return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + let Some((id, lookup)) = self + .single_block_lookups + .iter_mut() + .find(|(_, lookup)| lookup.is_for_block(block_root)) + else { + // Ok to ignore gossip process events + return; }; - match &result { - BlockProcessingResult::Ok(status) => match status { - AvailabilityProcessingStatus::Imported(block_root) => { - debug!(self.log, "Parent block processing succeeded"; &parent_lookup, "block_root" => ?block_root) - } - AvailabilityProcessingStatus::MissingComponents(_, block_root) => { - debug!(self.log, "Parent missing parts, triggering single block lookup"; &parent_lookup,"block_root" => ?block_root) - } - }, - BlockProcessingResult::Err(e) => { - debug!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) - } - BlockProcessingResult::Ignored => { - debug!( - self.log, - "Parent block processing job was ignored"; - "action" => "re-requesting block", - &parent_lookup - ); - } - } + let lookup_result = if imported { + Ok(LookupResult::Completed) + } else { + lookup.continue_requests(cx) + }; + let id = *id; + self.on_lookup_result(id, lookup_result, "external_processing_result", cx); + } - match result { - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - _, - block_root, - )) => { - let expected_block_root = parent_lookup.current_parent_request.block_root(); - if block_root != expected_block_root { - warn!( - self.log, - "Parent block processing result/request root mismatch"; - "request" =>?expected_block_root, - "result" => ?block_root - ); - return; - } + /// Makes progress on the immediate children of `block_root` + pub fn continue_child_lookups(&mut self, block_root: Hash256, cx: &mut SyncNetworkContext) { + let mut lookup_results = vec![]; // < need to buffer lookup results to not re-borrow &mut self - // We only send parent blocks + blobs for processing together. 
This means a - // `MissingComponents` response here indicates missing blobs. Therefore we always - // register a blob processing failure here. - parent_lookup - .current_parent_request - .blob_request_state - .state - .on_processing_failure(); - match parent_lookup - .current_parent_request - .request_block_and_blobs(cx) - { - Ok(()) => self.parent_lookups.push(parent_lookup), - Err(e) => self.handle_parent_request_error(&mut parent_lookup, cx, e.into()), - } - } - BlockProcessingResult::Err(BlockError::ParentUnknown(block)) => { - parent_lookup.add_unknown_parent_block(block); - self.request_parent(parent_lookup, cx); - } - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_)) - | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown(_)) => { - let (chain_hash, blocks, hashes, block_request) = - parent_lookup.parts_for_processing(); - - let blocks = self.add_child_block_to_chain(chain_hash, blocks, cx).into(); - - let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - - // Check if the beacon processor is available - let Some(beacon_processor) = cx.beacon_processor_if_enabled() else { - return trace!( - self.log, - "Dropping parent chain segment that was ready for processing."; - "chain_hash" => %chain_hash, - ); - }; - - match beacon_processor.send_chain_segment(process_id, blocks) { - Ok(_) => { - self.processing_parent_lookups - .insert(chain_hash, (hashes, block_request)); - } - Err(e) => { - error!( - self.log, - "Failed to send chain segment to processor"; - "error" => ?e - ); - } - } - } - ref e @ BlockProcessingResult::Err(BlockError::ExecutionPayloadError(ref epe)) - if !epe.penalize_peer() => - { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Parent lookup failed. Execution layer is offline"; - "chain_hash" => %chain_hash, - "error" => ?e - ); - } - BlockProcessingResult::Err(outcome) => { - self.handle_parent_block_error(outcome, cx, parent_lookup); - } - BlockProcessingResult::Ignored => { - // Beacon processor signalled to ignore the block processing result. - // This implies that the cpu is overloaded. Drop the request. - warn!( - self.log, - "Parent block processing was ignored, cpu might be overloaded"; - "action" => "dropping parent request" - ); + for (id, lookup) in self.single_block_lookups.iter_mut() { + if lookup.awaiting_parent() == Some(block_root) { + lookup.resolve_awaiting_parent(); + debug!(self.log, "Continuing child lookup"; "parent_root" => ?block_root, "id" => id, "block_root" => ?lookup.block_root()); + let result = lookup.continue_requests(cx); + lookup_results.push((*id, result)); } } - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// Find the child block that spawned the parent lookup request and add it to the chain - /// to send for processing. - fn add_child_block_to_chain( - &mut self, - chain_hash: Hash256, - mut blocks: VecDeque>, - cx: &mut SyncNetworkContext, - ) -> VecDeque> { - // Find the child block that spawned the parent lookup request and add it to the chain - // to send for processing. 
- if let Some(child_lookup_id) = self - .single_block_lookups - .iter() - .find_map(|(id, lookup)| (lookup.block_root() == chain_hash).then_some(*id)) - { - let Some(child_lookup) = self.single_block_lookups.get_mut(&child_lookup_id) else { - debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); - return blocks; - }; - match child_lookup.get_cached_child_block() { - CachedChild::Ok(rpc_block) => { - // Insert this block at the front. This order is important because we later check - // for linear roots in `filter_chain_segment` - blocks.push_front(rpc_block); - } - CachedChild::DownloadIncomplete => { - trace!(self.log, "Parent lookup chain complete, awaiting child response"; "chain_hash" => ?chain_hash); - } - CachedChild::NotRequired => { - warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); - } - CachedChild::Err(e) => { - warn!( - self.log, - "Consistency error in child block triggering chain or parent lookups"; - "error" => ?e, - "chain_hash" => ?chain_hash - ); - child_lookup.handle_consistency_failure(cx); - if let Err(e) = child_lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Failed to request block and blobs, dropping lookup"; - "error" => ?e - ); - self.single_block_lookups.remove(&child_lookup_id); - } - } - } - } else { - debug!(self.log, "Missing child for parent lookup request"; "child_root" => ?chain_hash); - }; - blocks + for (id, result) in lookup_results { + self.on_lookup_result(id, result, "continue_child_lookups", cx); + } } - /// Handle the peer scoring, retries, and logging related to a `BlockError` returned from - /// processing a block + blobs for a parent lookup. - fn handle_parent_block_error( - &mut self, - outcome: BlockError<::EthSpec>, - cx: &mut SyncNetworkContext, - mut parent_lookup: ParentLookup, - ) { - // We should always have a block peer. - let block_peer_id = match parent_lookup.block_processing_peer() { - Ok(peer_id) => peer_id, - Err(e) => { - warn!(self.log, "Parent lookup in bad state"; "chain_hash" => %parent_lookup.chain_hash(), "error" => e); - return; - } - }; + /// Drops `dropped_id` lookup and all its children recursively. Lookups awaiting a parent need + /// the parent to make progress to resolve, therefore we must drop them if the parent is + /// dropped. + pub fn drop_lookup_and_children(&mut self, dropped_id: SingleLookupId) { + if let Some(dropped_lookup) = self.single_block_lookups.remove(&dropped_id) { + debug!(self.log, "Dropping lookup"; + "id" => ?dropped_id, + "block_root" => ?dropped_lookup.block_root(), + "awaiting_parent" => ?dropped_lookup.awaiting_parent(), + ); - // We may not have a blob peer, if there were no blobs required for this block. - let blob_peer_id = parent_lookup.blob_processing_peer().ok(); + let child_lookups = self + .single_block_lookups + .iter() + .filter(|(_, lookup)| lookup.awaiting_parent() == Some(dropped_lookup.block_root())) + .map(|(id, _)| *id) + .collect::>(); - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "block_peer_id" => %block_peer_id, - ); - // This currently can be a host of errors. We permit this due to the partial - // ambiguity. 
- cx.report_peer( - block_peer_id, - PeerAction::MidToleranceError, - "parent_request_err", - ); - // Don't downscore the same peer twice - if let Some(blob_peer_id) = blob_peer_id { - if block_peer_id != blob_peer_id { - debug!( - self.log, "Additionally down-scoring blob peer"; - "score_adjustment" => %PeerAction::MidToleranceError, - "outcome" => ?outcome, - "blob_peer_id" => %blob_peer_id, - ); - cx.report_peer( - blob_peer_id, - PeerAction::MidToleranceError, - "parent_request_err", - ); + for id in child_lookups { + self.drop_lookup_and_children(id); } } - - // Try again if possible - parent_lookup.processing_failed(); - self.request_parent(parent_lookup, cx); } - pub fn parent_chain_processed( + /// Common handler a lookup request error, drop it and update metrics + /// Returns true if the lookup is created or already exists + fn on_lookup_result( &mut self, - chain_hash: Hash256, - result: BatchProcessResult, + id: SingleLookupId, + result: Result, + source: &str, cx: &mut SyncNetworkContext, - ) { - let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else { - return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result); - }; - - debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); + ) -> bool { match result { - BatchProcessResult::Success { .. } => { - let Some(id) = self - .single_block_lookups - .iter() - .find_map(|(id, req)| (req.block_root() == chain_hash).then_some(*id)) - else { - warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); - return; - }; - - let Some(lookup) = self.single_block_lookups.get_mut(&id) else { - warn!(self.log, "No id found for single block lookup"; "chain_hash" => %chain_hash); - return; - }; - - match lookup.get_cached_child_block() { - CachedChild::Ok(rpc_block) => { - // This is the correct block, send it for processing - if self - .send_block_for_processing( - chain_hash, - rpc_block, - timestamp_now(), - BlockProcessType::SingleBlock { id }, - cx, - ) - .is_err() - { - // Remove to avoid inconsistencies - self.single_block_lookups.remove(&id); - } - } - CachedChild::DownloadIncomplete => { - trace!(self.log, "Parent chain complete, awaiting child response"; "chain_hash" => %chain_hash); - } - CachedChild::NotRequired => { - warn!(self.log, "Child not cached for parent lookup"; "chain_hash" => %chain_hash); - } - CachedChild::Err(e) => { - warn!( - self.log, - "Consistency error in child block triggering parent lookup"; - "chain_hash" => %chain_hash, - "error" => ?e - ); - lookup.handle_consistency_failure(cx); - if let Err(e) = lookup.request_block_and_blobs(cx) { - debug!(self.log, - "Failed to request block and blobs, dropping lookup"; - "error" => ?e - ); - self.single_block_lookups.remove(&id); - } - } - } - } - BatchProcessResult::FaultyFailure { - imported_blocks: _, - penalty, - } => { - self.failed_chains.insert(chain_hash); - for peer_source in request.all_used_peers() { - cx.report_peer(*peer_source, penalty, "parent_chain_failure") + Ok(LookupResult::Pending) => true, // no action + Ok(LookupResult::Completed) => { + if let Some(lookup) = self.single_block_lookups.remove(&id) { + debug!(self.log, "Dropping completed lookup"; "block" => ?lookup.block_root(), "id" => id); + metrics::inc_counter(&metrics::SYNC_LOOKUP_COMPLETED); + // Block imported, continue the requests of pending child blocks + self.continue_child_lookups(lookup.block_root(), cx); + 
self.update_metrics(); + } else { + debug!(self.log, "Attempting to drop non-existent lookup"; "id" => id); } - } - BatchProcessResult::NonFaultyFailure => { - // We might request this chain again if there is need but otherwise, don't try again + false + } + // If UnknownLookup do not log the request error. No need to drop child lookups nor + // update metrics because the lookup does not exist. + Err(LookupRequestError::UnknownLookup) => false, + Err(error) => { + debug!(self.log, "Dropping lookup on request error"; "id" => id, "source" => source, "error" => ?error); + metrics::inc_counter_vec(&metrics::SYNC_LOOKUP_DROPPED, &[error.into()]); + self.drop_lookup_and_children(id); + self.update_metrics(); + false } } + } + + /* Helper functions */ + + /// Drops all the single block requests and returns how many requests were dropped. + pub fn drop_single_block_requests(&mut self) -> usize { + let requests_to_drop = self.single_block_lookups.len(); + self.single_block_lookups.clear(); + requests_to_drop + } + pub fn update_metrics(&self) { metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, + &metrics::SYNC_SINGLE_BLOCK_LOOKUPS, + self.single_block_lookups.len() as i64, ); } - /* Helper functions */ + /// Perform some prune operations on lookups on some interval + pub fn prune_lookups(&mut self) { + self.drop_lookups_without_peers(); + self.drop_stuck_lookups(); + } - fn send_block_for_processing( - &self, - block_root: Hash256, - block: RpcBlock, - duration: Duration, - process_type: BlockProcessType, - cx: &SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => { - debug!(self.log, "Sending block for processing"; "block" => ?block_root, "process" => ?process_type); - if let Err(e) = beacon_processor.send_rpc_beacon_block( - block_root, - block, - duration, - process_type, - ) { - error!( - self.log, - "Failed to send sync block to processor"; - "error" => ?e - ); - Err(LookupRequestError::SendFailed( - "beacon processor send failure", - )) - } else { - Ok(()) - } - } - None => { - trace!(self.log, "Dropping block ready for processing. Beacon processor not available"; "block" => %block_root); - Err(LookupRequestError::SendFailed( - "beacon processor unavailable", - )) - } + /// Lookups without peers are allowed to exist for some time. See this common race condition: + /// + /// 1. Receive unknown block parent event + /// 2. Create child lookup with zero peers + /// 3. Parent is processed, before receiving any attestation for the child block + /// 4. Child lookup is attempted to make progress but has no peers + /// 5. We receive an attestion for child block and add a peer to the child block lookup + /// + /// On step 4 we could drop the lookup because we attempt to issue a request with no peers + /// available. This has two issues: + /// - We may drop the lookup while some other block component is processing, triggering an + /// unknown lookup error. This can potentially cause un-related child lookups to also be + /// dropped when calling `drop_lookup_and_children`. + /// - We lose all progress of the lookup, and have to re-download its components that we may + /// already have there cached. + /// + /// Instead there's no negative for keeping lookups with no peers around for some time. If we + /// regularly prune them, it should not be a memory concern (TODO: maybe yes!). 
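A minimal sketch of the pruning rule this describes, assuming a simplified lookup shape; the 10-second grace period mirrors `LOOKUP_MAX_DURATION_NO_PEERS_SECS`, while the struct and field names are invented for the example:

// Sketch: a lookup with no peers is kept for a grace period before being pruned,
// so the parent-processed / attestation-received race does not drop its progress.
use std::time::{Duration, Instant};

const NO_PEERS_GRACE: Duration = Duration::from_secs(10);

struct LookupLike {
    created: Instant,
    peers: usize,
    awaiting_event: bool, // mirrors the `is_awaiting_event` check below
}

fn should_prune(lookup: &LookupLike, now: Instant) -> bool {
    lookup.peers == 0
        && !lookup.awaiting_event
        && now.duration_since(lookup.created) > NO_PEERS_GRACE
}

fn main() {
    let lookup = LookupLike { created: Instant::now(), peers: 0, awaiting_event: false };
    // Freshly created without peers: kept, since a peer may still attest for this block.
    assert!(!should_prune(&lookup, Instant::now()));
}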
+ fn drop_lookups_without_peers(&mut self) { + for (lookup_id, block_root) in self + .single_block_lookups + .values() + .filter(|lookup| { + // Do not drop lookup that are awaiting events to prevent inconsinstencies. If a + // lookup gets stuck, it will be eventually pruned by `drop_stuck_lookups` + lookup.has_no_peers() + && lookup.elapsed_since_created() + > Duration::from_secs(LOOKUP_MAX_DURATION_NO_PEERS_SECS) + && !lookup.is_awaiting_event() + }) + .map(|lookup| (lookup.id, lookup.block_root())) + .collect::>() + { + debug!(self.log, "Dropping lookup with no peers"; + "id" => lookup_id, + "block_root" => ?block_root + ); + self.drop_lookup_and_children(lookup_id); } } - fn send_blobs_for_processing( - &self, - block_root: Hash256, - blobs: FixedBlobSidecarList, - duration: Duration, - process_type: BlockProcessType, - cx: &SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - match cx.beacon_processor_if_enabled() { - Some(beacon_processor) => { - trace!(self.log, "Sending blobs for processing"; "block" => ?block_root, "process_type" => ?process_type); - if let Err(e) = - beacon_processor.send_rpc_blobs(block_root, blobs, duration, process_type) - { - error!( - self.log, - "Failed to send sync blobs to processor"; - "error" => ?e - ); - Err(LookupRequestError::SendFailed( - "beacon processor send failure", - )) - } else { - Ok(()) + /// Safety mechanism to unstuck lookup sync. Lookup sync if purely event driven and depends on + /// external components to feed it events to make progress. If there is a bug in network, in + /// beacon processor, or here internally: lookups can get stuck forever. A stuck lookup can + /// stall a node indefinitely as other lookup will be awaiting on a parent lookup to make + /// progress. + /// + /// If a lookup lasts more than LOOKUP_MAX_DURATION_SECS this function will find its oldest + /// ancestor and then drop it and all its children. This action will allow the node to unstuck + /// itself. Bugs that cause lookups to get stuck may be triggered consistently. So this strategy + /// is useful for two reasons: + /// + /// - One single clear warn level log per stuck incident + /// - If the original bug is sporadic, it reduces the time a node is stuck from forever to 15 min + fn drop_stuck_lookups(&mut self) { + // While loop to find and drop all disjoint trees of potentially stuck lookups. + while let Some(stuck_lookup) = self.single_block_lookups.values().find(|lookup| { + lookup.elapsed_since_created() > Duration::from_secs(LOOKUP_MAX_DURATION_STUCK_SECS) + }) { + let ancestor_stuck_lookup = match self.find_oldest_ancestor_lookup(stuck_lookup) { + Ok(lookup) => lookup, + Err(e) => { + warn!(self.log, "Error finding oldest ancestor lookup"; "error" => ?e); + // Default to dropping the lookup that exceeds the max duration so at least + // eventually sync should be unstuck + stuck_lookup } + }; + + if stuck_lookup.id == ancestor_stuck_lookup.id { + warn!(self.log, "Notify the devs a sync lookup is stuck"; + "block_root" => ?stuck_lookup.block_root(), + "lookup" => ?stuck_lookup, + ); + } else { + warn!(self.log, "Notify the devs a sync lookup is stuck"; + "block_root" => ?stuck_lookup.block_root(), + "lookup" => ?stuck_lookup, + "ancestor_block_root" => ?ancestor_stuck_lookup.block_root(), + "ancestor_lookup" => ?ancestor_stuck_lookup, + ); } - None => { - trace!(self.log, "Dropping blobs ready for processing. 
Beacon processor not available"; "block_root" => %block_root); - Err(LookupRequestError::SendFailed( - "beacon processor unavailable", + + metrics::inc_counter(&metrics::SYNC_LOOKUPS_STUCK); + self.drop_lookup_and_children(ancestor_stuck_lookup.id); + } + } + + /// Recursively find the oldest ancestor lookup of another lookup + fn find_oldest_ancestor_lookup<'a>( + &'a self, + lookup: &'a SingleBlockLookup, + ) -> Result<&'a SingleBlockLookup, String> { + if let Some(awaiting_parent) = lookup.awaiting_parent() { + if let Some(lookup) = self + .single_block_lookups + .values() + .find(|l| l.block_root() == awaiting_parent) + { + self.find_oldest_ancestor_lookup(lookup) + } else { + Err(format!( + "Lookup references unknown parent {awaiting_parent:?}" )) } + } else { + Ok(lookup) } } - /// Attempts to request the next unknown parent. This method handles peer scoring and dropping - /// the lookup in the event of failure. - fn request_parent( + /// Adds peers to a lookup and its ancestors recursively. + /// Note: Takes a `lookup_id` as argument to allow recursion on mutable lookups, without having + /// to duplicate the code to add peers to a lookup + fn add_peers_to_lookup_and_ancestors( &mut self, - mut parent_lookup: ParentLookup, + lookup_id: SingleLookupId, + peers: &[PeerId], cx: &mut SyncNetworkContext, - ) { - let response = parent_lookup.request_parent(cx); - - match response { - Err(e) => { - self.handle_parent_request_error(&mut parent_lookup, cx, e); + ) -> Result<(), String> { + let lookup = self + .single_block_lookups + .get_mut(&lookup_id) + .ok_or(format!("Unknown lookup for id {lookup_id}"))?; + + let mut added_some_peer = false; + for peer in peers { + if lookup.add_peer(*peer) { + added_some_peer = true; + debug!(self.log, "Adding peer to existing single block lookup"; + "block_root" => ?lookup.block_root(), + "peer" => ?peer + ); } - Ok(_) => self.parent_lookups.push(parent_lookup), } - // We remove and add back again requests so we want this updated regardless of outcome. - metrics::set_gauge( - &metrics::SYNC_PARENT_BLOCK_LOOKUPS, - self.parent_lookups.len() as i64, - ); - } - - /// Drops all the single block requests and returns how many requests were dropped. - pub fn drop_single_block_requests(&mut self) -> usize { - let requests_to_drop = self.single_block_lookups.len(); - self.single_block_lookups.clear(); - requests_to_drop - } - - /// Drops all the parent chain requests and returns how many requests were dropped. - pub fn drop_parent_chain_requests(&mut self) -> usize { - self.parent_lookups.drain(..).len() - } - - pub fn downscore_on_rpc_error( - &self, - peer_id: &PeerId, - error: &LookupVerifyError, - cx: &SyncNetworkContext, - ) { - // Note: logging the report event here with the full error display. 
The log inside
-        // `report_peer` only includes a smaller string, like "invalid_data"
-        let error_str: &'static str = error.into();
-
-        debug!(self.log, "reporting peer for sync lookup error"; "error" => error_str);
-        cx.report_peer(*peer_id, PeerAction::LowToleranceError, error_str);
-    }
-
-    pub fn update_metrics(&self) {
-        metrics::set_gauge(
-            &metrics::SYNC_SINGLE_BLOCK_LOOKUPS,
-            self.single_block_lookups.len() as i64,
-        );
-
-        metrics::set_gauge(
-            &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_lookups.len() as i64,
-        );
+        if let Some(parent_root) = lookup.awaiting_parent() {
+            if let Some((&child_id, _)) = self
+                .single_block_lookups
+                .iter()
+                .find(|(_, l)| l.block_root() == parent_root)
+            {
+                self.add_peers_to_lookup_and_ancestors(child_id, peers, cx)
+            } else {
+                Err(format!("Lookup references unknown parent {parent_root:?}"))
+            }
+        } else if added_some_peer {
+            // If this lookup is not awaiting a parent and we added at least one peer, attempt to
+            // make progress. It is possible that a lookup is created with zero peers, attempted to
+            // make progress, and then receives peers. After that time the lookup will never be
+            // pruned with `drop_lookups_without_peers` because it has peers. This is a rare corner
+            // case, but it can result in stuck lookups.
+            let result = lookup.continue_requests(cx);
+            self.on_lookup_result(lookup_id, result, "add_peers", cx);
+            Ok(())
+        } else {
+            Ok(())
+        }
    }
}
diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs
new file mode 100644
index 00000000000..7f4fe5119f6
--- /dev/null
+++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs
@@ -0,0 +1,198 @@
+use super::single_block_lookup::SingleBlockLookup;
+use beacon_chain::BeaconChainTypes;
+use std::collections::{HashMap, HashSet};
+use types::Hash256;
+
+/// Summary of a lookup for which we may not know its parent_root yet
+pub(crate) struct Node {
+    block_root: Hash256,
+    parent_root: Option<Hash256>,
+}
+
+impl<T: BeaconChainTypes> From<&SingleBlockLookup<T>> for Node {
+    fn from(value: &SingleBlockLookup<T>) -> Self {
+        Self {
+            block_root: value.block_root(),
+            parent_root: value.awaiting_parent(),
+        }
+    }
+}
+
+/// Wrapper around a chain of block roots that has at least one element (the tip)
+pub(crate) struct NodeChain {
+    // Parent chain blocks in descending slot order
+    pub(crate) chain: Vec<Hash256>,
+    pub(crate) tip: Hash256,
+}
+
+impl NodeChain {
+    /// Returns the block_root of the oldest ancestor (min slot) of this chain
+    pub(crate) fn ancestor(&self) -> Hash256 {
+        self.chain.last().copied().unwrap_or(self.tip)
+    }
+    pub(crate) fn len(&self) -> usize {
+        self.chain.len()
+    }
+}
+
+/// Given a set of nodes that reference each other, returns a list of chains with unique tips that
+/// contain at least two elements. In descending slot order (tip first).
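// A worked example of the behaviour described above, mirroring the
// `compute_parent_chains_two_forking_branches` test later in this file:
//
//     nodes:  0, 1 (parent 0), 2 (parent 1), 3 (parent 1)
//     tips:   2 and 3 (blocks with no children)
//     result: [h(2), h(1), h(0)] and [h(3), h(1), h(0)], in descending slot order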
+pub(crate) fn compute_parent_chains(nodes: &[Node]) -> Vec<NodeChain> {
+    let mut child_to_parent = HashMap::new();
+    let mut parent_to_child = HashMap::<Hash256, Vec<Hash256>>::new();
+    for node in nodes {
+        child_to_parent.insert(node.block_root, node.parent_root);
+        if let Some(parent_root) = node.parent_root {
+            parent_to_child
+                .entry(parent_root)
+                .or_default()
+                .push(node.block_root);
+        }
+    }
+
+    let mut parent_chains = vec![];
+
+    // Iterate blocks with no children
+    for tip in nodes {
+        let mut block_root = tip.block_root;
+        if !parent_to_child.contains_key(&block_root) {
+            let mut chain = vec![];
+
+            // Resolve chain of blocks
+            while let Some(parent_root) = child_to_parent.get(&block_root) {
+                // block_root is a known block that may or may not have a parent root
+                chain.push(block_root);
+                if let Some(parent_root) = parent_root {
+                    block_root = *parent_root;
+                } else {
+                    break;
+                }
+            }
+
+            if chain.len() > 1 {
+                parent_chains.push(NodeChain {
+                    chain,
+                    tip: tip.block_root,
+                });
+            }
+        }
+    }
+
+    parent_chains
+}
+
+/// Given a list of node chains, find the oldest node of a specific chain that is not contained in
+/// any other chain.
+pub(crate) fn find_oldest_fork_ancestor(
+    parent_chains: Vec<NodeChain>,
+    chain_idx: usize,
+) -> Result<Hash256, &'static str> {
+    let mut other_blocks = HashSet::new();
+
+    // Register blocks from other chains
+    for (i, parent_chain) in parent_chains.iter().enumerate() {
+        if i != chain_idx {
+            for block in &parent_chain.chain {
+                other_blocks.insert(block);
+            }
+        }
+    }
+
+    // Should never happen
+    let parent_chain = parent_chains
+        .get(chain_idx)
+        .ok_or("chain_idx out of bounds")?;
+    // Find the first block in the target parent chain that is not in other parent chains
+    // Iterate in ascending slot order
+    for block in parent_chain.chain.iter().rev() {
+        if !other_blocks.contains(block) {
+            return Ok(*block);
+        }
+    }
+
+    // No match means that the chain is fully contained within another chain.
This should never + // happen, but if that was the case just return the tip + Ok(parent_chain.tip) +} + +#[cfg(test)] +mod tests { + use super::{compute_parent_chains, find_oldest_fork_ancestor, Node}; + use types::Hash256; + + fn h(n: u64) -> Hash256 { + Hash256::from_low_u64_be(n) + } + + fn n(block: u64) -> Node { + Node { + block_root: h(block), + parent_root: None, + } + } + + fn np(parent: u64, block: u64) -> Node { + Node { + block_root: h(block), + parent_root: Some(h(parent)), + } + } + + fn compute_parent_chains_test(nodes: &[Node], expected_chain: Vec>) { + assert_eq!( + compute_parent_chains(nodes) + .iter() + .map(|c| c.chain.clone()) + .collect::>(), + expected_chain + ); + } + + fn find_oldest_fork_ancestor_test(nodes: &[Node], expected: Hash256) { + let chains = compute_parent_chains(nodes); + println!( + "chains {:?}", + chains.iter().map(|c| &c.chain).collect::>() + ); + assert_eq!(find_oldest_fork_ancestor(chains, 0).unwrap(), expected); + } + + #[test] + fn compute_parent_chains_empty_case() { + compute_parent_chains_test(&[], vec![]); + } + + #[test] + fn compute_parent_chains_single_branch() { + compute_parent_chains_test(&[n(0), np(0, 1), np(1, 2)], vec![vec![h(2), h(1), h(0)]]); + } + + #[test] + fn compute_parent_chains_single_branch_with_solo() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), np(3, 4)], + vec![vec![h(2), h(1), h(0)]], + ); + } + + #[test] + fn compute_parent_chains_two_forking_branches() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), np(1, 3)], + vec![vec![h(2), h(1), h(0)], vec![h(3), h(1), h(0)]], + ); + } + + #[test] + fn compute_parent_chains_two_independent_branches() { + compute_parent_chains_test( + &[n(0), np(0, 1), np(1, 2), n(3), np(3, 4)], + vec![vec![h(2), h(1), h(0)], vec![h(4), h(3)]], + ); + } + + #[test] + fn find_oldest_fork_ancestor_simple_case() { + find_oldest_fork_ancestor_test(&[n(0), np(0, 1), np(1, 2), np(0, 3)], h(1)) + } +} diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs deleted file mode 100644 index 11eb908953f..00000000000 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ /dev/null @@ -1,227 +0,0 @@ -use super::common::LookupType; -use super::single_block_lookup::{LookupRequestError, SingleBlockLookup}; -use super::{DownloadedBlock, PeerId}; -use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; -use beacon_chain::block_verification_types::AsBlock; -use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker}; -use beacon_chain::BeaconChainTypes; -use std::collections::VecDeque; -use std::sync::Arc; -use store::Hash256; - -/// How many attempts we try to find a parent of a block before we give up trying. -pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; -/// The maximum depth we will search for a parent block. In principle we should have sync'd any -/// canonical chain to its head once the peer connects. A chain should not appear where it's depth -/// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; - -/// Maintains a sequential list of parents to lookup and the lookup's current state. -pub(crate) struct ParentLookup { - /// The root of the block triggering this parent request. - chain_hash: Hash256, - /// The blocks that have currently been downloaded. 
- downloaded_blocks: Vec>, - /// Request of the last parent. - pub current_parent_request: SingleBlockLookup, -} - -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum RequestError { - SendFailed(&'static str), - ChainTooLong, - /// We witnessed too many failures trying to complete this parent lookup. - TooManyAttempts { - /// We received more failures trying to process the blocks than downloading them - /// from peers. - cannot_process: bool, - }, - NoPeers, - BadState(String), -} - -impl ParentLookup { - pub fn new( - block_root: Hash256, - parent_root: Hash256, - peer_id: PeerId, - da_checker: Arc>, - cx: &mut SyncNetworkContext, - ) -> Self { - let current_parent_request = SingleBlockLookup::new( - parent_root, - Some(ChildComponents::empty(block_root)), - &[peer_id], - da_checker, - cx.next_id(), - LookupType::Parent, - ); - - Self { - chain_hash: block_root, - downloaded_blocks: vec![], - current_parent_request, - } - } - - pub fn contains_block(&self, block_root: &Hash256) -> bool { - self.downloaded_blocks - .iter() - .any(|(root, _d_block)| root == block_root) - } - - pub fn is_for_block(&self, block_root: Hash256) -> bool { - self.current_parent_request.is_for_block(block_root) - } - - /// Attempts to request the next unknown parent. If the request fails, it should be removed. - pub fn request_parent(&mut self, cx: &mut SyncNetworkContext) -> Result<(), RequestError> { - // check to make sure this request hasn't failed - if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE { - return Err(RequestError::ChainTooLong); - } - - self.current_parent_request - .request_block_and_blobs(cx) - .map_err(Into::into) - } - - pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { - self.current_parent_request - .block_request_state - .state - .check_peer_disconnected(peer_id) - .and_then(|()| { - self.current_parent_request - .blob_request_state - .state - .check_peer_disconnected(peer_id) - }) - } - - pub fn add_unknown_parent_block(&mut self, block: RpcBlock) { - let next_parent = block.parent_root(); - // Cache the block. - let current_root = self.current_parent_request.block_root(); - self.downloaded_blocks.push((current_root, block)); - - // Update the parent request. - self.current_parent_request - .update_requested_parent_block(next_parent) - } - - pub fn block_processing_peer(&self) -> Result { - self.current_parent_request - .block_request_state - .state - .processing_peer() - } - - pub fn blob_processing_peer(&self) -> Result { - self.current_parent_request - .blob_request_state - .state - .processing_peer() - } - - /// Consumes the parent request and destructures it into it's parts. - #[allow(clippy::type_complexity)] - pub fn parts_for_processing( - self, - ) -> ( - Hash256, - VecDeque>, - Vec, - SingleBlockLookup, - ) { - let ParentLookup { - chain_hash, - downloaded_blocks, - current_parent_request, - } = self; - let block_count = downloaded_blocks.len(); - let mut blocks = VecDeque::with_capacity(block_count); - let mut hashes = Vec::with_capacity(block_count); - for (hash, block) in downloaded_blocks.into_iter() { - blocks.push_back(block); - hashes.push(hash); - } - (chain_hash, blocks, hashes, current_parent_request) - } - - /// Get the parent lookup's chain hash. 
- pub fn chain_hash(&self) -> Hash256 { - self.chain_hash - } - - pub fn processing_failed(&mut self) { - self.current_parent_request - .block_request_state - .state - .on_processing_failure(); - self.current_parent_request - .blob_request_state - .state - .on_processing_failure(); - if let Some(components) = self.current_parent_request.child_components.as_mut() { - components.downloaded_block = None; - components.downloaded_blobs = <_>::default(); - } - } - - pub fn add_peer(&mut self, peer: PeerId) { - self.current_parent_request.add_peer(peer) - } - - /// Adds a list of peers to the parent request. - pub fn add_peers(&mut self, peers: &[PeerId]) { - self.current_parent_request.add_peers(peers) - } - - pub fn all_used_peers(&self) -> impl Iterator + '_ { - self.current_parent_request.all_used_peers() - } -} - -impl From for RequestError { - fn from(e: LookupRequestError) -> Self { - use LookupRequestError as E; - match e { - E::TooManyAttempts { cannot_process } => { - RequestError::TooManyAttempts { cannot_process } - } - E::NoPeers => RequestError::NoPeers, - E::SendFailed(msg) => RequestError::SendFailed(msg), - E::BadState(msg) => RequestError::BadState(msg), - } - } -} - -impl slog::KV for ParentLookup { - fn serialize( - &self, - record: &slog::Record, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_arguments("chain_hash", &format_args!("{}", self.chain_hash))?; - slog::Value::serialize(&self.current_parent_request, record, "parent", serializer)?; - serializer.emit_usize("downloaded_blocks", self.downloaded_blocks.len())?; - slog::Result::Ok(()) - } -} - -impl RequestError { - pub fn as_static(&self) -> &'static str { - match self { - RequestError::SendFailed(e) => e, - RequestError::ChainTooLong => "chain_too_long", - RequestError::TooManyAttempts { cannot_process } if *cannot_process => { - "too_many_processing_attempts" - } - RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", - RequestError::NoPeers => "no_peers", - RequestError::BadState(..) 
=> "bad_state", - } - } -} diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 49ef1dd15bf..0466636fb7d 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,24 +1,30 @@ -use super::common::LookupType; -use super::PeerId; +use super::common::ResponseType; +use super::{BlockComponent, PeerId, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS}; use crate::sync::block_lookups::common::RequestState; -use crate::sync::block_lookups::Id; -use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; -use beacon_chain::data_availability_checker::{ - AvailabilityCheckError, DataAvailabilityChecker, MissingBlobs, +use crate::sync::network_context::{ + LookupRequestResult, ReqId, RpcRequestSendError, SendErrorProcessor, SyncNetworkContext, }; use beacon_chain::BeaconChainTypes; -use itertools::Itertools; -use lighthouse_network::PeerAction; +use derivative::Derivative; +use lighthouse_network::service::api_types::Id; use rand::seq::IteratorRandom; -use slog::{debug, Logger}; use std::collections::HashSet; use std::fmt::Debug; use std::sync::Arc; +use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; -use types::EthSpec; +use types::blob_sidecar::FixedBlobSidecarList; +use types::{EthSpec, SignedBeaconBlock}; + +// Dedicated enum for LookupResult to force its usage +#[must_use = "LookupResult must be handled with on_lookup_result"] +pub enum LookupResult { + /// Lookup completed successfully + Completed, + /// Lookup is expecting some future event from the network + Pending, +} #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { @@ -27,558 +33,562 @@ pub enum LookupRequestError { /// The failed attempts were primarily due to processing failures. cannot_process: bool, }, + /// No peers left to serve this lookup NoPeers, - SendFailed(&'static str), + /// Error sending event to network + SendFailedNetwork(RpcRequestSendError), + /// Error sending event to processor + SendFailedProcessor(SendErrorProcessor), + /// Inconsistent lookup request state BadState(String), + /// Lookup failed for some other reason and should be dropped + Failed, + /// Received MissingComponents when all components have been processed. This should never + /// happen, and indicates some internal bug + MissingComponentsAfterAllProcessed, + /// Attempted to retrieve a not known lookup id + UnknownLookup, + /// Received a download result for a different request id than the in-flight request. + /// There should only exist a single request at a time. Having multiple requests is a bug and + /// can result in undefined state, so it's treated as a hard error and the lookup is dropped. + UnexpectedRequestId { + expected_req_id: ReqId, + req_id: ReqId, + }, } +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct SingleBlockLookup { pub id: Id, - pub lookup_type: LookupType, - pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, - pub da_checker: Arc>, - /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent` - /// because any blocks or blobs without parents won't hit the data availability cache. 
- pub child_components: Option>, + pub block_request_state: BlockRequestState, + pub blob_request_state: BlobRequestState, + /// Peers that claim to have imported this set of block components + #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] + peers: HashSet, + block_root: Hash256, + awaiting_parent: Option, + created: Instant, } impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, - child_components: Option>, peers: &[PeerId], - da_checker: Arc>, id: Id, - lookup_type: LookupType, + awaiting_parent: Option, ) -> Self { - let is_deneb = da_checker.is_deneb(); Self { id, - lookup_type, - block_request_state: BlockRequestState::new(requested_block_root, peers), - blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), - da_checker, - child_components, + block_request_state: BlockRequestState::new(requested_block_root), + blob_request_state: BlobRequestState::new(requested_block_root), + peers: HashSet::from_iter(peers.iter().copied()), + block_root: requested_block_root, + awaiting_parent, + created: Instant::now(), } } /// Get the block root that is being requested. pub fn block_root(&self) -> Hash256 { - self.block_request_state.requested_block_root + self.block_root } - /// Check the block root matches the requested block root. - pub fn is_for_block(&self, block_root: Hash256) -> bool { - self.block_root() == block_root + pub fn awaiting_parent(&self) -> Option { + self.awaiting_parent } - /// Update the requested block, this should only be used in a chain of parent lookups to request - /// the next parent. - pub fn update_requested_parent_block(&mut self, block_root: Hash256) { - self.block_request_state.requested_block_root = block_root; - self.blob_request_state.block_root = block_root; - self.block_request_state.state.state = State::AwaitingDownload; - self.blob_request_state.state.state = State::AwaitingDownload; - self.child_components = Some(ChildComponents::empty(block_root)); + /// Mark this lookup as awaiting a parent lookup from being processed. Meanwhile don't send + /// components for processing. + pub fn set_awaiting_parent(&mut self, parent_root: Hash256) { + self.awaiting_parent = Some(parent_root) } - /// Get all unique used peers across block and blob requests. - pub fn all_used_peers(&self) -> impl Iterator + '_ { - self.block_request_state - .state - .get_used_peers() - .chain(self.blob_request_state.state.get_used_peers()) - .unique() + /// Mark this lookup as no longer awaiting a parent lookup. Components can be sent for + /// processing. + pub fn resolve_awaiting_parent(&mut self) { + self.awaiting_parent = None; } - /// Send the necessary requests for blocks and/or blobs. This will check whether we have - /// downloaded the block and/or blobs already and will not send requests if so. It will also - /// inspect the request state or blocks and blobs to ensure we are not already processing or - /// downloading the block and/or blobs. 
- pub fn request_block_and_blobs( - &mut self, - cx: &mut SyncNetworkContext, - ) -> Result<(), LookupRequestError> { - let block_already_downloaded = self.block_already_downloaded(); - let blobs_already_downloaded = self.blobs_already_downloaded(); - - if !block_already_downloaded { - self.block_request_state - .build_request_and_send(self.id, self.lookup_type, cx)?; - } - if !blobs_already_downloaded { - self.blob_request_state - .build_request_and_send(self.id, self.lookup_type, cx)?; - } - Ok(()) + /// Returns the time elapsed since this lookup was created + pub fn elapsed_since_created(&self) -> Duration { + self.created.elapsed() } - /// Returns a `CachedChild`, which is a wrapper around a `RpcBlock` that is either: - /// - /// 1. `NotRequired`: there is no child caching required for this lookup. - /// 2. `DownloadIncomplete`: Child caching is required, but all components are not yet downloaded. - /// 3. `Ok`: The child is required and we have downloaded it. - /// 4. `Err`: The child is required, but has failed consistency checks. - pub fn get_cached_child_block(&self) -> CachedChild { - if let Some(components) = self.child_components.as_ref() { - let Some(block) = components.downloaded_block.as_ref() else { - return CachedChild::DownloadIncomplete; - }; - - if !self.missing_blob_ids().is_empty() { - return CachedChild::DownloadIncomplete; - } - - match RpcBlock::new_from_fixed( - self.block_request_state.requested_block_root, - block.clone(), - components.downloaded_blobs.clone(), - ) { - Ok(rpc_block) => CachedChild::Ok(rpc_block), - Err(e) => CachedChild::Err(e), + /// Maybe insert a verified response into this lookup. Returns true if imported + pub fn add_child_components(&mut self, block_component: BlockComponent) -> bool { + match block_component { + BlockComponent::Block(block) => self + .block_request_state + .state + .insert_verified_response(block), + BlockComponent::Blob(_) => { + // For now ignore single blobs, as the blob request state assumes all blobs are + // attributed to the same peer = the peer serving the remaining blobs. Ignoring this + // block component has a minor effect, causing the node to re-request this blob + // once the parent chain is successfully resolved + false } - } else { - CachedChild::NotRequired - } - } - - /// Accepts a verified response, and adds it to the child components if required. This method - /// returns a `CachedChild` which provides a completed block + blob response if all components have been - /// received, or information about whether the child is required and if it has been downloaded. - pub fn add_response>( - &mut self, - verified_response: R::VerifiedResponseType, - ) -> CachedChild { - if let Some(child_components) = self.child_components.as_mut() { - R::add_to_child_components(verified_response, child_components); - self.get_cached_child_block() - } else { - CachedChild::NotRequired - } - } - - /// Add a child component to the lookup request. Merges with any existing child components. - pub fn add_child_components(&mut self, components: ChildComponents) { - if let Some(ref mut existing_components) = self.child_components { - let ChildComponents { - block_root: _, - downloaded_block, - downloaded_blobs, - } = components; - if let Some(block) = downloaded_block { - existing_components.merge_block(block); - } - existing_components.merge_blobs(downloaded_blobs); - } else { - self.child_components = Some(components); } } - /// Add all given peers to both block and blob request states. 
- pub fn add_peer(&mut self, peer_id: PeerId) { - self.block_request_state.state.add_peer(&peer_id); - self.blob_request_state.state.add_peer(&peer_id); + /// Check the block root matches the requested block root. + pub fn is_for_block(&self, block_root: Hash256) -> bool { + self.block_root() == block_root } - /// Add all given peers to both block and blob request states. - pub fn add_peers(&mut self, peers: &[PeerId]) { - for peer in peers { - self.add_peer(*peer); - } + /// Returns true if the block has already been downloaded. + pub fn both_components_processed(&self) -> bool { + self.block_request_state.state.is_processed() + && self.blob_request_state.state.is_processed() } - /// Returns true if the block has already been downloaded. - pub fn both_components_downloaded(&self) -> bool { - self.block_request_state.state.is_downloaded() - && self.blob_request_state.state.is_downloaded() + /// Returns true if this request is expecting some event to make progress + pub fn is_awaiting_event(&self) -> bool { + self.awaiting_parent.is_some() + || self.block_request_state.state.is_awaiting_event() + || self.blob_request_state.state.is_awaiting_event() } - /// Returns true if the block has already been downloaded. - pub fn both_components_processed(&self) -> bool { - self.block_request_state.state.is_processed() + /// Makes progress on all requests of this lookup. Any error is not recoverable and must result + /// in dropping the lookup. May mark the lookup as completed. + pub fn continue_requests( + &mut self, + cx: &mut SyncNetworkContext, + ) -> Result { + // TODO: Check what's necessary to download, specially for blobs + self.continue_request::>(cx)?; + self.continue_request::>(cx)?; + + // If all components of this lookup are already processed, there will be no future events + // that can make progress so it must be dropped. Consider the lookup completed. + // This case can happen if we receive the components from gossip during a retry. + if self.block_request_state.state.is_processed() && self.blob_request_state.state.is_processed() + { + Ok(LookupResult::Completed) + } else { + Ok(LookupResult::Pending) + } } - /// Checks both the block and blob request states to see if the peer is disconnected. - /// - /// Returns true if the lookup should be dropped. - pub fn should_drop_lookup_on_disconnected_peer( + /// Potentially makes progress on this request if it's in a progress-able state + fn continue_request>( &mut self, - peer_id: &PeerId, cx: &mut SyncNetworkContext, - log: &Logger, - ) -> bool { - let block_root = self.block_root(); - let block_peer_disconnected = self + ) -> Result<(), LookupRequestError> { + let id = self.id; + let awaiting_parent = self.awaiting_parent.is_some(); + let downloaded_block_expected_blobs = self .block_request_state .state - .check_peer_disconnected(peer_id) - .is_err(); - let blob_peer_disconnected = self - .blob_request_state - .state - .check_peer_disconnected(peer_id) - .is_err(); + .peek_downloaded_data() + .map(|block| block.num_expected_blobs()); + let block_is_processed = self.block_request_state.state.is_processed(); + let request = R::request_state_mut(self); + + // Attempt to progress awaiting downloads + if request.get_state().is_awaiting_download() { + // Verify the current request has not exceeded the maximum number of attempts. 
+ let request_state = request.get_state(); + if request_state.failed_attempts() >= SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS { + let cannot_process = request_state.more_failed_processing_attempts(); + return Err(LookupRequestError::TooManyAttempts { cannot_process }); + } + + let Some(peer_id) = self.use_rand_available_peer() else { + // Allow lookup to not have any peers and do nothing. This is an optimization to not + // lose progress of lookups created from a block with unknown parent before we receive + // attestations for said block. + // Lookup sync event safety: If a lookup requires peers to make progress, and does + // not receive any new peers for some time it will be dropped. If it receives a new + // peer it must attempt to make progress. + R::request_state_mut(self) + .get_state_mut() + .update_awaiting_download_status("no peers"); + return Ok(()); + }; - if block_peer_disconnected || blob_peer_disconnected { - if let Err(e) = self.request_block_and_blobs(cx) { - debug!(log, "Single lookup failed on peer disconnection"; "block_root" => ?block_root, "error" => ?e); - return true; + let request = R::request_state_mut(self); + match request.make_request(id, peer_id, downloaded_block_expected_blobs, cx)? { + LookupRequestResult::RequestSent(req_id) => { + // Lookup sync event safety: If make_request returns `RequestSent`, we are + // guaranteed that `BlockLookups::on_download_response` will be called exactly + // with this `req_id`. + request.get_state_mut().on_download_start(req_id)? + } + LookupRequestResult::NoRequestNeeded => { + // Lookup sync event safety: Advances this request to the terminal `Processed` + // state. If all requests reach this state, the request is marked as completed + // in `Self::continue_requests`. + request.get_state_mut().on_completed_request()? + } + // Sync will receive a future event to make progress on the request, do nothing now + LookupRequestResult::Pending(reason) => { + // Lookup sync event safety: Refer to the code paths constructing + // `LookupRequestResult::Pending` + request + .get_state_mut() + .update_awaiting_download_status(reason); + return Ok(()); + } } - } - false - } - /// Returns `true` if the block has already been downloaded. - pub(crate) fn block_already_downloaded(&self) -> bool { - if let Some(components) = self.child_components.as_ref() { - components.downloaded_block.is_some() - } else { - self.da_checker.has_block(&self.block_root()) + // Otherwise, attempt to progress awaiting processing + // If this request is awaiting a parent lookup to be processed, do not send for processing. + // The request will be rejected with unknown parent error. + // + // TODO: The condition `block_is_processed || Block` can be dropped after checking for + // unknown parent root when import RPC blobs + } else if !awaiting_parent + && (block_is_processed || matches!(R::response_type(), ResponseType::Block)) + { + // maybe_start_processing returns Some if state == AwaitingProcess. This pattern is + // useful to conditionally access the result data. + if let Some(result) = request.get_state_mut().maybe_start_processing() { + // Lookup sync event safety: If `send_for_processing` returns Ok() we are guaranteed + // that `BlockLookups::on_processing_result` will be called exactly once with this + // lookup_id + return R::send_for_processing(id, result, cx); + } + // Lookup sync event safety: If the request is not in `AwaitingDownload` or + // `AwaitingProcessing` state it is guaranteed to receive some event to make progress. 
} - } - /// Updates the `requested_ids` field of the `BlockRequestState` with the most recent picture - /// of which blobs still need to be requested. Returns `true` if there are no more blobs to - /// request. - pub(crate) fn blobs_already_downloaded(&mut self) -> bool { - if matches!(self.blob_request_state.state.state, State::AwaitingDownload) { - self.update_blobs_request(); - } - self.blob_request_state.requested_ids.is_empty() + // Lookup sync event safety: If a lookup is awaiting a parent we are guaranteed to either: + // (1) attempt to make progress with `BlockLookups::continue_child_lookups` if the parent + // lookup completes, or (2) get dropped if the parent fails and is dropped. + + Ok(()) } - /// Updates this request with the most recent picture of which blobs still need to be requested. - pub fn update_blobs_request(&mut self) { - self.blob_request_state.requested_ids = self.missing_blob_ids(); + /// Get all unique peers that claim to have imported this set of block components + pub fn all_peers(&self) -> impl Iterator + '_ { + self.peers.iter() } - /// If `child_components` is `Some`, we know block components won't hit the data - /// availability cache, so we don't check its processing cache unless `child_components` - /// is `None`. - pub(crate) fn missing_blob_ids(&self) -> MissingBlobs { - let block_root = self.block_root(); - if let Some(components) = self.child_components.as_ref() { - self.da_checker.get_missing_blob_ids( - block_root, - components.downloaded_block.as_ref().map(|b| b.as_ref()), - &components.downloaded_blobs, - ) - } else { - self.da_checker.get_missing_blob_ids_with(block_root) - } + /// Add peer to all request states. The peer must be able to serve this request. + /// Returns true if the peer was newly inserted into some request state. + pub fn add_peer(&mut self, peer_id: PeerId) -> bool { + self.peers.insert(peer_id) } - /// Penalizes a blob peer if it should have blobs but didn't return them to us. - pub fn penalize_blob_peer(&mut self, cx: &SyncNetworkContext) { - if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() { - cx.report_peer( - blob_peer, - PeerAction::MidToleranceError, - "single_blob_failure", - ); - } + /// Remove peer from available peers. + pub fn remove_peer(&mut self, peer_id: &PeerId) { + self.peers.remove(peer_id); } - /// This failure occurs on download, so register a failure downloading, penalize the peer - /// and clear the blob cache. - pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(cx); - if let Some(cached_child) = self.child_components.as_mut() { - cached_child.clear_blobs(); - } - self.blob_request_state.state.on_download_failure() + /// Returns true if this lookup has zero peers + pub fn has_no_peers(&self) -> bool { + self.peers.is_empty() } - /// This failure occurs after processing, so register a failure processing, penalize the peer - /// and clear the blob cache. - pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext) { - self.penalize_blob_peer(cx); - if let Some(cached_child) = self.child_components.as_mut() { - cached_child.clear_blobs(); - } - self.blob_request_state.state.on_processing_failure() + /// Selects a random peer from available peers if any + fn use_rand_available_peer(&mut self) -> Option { + self.peers.iter().choose(&mut rand::thread_rng()).copied() } } /// The state of the blob request component of a `SingleBlockLookup`. 
-pub struct BlobRequestState { - /// The latest picture of which blobs still need to be requested. This includes information - /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in - /// the data availability checker. - pub requested_ids: MissingBlobs, +#[derive(Derivative)] +#[derivative(Debug)] +pub struct BlobRequestState { + #[derivative(Debug = "ignore")] pub block_root: Hash256, - pub state: SingleLookupRequestState, + pub state: SingleLookupRequestState>, } -impl BlobRequestState { - pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { - let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); +impl BlobRequestState { + pub fn new(block_root: Hash256) -> Self { Self { block_root, - requested_ids: default_ids, - state: SingleLookupRequestState::new(peer_source), + state: SingleLookupRequestState::new(), } } } /// The state of the block request component of a `SingleBlockLookup`. -pub struct BlockRequestState { +#[derive(Derivative)] +#[derivative(Debug)] +pub struct BlockRequestState { + #[derivative(Debug = "ignore")] pub requested_block_root: Hash256, - pub state: SingleLookupRequestState, + pub state: SingleLookupRequestState>>, } -impl BlockRequestState { - pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { +impl BlockRequestState { + pub fn new(block_root: Hash256) -> Self { Self { requested_block_root: block_root, - state: SingleLookupRequestState::new(peers), + state: SingleLookupRequestState::new(), } } } -/// This is the status of cached components for a lookup if they are required. It provides information -/// about whether we should send a responses immediately for processing, whether we require more -/// responses, or whether all cached components have been received and the reconstructed block -/// should be sent for processing. -pub enum CachedChild { - /// All child components have been received, this is the reconstructed block, including all. - /// It has been checked for consistency between blobs and block, but no consensus checks have - /// been performed and no kzg verification has been performed. - Ok(RpcBlock), - /// All child components have not yet been received. - DownloadIncomplete, - /// Child components should not be cached, send this directly for processing. - NotRequired, - /// There was an error during consistency checks between block and blobs. - Err(AvailabilityCheckError), +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct DownloadResult { + pub value: T, + pub block_root: Hash256, + pub seen_timestamp: Duration, + pub peer_id: PeerId, } -#[derive(Debug, PartialEq, Eq)] -pub enum State { - AwaitingDownload, - Downloading { peer_id: PeerId }, - Processing { peer_id: PeerId }, - Processed { peer_id: PeerId }, +#[derive(PartialEq, Eq, IntoStaticStr)] +pub enum State { + AwaitingDownload(&'static str), + Downloading(ReqId), + AwaitingProcess(DownloadResult), + /// Request is processing, sent by lookup sync + Processing(DownloadResult), + /// Request is processed + Processed, } /// Object representing the state of a single block or blob lookup request. -#[derive(PartialEq, Eq, Debug)] -pub struct SingleLookupRequestState { +#[derive(PartialEq, Eq, Derivative)] +#[derivative(Debug)] +pub struct SingleLookupRequestState { /// State of this request. - state: State, - /// Peers that should have this block or blob. - available_peers: HashSet, - /// Peers from which we have requested this block. 
- used_peers: HashSet, + state: State, /// How many times have we attempted to process this block or blob. failed_processing: u8, /// How many times have we attempted to download this block or blob. failed_downloading: u8, - /// Should be incremented everytime this request is retried. The purpose of this is to - /// differentiate retries of the same block/blob request within a lookup. We currently penalize - /// peers and retry requests prior to receiving the stream terminator. This means responses - /// from a prior request may arrive after a new request has been sent, this counter allows - /// us to differentiate these two responses. - req_counter: u32, } -impl SingleLookupRequestState { - pub fn new(peers: &[PeerId]) -> Self { - let mut available_peers = HashSet::default(); - for peer in peers.iter().copied() { - available_peers.insert(peer); - } - +impl SingleLookupRequestState { + pub fn new() -> Self { Self { - state: State::AwaitingDownload, - available_peers, - used_peers: HashSet::default(), + state: State::AwaitingDownload("not started"), failed_processing: 0, failed_downloading: 0, - req_counter: 0, } } - pub fn is_current_req_counter(&self, req_counter: u32) -> bool { - self.req_counter == req_counter - } - pub fn is_awaiting_download(&self) -> bool { - matches!(self.state, State::AwaitingDownload) + match self.state { + State::AwaitingDownload { .. } => true, + State::Downloading { .. } + | State::AwaitingProcess { .. } + | State::Processing { .. } + | State::Processed { .. } => false, + } } - pub fn is_downloaded(&self) -> bool { + pub fn is_processed(&self) -> bool { match self.state { - State::AwaitingDownload => false, - State::Downloading { .. } => false, - State::Processing { .. } => true, + State::AwaitingDownload { .. } + | State::Downloading { .. } + | State::AwaitingProcess { .. } + | State::Processing { .. } => false, State::Processed { .. } => true, } } - pub fn is_processed(&self) -> bool { + /// Returns true if we can expect some future event to progress this block component request + /// specifically. + pub fn is_awaiting_event(&self) -> bool { match self.state { - State::AwaitingDownload => false, - State::Downloading { .. } => false, - State::Processing { .. } => false, - State::Processed { .. } => true, + // No event will progress this request specifically, but the request may be put on hold + // due to some external event + State::AwaitingDownload { .. } => false, + // Network will emit a download success / error event + State::Downloading { .. } => true, + // Not awaiting any external event + State::AwaitingProcess { .. } => false, + // Beacon processor will emit a processing result event + State::Processing { .. } => true, + // Request complete, no future event left + State::Processed { .. } => false, } } - pub fn on_download_start(&mut self, peer_id: PeerId) -> u32 { - self.state = State::Downloading { peer_id }; - self.req_counter += 1; - self.req_counter + pub fn peek_downloaded_data(&self) -> Option<&T> { + match &self.state { + State::AwaitingDownload { .. } => None, + State::Downloading { .. } => None, + State::AwaitingProcess(result) => Some(&result.value), + State::Processing(result) => Some(&result.value), + State::Processed { .. } => None, + } } - /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong - /// block. 
- pub fn on_download_failure(&mut self) { - self.failed_downloading = self.failed_downloading.saturating_add(1); - self.state = State::AwaitingDownload; + /// Switch to `AwaitingProcessing` if the request is in `AwaitingDownload` state, otherwise + /// ignore. + pub fn insert_verified_response(&mut self, result: DownloadResult) -> bool { + if let State::AwaitingDownload { .. } = &self.state { + self.state = State::AwaitingProcess(result); + true + } else { + false + } + } + + /// Append metadata on why this request is in AwaitingDownload status. Very helpful to debug + /// stuck lookups. Not fallible as it's purely informational. + pub fn update_awaiting_download_status(&mut self, new_status: &'static str) { + if let State::AwaitingDownload(status) = &mut self.state { + *status = new_status + } } - pub fn on_download_success(&mut self) -> Result<(), String> { + /// Switch to `Downloading` if the request is in `AwaitingDownload` state, otherwise returns None. + pub fn on_download_start(&mut self, req_id: ReqId) -> Result<(), LookupRequestError> { match &self.state { - State::Downloading { peer_id } => { - self.state = State::Processing { peer_id: *peer_id }; + State::AwaitingDownload { .. } => { + self.state = State::Downloading(req_id); Ok(()) } - other => Err(format!( - "request bad state, expected downloading got {other}" - )), + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_start expected AwaitingDownload got {other}" + ))), } } - /// Registers a failure in processing a block. - pub fn on_processing_failure(&mut self) { - self.failed_processing = self.failed_processing.saturating_add(1); - self.state = State::AwaitingDownload; - } - - pub fn on_processing_success(&mut self) -> Result<(), String> { + /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong + /// block. + pub fn on_download_failure(&mut self, req_id: ReqId) -> Result<(), LookupRequestError> { match &self.state { - State::Processing { peer_id } => { - self.state = State::Processed { peer_id: *peer_id }; + State::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(LookupRequestError::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload("not started"); Ok(()) } - other => Err(format!("not in processing state: {}", other).to_string()), + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_failure expected Downloading got {other}" + ))), } } - /// The total number of failures, whether it be processing or downloading. - pub fn failed_attempts(&self) -> u8 { - self.failed_processing + self.failed_downloading + pub fn on_download_success( + &mut self, + req_id: ReqId, + result: DownloadResult, + ) -> Result<(), LookupRequestError> { + match &self.state { + State::Downloading(expected_req_id) => { + if req_id != *expected_req_id { + return Err(LookupRequestError::UnexpectedRequestId { + expected_req_id: *expected_req_id, + req_id, + }); + } + self.state = State::AwaitingProcess(result); + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_download_success expected Downloading got {other}" + ))), + } } - pub fn more_failed_processing_attempts(&self) -> bool { - self.failed_processing >= self.failed_downloading + /// Switch to `Processing` if the request is in `AwaitingProcess` state, otherwise returns None. 
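// Summary of the state transitions implemented by the methods of this impl, as written in
// this file:
//
//     AwaitingDownload  --on_download_start---------------> Downloading
//     Downloading       --on_download_success-------------> AwaitingProcess
//     Downloading       --on_download_failure-------------> AwaitingDownload
//     AwaitingProcess   --maybe_start_processing----------> Processing
//     Processing        --on_processing_success-----------> Processed
//     Processing        --on_processing_failure-----------> AwaitingDownload
//     Processing        --revert_to_awaiting_processing---> AwaitingProcess
//     AwaitingDownload  --on_completed_request------------> Processed (nothing to download or process)
//     AwaitingDownload  --insert_verified_response--------> AwaitingProcess (component already received, e.g. from gossip)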
+ pub fn maybe_start_processing(&mut self) -> Option> { + // For 2 lines replace state with placeholder to gain ownership of `result` + match &self.state { + State::AwaitingProcess(result) => { + let result = result.clone(); + self.state = State::Processing(result.clone()); + Some(result) + } + _ => None, + } } - /// This method should be used for peers wrapped in `PeerId::BlockAndBlobs`. - pub fn add_peer(&mut self, peer_id: &PeerId) { - self.available_peers.insert(*peer_id); + /// Revert into `AwaitingProcessing`, if the payload if not invalid and can be submitted for + /// processing latter. + pub fn revert_to_awaiting_processing(&mut self) -> Result<(), LookupRequestError> { + match &self.state { + State::Processing(result) => { + self.state = State::AwaitingProcess(result.clone()); + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on revert_to_awaiting_processing expected Processing got {other}" + ))), + } } - /// If a peer disconnects, this request could be failed. If so, an error is returned - pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> { - self.available_peers.remove(dc_peer_id); - if let State::Downloading { peer_id } = &self.state { - if peer_id == dc_peer_id { - // Peer disconnected before providing a block - self.on_download_failure(); - return Err(()); + /// Registers a failure in processing a block. + pub fn on_processing_failure(&mut self) -> Result { + match &self.state { + State::Processing(result) => { + let peer_id = result.peer_id; + self.failed_processing = self.failed_processing.saturating_add(1); + self.state = State::AwaitingDownload("not started"); + Ok(peer_id) } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_processing_failure expected Processing got {other}" + ))), } - Ok(()) } - /// Returns the id peer we downloaded from if we have downloaded a verified block, otherwise - /// returns an error. - pub fn processing_peer(&self) -> Result { + pub fn on_processing_success(&mut self) -> Result<(), LookupRequestError> { match &self.state { - State::Processing { peer_id } | State::Processed { peer_id } => Ok(*peer_id), - other => Err(format!("not in processing state: {}", other).to_string()), + State::Processing(_) => { + self.state = State::Processed; + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_processing_success expected Processing got {other}" + ))), } } - pub fn get_used_peers(&self) -> impl Iterator { - self.used_peers.iter() + /// Mark a request as complete without any download or processing + pub fn on_completed_request(&mut self) -> Result<(), LookupRequestError> { + match &self.state { + State::AwaitingDownload { .. } => { + self.state = State::Processed; + Ok(()) + } + other => Err(LookupRequestError::BadState(format!( + "Bad state on_completed_request expected AwaitingDownload got {other}" + ))), + } } - /// Selects a random peer from available peers if any, inserts it in used peers and returns it. - pub fn use_rand_available_peer(&mut self) -> Option { - let peer_id = self - .available_peers - .iter() - .choose(&mut rand::thread_rng()) - .copied()?; - self.used_peers.insert(peer_id); - Some(peer_id) + /// The total number of failures, whether it be processing or downloading. 
+ pub fn failed_attempts(&self) -> u8 { + self.failed_processing + self.failed_downloading } -} -impl slog::Value for SingleBlockLookup { - fn serialize( - &self, - _record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_str("request", key)?; - serializer.emit_arguments("lookup_type", &format_args!("{:?}", self.lookup_type))?; - serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; - serializer.emit_arguments( - "blob_ids", - &format_args!("{:?}", self.blob_request_state.requested_ids.indices()), - )?; - serializer.emit_arguments( - "block_request_state.state", - &format_args!("{:?}", self.block_request_state.state), - )?; - serializer.emit_arguments( - "blob_request_state.state", - &format_args!("{:?}", self.blob_request_state.state), - )?; - slog::Result::Ok(()) + pub fn more_failed_processing_attempts(&self) -> bool { + self.failed_processing >= self.failed_downloading } } -impl slog::Value for SingleLookupRequestState { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - serializer.emit_str("request_state", key)?; - match &self.state { - State::AwaitingDownload => { - "awaiting_download".serialize(record, "state", serializer)? - } - State::Downloading { peer_id } => { - serializer.emit_arguments("downloading_peer", &format_args!("{}", peer_id))? - } - State::Processing { peer_id } => { - serializer.emit_arguments("processing_peer", &format_args!("{}", peer_id))? - } - State::Processed { .. } => "processed".serialize(record, "state", serializer)?, - } - serializer.emit_u8("failed_downloads", self.failed_downloading)?; - serializer.emit_u8("failed_processing", self.failed_processing)?; - slog::Result::Ok(()) +// Display is used in the BadState assertions above +impl std::fmt::Display for State { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", Into::<&'static str>::into(self)) } } -impl std::fmt::Display for State { +// Debug is used in the log_stuck_lookups print to include some more info. Implements custom Debug +// to not dump an entire block or blob to terminal which don't add valuable data. +impl std::fmt::Debug for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - State::AwaitingDownload => write!(f, "AwaitingDownload"), - State::Downloading { .. } => write!(f, "Downloading"), - State::Processing { .. } => write!(f, "Processing"), - State::Processed { .. } => write!(f, "Processed"), + Self::AwaitingDownload(status) => write!(f, "AwaitingDownload({:?})", status), + Self::Downloading(req_id) => write!(f, "Downloading({:?})", req_id), + Self::AwaitingProcess(d) => write!(f, "AwaitingProcess({:?})", d.peer_id), + Self::Processing(d) => write!(f, "Processing({:?})", d.peer_id), + Self::Processed { .. 
} => write!(f, "Processed"), } } } + +fn fmt_peer_set_as_len( + peer_set: &HashSet, + f: &mut std::fmt::Formatter, +) -> Result<(), std::fmt::Error> { + write!(f, "{}", peer_set.len()) +} diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 8e3b35ee5d3..ef2822fe563 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,7 +1,6 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::service::RequestId; -use crate::sync::manager::{RequestId as SyncRequestId, SingleLookupReqId, SyncManager}; +use crate::sync::manager::{BlockProcessType, SyncManager}; use crate::sync::SyncMessage; use crate::NetworkMessage; use std::sync::Arc; @@ -9,23 +8,32 @@ use std::sync::Arc; use super::*; use crate::sync::block_lookups::common::ResponseType; +use beacon_chain::blob_verification::GossipVerifiedBlob; +use beacon_chain::block_verification_types::{BlockImportData, RpcBlock}; use beacon_chain::builder::Witness; +use beacon_chain::data_availability_checker::Availability; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{ build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs, }; +use beacon_chain::{ + AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, +}; use beacon_processor::WorkEvent; use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SingleLookupReqId, SyncRequestId}; use lighthouse_network::types::SyncState; use lighthouse_network::{NetworkGlobals, Request}; use slog::info; use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; use store::MemoryStore; use tokio::sync::mpsc; +use types::test_utils::TestRandom; use types::{ test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, + BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; +use types::{BeaconState, BeaconStateBase}; type T = Witness, E, MemoryStore, MemoryStore>; @@ -57,6 +65,7 @@ type T = Witness, E, MemoryStore, Memo struct TestRig { /// Receiver for `BeaconProcessor` events (e.g. block processing results). beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) network_rx: mpsc::UnboundedReceiver>, /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) @@ -65,6 +74,8 @@ struct TestRig { sync_manager: SyncManager, /// To manipulate sync state and peer connection status network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, /// `rng` for generating test blocks and blobs. 
rng: XorShiftRng, fork_name: ForkName, @@ -72,6 +83,7 @@ struct TestRig { } const D: Duration = Duration::new(0, 0); +const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; impl TestRig { fn test_setup() -> Self { @@ -114,6 +126,7 @@ impl TestRig { let rng = XorShiftRng::from_seed([42; 16]); TestRig { beacon_processor_rx, + beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], rng, @@ -125,6 +138,7 @@ impl TestRig { sync_recv, log.clone(), ), + harness, fork_name, log, } @@ -194,11 +208,15 @@ impl TestRig { self.sync_manager.handle_message(sync_message); } + fn active_single_lookups(&self) -> Vec { + self.sync_manager.active_single_lookups() + } + fn active_single_lookups_count(&self) -> usize { self.sync_manager.active_single_lookups().len() } - fn active_parent_lookups(&self) -> Vec { + fn active_parent_lookups(&self) -> Vec> { self.sync_manager.active_parent_lookups() } @@ -206,22 +224,90 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } - fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool { - self.sync_manager.failed_chains_contains(chain_hash) + fn assert_single_lookups_count(&self, count: usize) { + assert_eq!( + self.active_single_lookups_count(), + count, + "Unexpected count of single lookups. Current lookups: {:?}", + self.active_single_lookups() + ); } - #[track_caller] - fn assert_parent_lookups_consistency(&self) { - let hashes = self.active_parent_lookups(); - let expected = hashes.len(); + fn assert_parent_lookups_count(&self, count: usize) { assert_eq!( - expected, - hashes - .into_iter() - .collect::>() - .len(), - "duplicated chain hashes in parent queue" - ) + self.active_parent_lookups_count(), + count, + "Unexpected count of parent lookups. Parent lookups: {:?}. Current lookups: {:?}", + self.active_parent_lookups(), + self.active_single_lookups() + ); + } + + fn assert_lookup_is_active(&self, block_root: Hash256) { + let lookups = self.sync_manager.active_single_lookups(); + if !lookups.iter().any(|l| l.1 == block_root) { + panic!("Expected lookup {block_root} to be the only active: {lookups:?}"); + } + } + + fn assert_lookup_peers(&self, block_root: Hash256, mut expected_peers: Vec) { + let mut lookup = self + .sync_manager + .active_single_lookups() + .into_iter() + .find(|l| l.1 == block_root) + .unwrap_or_else(|| panic!("no lookup for {block_root}")); + lookup.3.sort(); + expected_peers.sort(); + assert_eq!( + lookup.3, expected_peers, + "unexpected peers on lookup {block_root}" + ); + } + + fn insert_failed_chain(&mut self, block_root: Hash256) { + self.sync_manager.insert_failed_chain(block_root); + } + + fn assert_not_failed_chain(&mut self, chain_hash: Hash256) { + let failed_chains = self.sync_manager.get_failed_chains(); + if failed_chains.contains(&chain_hash) { + panic!("failed chains contain {chain_hash:?}: {failed_chains:?}"); + } + } + + fn assert_failed_chain(&mut self, chain_hash: Hash256) { + let failed_chains = self.sync_manager.get_failed_chains(); + if !failed_chains.contains(&chain_hash) { + panic!("expected failed chains to contain {chain_hash:?}: {failed_chains:?}"); + } + } + + fn find_single_lookup_for(&self, block_root: Hash256) -> Id { + self.active_single_lookups() + .iter() + .find(|l| l.1 == block_root) + .unwrap_or_else(|| panic!("no single block lookup found for {block_root}")) + .0 + } + + #[track_caller] + fn expect_no_active_single_lookups(&self) { + assert!( + self.active_single_lookups().is_empty(), + "expect no single block lookups: {:?}", + 
self.active_single_lookups() + ); + } + + #[track_caller] + fn expect_no_active_lookups(&self) { + self.expect_no_active_single_lookups(); + } + + fn expect_no_active_lookups_empty_network(&mut self) { + self.expect_no_active_lookups(); + self.expect_empty_network(); } fn new_connected_peer(&mut self) -> PeerId { @@ -233,27 +319,43 @@ impl TestRig { peer_id } - fn parent_chain_processed(&mut self, chain_hash: Hash256, result: BatchProcessResult) { - self.send_sync_message(SyncMessage::BatchProcessed { - sync_type: ChainSegmentProcessId::ParentLookup(chain_hash), - result, - }) + fn parent_chain_processed_success( + &mut self, + chain_hash: Hash256, + blocks: &[Arc>], + ) { + // Send import events for all pending parent blocks + for _ in blocks { + self.parent_block_processed_imported(chain_hash); + } + // Send final import event for the block that triggered the lookup + self.single_block_component_processed_imported(chain_hash); + } + + /// Locate a parent lookup chain with tip hash `chain_hash` + fn find_oldest_parent_lookup(&self, chain_hash: Hash256) -> Hash256 { + let parent_chain = self + .active_parent_lookups() + .into_iter() + .find(|chain| chain.first() == Some(&chain_hash)) + .unwrap_or_else(|| { + panic!( + "No parent chain with chain_hash {chain_hash:?}: Parent lookups {:?} Single lookups {:?}", + self.active_parent_lookups(), + self.active_single_lookups(), + ) + }); + *parent_chain.last().unwrap() } - fn parent_chain_processed_success(&mut self, chain_hash: Hash256) { - self.parent_chain_processed( - chain_hash, - BatchProcessResult::Success { - was_non_empty: true, - }, - ) + fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); + self.single_block_component_processed(id, result); } - fn parent_block_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { - self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::ParentLookup { chain_hash }, - result, - }); + fn parent_blob_processed(&mut self, chain_hash: Hash256, result: BlockProcessingResult) { + let id = self.find_single_lookup_for(self.find_oldest_parent_lookup(chain_hash)); + self.single_blob_component_processed(id, result); } fn parent_block_processed_imported(&mut self, chain_hash: Hash256) { @@ -263,35 +365,24 @@ impl TestRig { ); } - fn single_block_component_processed( - &mut self, - id: SingleLookupReqId, - result: BlockProcessingResult, - ) { + fn single_block_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlock { id: id.id }, + process_type: BlockProcessType::SingleBlock { id }, result, }) } - fn single_block_component_processed_imported( - &mut self, - id: SingleLookupReqId, - block_root: Hash256, - ) { + fn single_block_component_processed_imported(&mut self, block_root: Hash256) { + let id = self.find_single_lookup_for(block_root); self.single_block_component_processed( id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)), ) } - fn single_blob_component_processed( - &mut self, - id: SingleLookupReqId, - result: BlockProcessingResult, - ) { + fn single_blob_component_processed(&mut self, id: Id, result: BlockProcessingResult) { self.send_sync_message(SyncMessage::BlockComponentProcessed { - process_type: BlockProcessType::SingleBlob { id: id.id }, + process_type: 
BlockProcessType::SingleBlob { id }, result, }) } @@ -302,6 +393,7 @@ impl TestRig { peer_id: PeerId, beacon_block: Option>>, ) { + self.log("parent_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { request_id: SyncRequestId::SingleBlock { id }, peer_id, @@ -316,6 +408,7 @@ impl TestRig { peer_id: PeerId, beacon_block: Option>>, ) { + self.log("single_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { request_id: SyncRequestId::SingleBlock { id }, peer_id, @@ -330,6 +423,10 @@ impl TestRig { peer_id: PeerId, blob_sidecar: Option>>, ) { + self.log(&format!( + "parent_lookup_blob_response {:?}", + blob_sidecar.as_ref().map(|b| b.index) + )); self.send_sync_message(SyncMessage::RpcBlob { request_id: SyncRequestId::SingleBlob { id }, peer_id, @@ -352,6 +449,72 @@ impl TestRig { }); } + fn complete_single_lookup_blob_download( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + blobs: Vec>, + ) { + for blob in blobs { + self.single_lookup_blob_response(id, peer_id, Some(blob.into())); + } + self.single_lookup_blob_response(id, peer_id, None); + } + + fn complete_single_lookup_blob_lookup_valid( + &mut self, + id: SingleLookupReqId, + peer_id: PeerId, + blobs: Vec>, + import: bool, + ) { + let block_root = blobs.first().unwrap().block_root(); + let block_slot = blobs.first().unwrap().slot(); + self.complete_single_lookup_blob_download(id, peer_id, blobs); + self.expect_block_process(ResponseType::Blob); + self.single_blob_component_processed( + id.lookup_id, + if import { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + block_slot, block_root, + )) + }, + ); + } + + fn complete_lookup_block_download(&mut self, block: SignedBeaconBlock) { + let block_root = block.canonical_root(); + let id = self.expect_block_lookup_request(block_root); + self.expect_empty_network(); + let peer_id = self.new_connected_peer(); + self.single_lookup_block_response(id, peer_id, Some(block.into())); + self.single_lookup_block_response(id, peer_id, None); + } + + fn complete_lookup_block_import_valid(&mut self, block_root: Hash256, import: bool) { + self.expect_block_process(ResponseType::Block); + let id = self.find_single_lookup_for(block_root); + self.single_block_component_processed( + id, + if import { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(block_root)) + } else { + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + Slot::new(0), + block_root, + )) + }, + ) + } + + fn complete_single_lookup_block_valid(&mut self, block: SignedBeaconBlock, import: bool) { + let block_root = block.canonical_root(); + self.complete_lookup_block_download(block); + self.complete_lookup_block_import_valid(block_root, import) + } + fn parent_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, @@ -379,6 +542,25 @@ impl TestRig { }) } + /// Return RPCErrors for all active requests of peer + fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { + self.drain_network_rx(); + while let Ok(request_id) = self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request_id: AppRequestId::Sync(id), + .. 
+ } if *peer_id == disconnected_peer_id => Some(*id), + _ => None, + }) { + self.send_sync_message(SyncMessage::RpcError { + peer_id: disconnected_peer_id, + request_id, + error: RPCError::Disconnected, + }); + } + } + fn peer_disconnected(&mut self, peer_id: PeerId) { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } @@ -389,6 +571,12 @@ impl TestRig { } } + fn drain_processor_rx(&mut self) { + while let Ok(event) = self.beacon_processor_rx.try_recv() { + self.beacon_processor_rx_queue.push(event); + } + } + fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, @@ -409,42 +597,75 @@ impl TestRig { } } - #[track_caller] - fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + fn pop_received_processor_event) -> Option>( + &mut self, + predicate_transform: F, + ) -> Result { + self.drain_processor_rx(); + + if let Some(index) = self + .beacon_processor_rx_queue + .iter() + .position(|x| predicate_transform(x).is_some()) + { + // Transform the item, knowing that it won't be None because we checked it in the position predicate. + let transformed = predicate_transform(&self.beacon_processor_rx_queue[index]).unwrap(); + self.beacon_processor_rx_queue.remove(index); + Ok(transformed) + } else { + Err(format!( + "current processor messages {:?}", + self.beacon_processor_rx_queue + ) + .to_string()) + } + } + + fn find_block_lookup_request( + &mut self, + for_block: Hash256, + ) -> Result { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, request: Request::BlocksByRoot(request), - request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), - } if id.lookup_type == LookupType::Current - && request.block_roots().to_vec().contains(&for_block) => - { - Some(*id) - } + request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) - .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) } #[track_caller] - fn expect_blob_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + fn expect_block_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + self.find_block_lookup_request(for_block) + .unwrap_or_else(|e| panic!("Expected block request for {for_block:?}: {e}")) + } + + fn find_blob_lookup_request( + &mut self, + for_block: Hash256, + ) -> Result { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, request: Request::BlobsByRoot(request), - request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - } if id.lookup_type == LookupType::Current - && request - .blob_ids - .to_vec() - .iter() - .any(|r| r.block_root == for_block) => + request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + } if request + .blob_ids + .to_vec() + .iter() + .any(|r| r.block_root == for_block) => { Some(*id) } _ => None, }) - .unwrap_or_else(|e| panic!("Expected blob request for {for_block:?}: {e}")) + } + + #[track_caller] + fn expect_blob_lookup_request(&mut self, for_block: Hash256) -> SingleLookupReqId { + self.find_blob_lookup_request(for_block) + .unwrap_or_else(|e| panic!("Expected blob request for {for_block:?}: {e}")) } #[track_caller] @@ -453,30 +674,34 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: Request::BlocksByRoot(request), - request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }), - } if id.lookup_type == LookupType::Parent - && 
request.block_roots().to_vec().contains(&for_block) => - { - Some(*id) - } + request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) .unwrap_or_else(|e| panic!("Expected block parent request for {for_block:?}: {e}")) } + fn expect_no_requests_for(&mut self, block_root: Hash256) { + if let Ok(request) = self.find_block_lookup_request(block_root) { + panic!("Expected no block request for {block_root:?} found {request:?}"); + } + if let Ok(request) = self.find_blob_lookup_request(block_root) { + panic!("Expected no blob request for {block_root:?} found {request:?}"); + } + } + #[track_caller] fn expect_blob_parent_request(&mut self, for_block: Hash256) -> SingleLookupReqId { self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id: _, request: Request::BlobsByRoot(request), - request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - } if id.lookup_type == LookupType::Parent - && request - .blob_ids - .to_vec() - .iter() - .all(|r| r.block_root == for_block) => + request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + } if request + .blob_ids + .to_vec() + .iter() + .all(|r| r.block_root == for_block) => { Some(*id) } @@ -485,41 +710,19 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected blob parent request for {for_block:?}: {e}")) } - fn expect_lookup_request_block_and_blobs(&mut self, block_root: Hash256) -> SingleLookupReqId { - let id = self.expect_block_lookup_request(block_root); - // If we're in deneb, a blob request should have been triggered as well, - // we don't require a response because we're generateing 0-blob blocks in this test. - if self.after_deneb() { - let _ = self.expect_blob_lookup_request(block_root); - } - id - } - - fn expect_parent_request_block_and_blobs(&mut self, block_root: Hash256) -> SingleLookupReqId { - let id = self.expect_block_parent_request(block_root); - // If we're in deneb, a blob request should have been triggered as well, - // we don't require a response because we're generateing 0-blob blocks in this test. 
- if self.after_deneb() { - let _ = self.expect_blob_parent_request(block_root); - } - id - } - #[track_caller] fn expect_block_process(&mut self, response_type: ResponseType) { match response_type { - ResponseType::Block => match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); - } - other => panic!("Expected block process, found {:?}", other), - }, - ResponseType::Blob => match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::RPC_BLOBS); - } - other => panic!("Expected blob process, found {:?}", other), - }, + ResponseType::Block => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::RPC_BLOCK).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected block work event: {e}")), + ResponseType::Blob => self + .pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), } } @@ -544,9 +747,13 @@ impl TestRig { fn expect_parent_chain_process(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); + // Parent chain sends blocks one by one + assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); } - other => panic!("Expected chain segment process, found {:?}", other), + other => panic!( + "Expected rpc_block from chain segment process, found {:?}", + other + ), } } @@ -560,24 +767,37 @@ impl TestRig { #[track_caller] fn expect_empty_beacon_processor(&mut self) { + match self.beacon_processor_rx.try_recv() { + Err(mpsc::error::TryRecvError::Empty) => {} // ok + Ok(event) => panic!("expected empty beacon processor: {:?}", event), + other => panic!("unexpected err {:?}", other), + } + } + + #[track_caller] + pub fn expect_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { + let penalty_msg = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::ReportPeer { + peer_id: p_id, msg, .. + } if p_id == &peer_id => Some(msg.to_owned()), + _ => None, + }) + .unwrap_or_else(|_| { + panic!( + "Expected '{expect_penalty_msg}' penalty for peer {peer_id}: {:#?}", + self.network_rx_queue + ) + }); assert_eq!( - self.beacon_processor_rx.try_recv().expect_err("must err"), - mpsc::error::TryRecvError::Empty + penalty_msg, expect_penalty_msg, + "Unexpected penalty msg for {peer_id}" ); } - #[track_caller] - pub fn expect_penalty(&mut self, peer_id: PeerId) { - self.pop_received_network_event(|ev| match ev { - NetworkMessage::ReportPeer { peer_id: p_id, .. 
} if p_id == &peer_id => Some(()), - _ => None, - }) - .unwrap_or_else(|_| { - panic!( - "Expected peer penalty for {peer_id}: {:#?}", - self.network_rx_queue - ) - }) + pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { + self.expect_penalty(peer_id, expect_penalty_msg); + self.expect_no_penalty_for(peer_id); } pub fn block_with_parent_and_blobs( @@ -595,17 +815,127 @@ impl TestRig { pub fn rand_blockchain(&mut self, depth: usize) -> Vec>> { let mut blocks = Vec::>>::with_capacity(depth); - while blocks.len() < depth { + for slot in 0..depth { let parent = blocks .last() .map(|b| b.canonical_root()) .unwrap_or_else(Hash256::random); let mut block = self.rand_block(); *block.message_mut().parent_root_mut() = parent; + *block.message_mut().slot_mut() = slot.into(); blocks.push(block.into()); } + self.log(&format!( + "Blockchain dump {:#?}", + blocks + .iter() + .map(|b| format!( + "block {} {} parent {}", + b.slot(), + b.canonical_root(), + b.parent_root() + )) + .collect::>() + )); blocks } + + fn insert_block_to_da_checker(&mut self, block: Arc>) { + let state = BeaconState::Base(BeaconStateBase::random_for_test(&mut self.rng)); + let parent_block = self.rand_block(); + let import_data = BlockImportData::::__new_for_test( + block.canonical_root(), + state, + parent_block.into(), + ); + let payload_verification_outcome = PayloadVerificationOutcome { + payload_verification_status: PayloadVerificationStatus::Verified, + is_valid_merge_transition_block: false, + }; + let executed_block = + AvailabilityPendingExecutedBlock::new(block, import_data, payload_verification_outcome); + match self + .harness + .chain + .data_availability_checker + .put_pending_executed_block(executed_block) + .unwrap() + { + Availability::Available(_) => panic!("block removed from da_checker, available"), + Availability::MissingComponents(block_root) => { + self.log(&format!("inserted block to da_checker {block_root:?}")) + } + }; + } + + fn insert_blob_to_da_checker(&mut self, blob: BlobSidecar) { + match self + .harness + .chain + .data_availability_checker + .put_gossip_blob(GossipVerifiedBlob::__assumed_valid(blob.into())) + .unwrap() + { + Availability::Available(_) => panic!("blob removed from da_checker, available"), + Availability::MissingComponents(block_root) => { + self.log(&format!("inserted blob to da_checker {block_root:?}")) + } + }; + } + + fn insert_block_to_processing_cache(&mut self, block: Arc>) { + self.harness + .chain + .reqresp_pre_import_cache + .write() + .insert(block.canonical_root(), block); + } + + fn simulate_block_gossip_processing_becomes_invalid(&mut self, block_root: Hash256) { + self.harness + .chain + .reqresp_pre_import_cache + .write() + .remove(&block_root); + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: false, + }); + } + + fn simulate_block_gossip_processing_becomes_valid_missing_components( + &mut self, + block: Arc>, + ) { + let block_root = block.canonical_root(); + self.harness + .chain + .reqresp_pre_import_cache + .write() + .remove(&block_root); + + self.insert_block_to_da_checker(block); + + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root, + imported: false, + }); + } +} + +#[test] +fn stable_rng() { + let mut rng = XorShiftRng::from_seed([42; 16]); + let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); + assert_eq!( + block.canonical_root(), + Hash256::from_slice( + 
&hex::decode("adfd2e9e7a7976e8ccaed6eaf0257ed36a5b476732fee63ff44966602fd099ec") + .unwrap() + ), + "rng produces a consistent value" + ); } #[test] @@ -616,7 +946,7 @@ fn test_single_block_lookup_happy_path() { let block_root = block.canonical_root(); // Trigger the request rig.trigger_unknown_block_from_attestation(block_root, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_root); + let id = rig.expect_block_lookup_request(block_root); // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. @@ -630,27 +960,34 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. rig.single_lookup_block_response(id, peer_id, None); - rig.single_block_component_processed_imported(id, block_root); + rig.single_block_component_processed_imported(block_root); rig.expect_empty_network(); - assert_eq!(rig.active_single_lookups_count(), 0); + rig.expect_no_active_lookups(); } +// Tests that if a peer does not respond with a block, we downscore and retry the block only #[test] fn test_single_block_lookup_empty_response() { - let mut rig = TestRig::test_setup(); + let mut r = TestRig::test_setup(); - let block_hash = Hash256::random(); - let peer_id = rig.new_connected_peer(); + let block = r.rand_block(); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); // Trigger the request - rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + let id = r.expect_block_lookup_request(block_root); // The peer does not have the block. It should be penalized. - rig.single_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); - - rig.expect_block_lookup_request(block_hash); // it should be retried + r.single_lookup_block_response(id, peer_id, None); + r.expect_penalty(peer_id, "NoResponseReturned"); + // it should be retried + let id = r.expect_block_lookup_request(block_root); + // Send the right block this time. + r.single_lookup_block_response(id, peer_id, Some(block.into())); + r.expect_block_process(ResponseType::Block); + r.single_block_component_processed_imported(block_root); + r.expect_no_active_lookups(); } #[test] @@ -662,12 +999,12 @@ fn test_single_block_lookup_wrong_response() { // Trigger the request rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + let id = rig.expect_block_lookup_request(block_hash); // Peer sends something else. It should be penalized. let bad_block = rig.rand_block(); rig.single_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); rig.expect_block_lookup_request(block_hash); // should be retried // Send the stream termination. This should not produce an additional penalty. @@ -684,7 +1021,7 @@ fn test_single_block_lookup_failure() { // Trigger the request rig.trigger_unknown_block_from_attestation(block_hash, peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block_hash); + let id = rig.expect_block_lookup_request(block_hash); // The request fails. RPC failures are handled elsewhere so we should not penalize the peer. 
     rig.single_lookup_failed(id, peer_id, RPCError::UnsupportedProtocol);
@@ -692,6 +1029,28 @@
     rig.expect_empty_network();
 }
+#[test]
+fn test_single_block_lookup_peer_disconnected_then_rpc_error() {
+    let mut rig = TestRig::test_setup();
+
+    let block_hash = Hash256::random();
+    let peer_id = rig.new_connected_peer();
+
+    // Trigger the request.
+    rig.trigger_unknown_block_from_attestation(block_hash, peer_id);
+    let id = rig.expect_block_lookup_request(block_hash);
+
+    // The peer disconnect event reaches sync before the rpc error.
+    rig.peer_disconnected(peer_id);
+    // The lookup is not removed as it can still potentially make progress.
+    rig.assert_single_lookups_count(1);
+    // The request fails.
+    rig.single_lookup_failed(id, peer_id, RPCError::Disconnected);
+    rig.expect_block_lookup_request(block_hash);
+    // The request should be removed from the network context on disconnection.
+    rig.expect_empty_network();
+}
+
 #[test]
 fn test_single_block_lookup_becomes_parent_request() {
     let mut rig = TestRig::test_setup();
@@ -703,7 +1062,7 @@ fn test_single_block_lookup_becomes_parent_request() {
     // Trigger the request
     rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id);
-    let id = rig.expect_lookup_request_block_and_blobs(block_root);
+    let id = rig.expect_block_parent_request(block_root);
     // The peer provides the correct block, should not be penalized. Now the block should be sent
     // for processing.
@@ -717,11 +1076,11 @@
     // Send the stream termination. Peer should have not been penalized, and the request moved to a
     // parent request after processing.
     rig.single_block_component_processed(
-        id,
+        id.lookup_id,
         BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(),
     );
-    assert_eq!(rig.active_single_lookups_count(), 1);
-    rig.expect_parent_request_block_and_blobs(parent_root);
+    assert_eq!(rig.active_single_lookups_count(), 2); // 2 = current + parent
+    rig.expect_block_parent_request(parent_root);
     rig.expect_empty_network();
     assert_eq!(rig.active_parent_lookups_count(), 1);
 }
@@ -735,21 +1094,25 @@ fn test_parent_lookup_happy_path() {
     // Trigger the request
     rig.trigger_unknown_parent_block(peer_id, block.into());
-    let id = rig.expect_parent_request_block_and_blobs(parent_root);
+    let id = rig.expect_block_parent_request(parent_root);
     // Peer sends the right block, it should be sent for processing. Peer should not be penalized.
     rig.parent_lookup_block_response(id, peer_id, Some(parent.into()));
+    // No blob request because the block has no data
+    rig.expect_empty_network();
     rig.expect_block_process(ResponseType::Block);
     rig.expect_empty_network();
+    // Add peer to child lookup to prevent it being dropped
+    rig.trigger_unknown_block_from_attestation(block_root, peer_id);
     // Processing succeeds, now the rest of the chain should be sent for processing.
rig.parent_block_processed( block_root, BlockError::BlockIsAlreadyKnown(block_root).into(), ); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -761,12 +1124,12 @@ fn test_parent_lookup_wrong_response() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); + let id1 = rig.expect_block_parent_request(parent_root); // Peer sends the wrong block, peer should be penalized and the block re-requested. let bad_block = rig.rand_block(); rig.parent_lookup_block_response(id1, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); let id2 = rig.expect_block_parent_request(parent_root); // Send the stream termination for the first request. This should not produce extra penalties. @@ -777,38 +1140,13 @@ fn test_parent_lookup_wrong_response() { rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); rig.expect_block_process(ResponseType::Block); + // Add peer to child lookup to prevent it being dropped + rig.trigger_unknown_block_from_attestation(block_root, peer_id); // Processing succeeds, now the rest of the chain should be sent for processing. rig.parent_block_processed_imported(block_root); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); -} - -#[test] -fn test_parent_lookup_empty_response() { - let mut rig = TestRig::test_setup(); - - let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); - let peer_id = rig.new_connected_peer(); - - // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); - - // Peer sends an empty response, peer should be penalized and the block re-requested. - rig.parent_lookup_block_response(id1, peer_id, None); - rig.expect_penalty(peer_id); - let id2 = rig.expect_block_parent_request(parent_root); - - // Send the right block this time. - rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); - rig.expect_block_process(ResponseType::Block); - - // Processing succeeds, now the rest of the chain should be sent for processing. - rig.parent_block_processed_imported(block_root); - rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -820,21 +1158,23 @@ fn test_parent_lookup_rpc_failure() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - let id1 = rig.expect_parent_request_block_and_blobs(parent_root); + let id = rig.expect_block_parent_request(parent_root); // The request fails. It should be tried again. - rig.parent_lookup_failed_unavailable(id1, peer_id); - let id2 = rig.expect_block_parent_request(parent_root); + rig.parent_lookup_failed_unavailable(id, peer_id); + let id = rig.expect_block_parent_request(parent_root); // Send the right block this time. 
- rig.parent_lookup_block_response(id2, peer_id, Some(parent.into())); + rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); rig.expect_block_process(ResponseType::Block); + // Add peer to child lookup to prevent it being dropped + rig.trigger_unknown_block_from_attestation(block_root, peer_id); // Processing succeeds, now the rest of the chain should be sent for processing. rig.parent_block_processed_imported(block_root); rig.expect_parent_chain_process(); - rig.parent_chain_processed_success(block_root); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.parent_chain_processed_success(block_root, &[]); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -847,12 +1187,9 @@ fn test_parent_lookup_too_many_attempts() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + for i in 1..=PARENT_FAIL_TOLERANCE { let id = rig.expect_block_parent_request(parent_root); // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 1 { - let _ = rig.expect_blob_parent_request(parent_root); - } if i % 2 == 0 { // make sure every error is accounted for @@ -872,11 +1209,11 @@ fn test_parent_lookup_too_many_attempts() { // I'm unsure if this is how it should behave? // rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); } } - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -888,13 +1225,9 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Trigger the request rig.trigger_unknown_parent_block(peer_id, block.into()); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { - assert!(!rig.failed_chains_contains(&block_root)); + for i in 1..=PARENT_FAIL_TOLERANCE { + rig.assert_not_failed_chain(block_root); let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 1 { - let _ = rig.expect_blob_parent_request(parent_root); - } if i % 2 != 0 { // The request fails. It should be tried again. rig.parent_lookup_failed_unavailable(id, peer_id); @@ -902,18 +1235,18 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() { // Send a bad block this time. It should be tried again. 
let bad_block = rig.rand_block(); rig.parent_lookup_block_response(id, peer_id, Some(bad_block.into())); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "UnrequestedBlockRoot"); } } - assert_eq!(rig.active_parent_lookups_count(), 0); - assert!(!rig.failed_chains_contains(&block_root)); - assert!(!rig.failed_chains_contains(&parent.canonical_root())); + rig.assert_not_failed_chain(block_root); + rig.assert_not_failed_chain(parent.canonical_root()); + rig.expect_no_active_lookups_empty_network(); } #[test] fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { - const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; + const PROCESSING_FAILURES: u8 = PARENT_FAIL_TOLERANCE / 2 + 1; let mut rig = TestRig::test_setup(); let (parent, block, parent_root, block_root) = rig.rand_block_and_parent(); let peer_id = rig.new_connected_peer(); @@ -922,39 +1255,32 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.trigger_unknown_parent_block(peer_id, block.into()); rig.log("Fail downloading the block"); - for i in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + for _ in 0..(PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i == 0 { - let _ = rig.expect_blob_parent_request(parent_root); - } // The request fails. It should be tried again. rig.parent_lookup_failed_unavailable(id, peer_id); } rig.log("Now fail processing a block in the parent request"); - for i in 0..PROCESSING_FAILURES { + for _ in 0..PROCESSING_FAILURES { let id = rig.expect_block_parent_request(parent_root); - // Blobs are only requested in the first iteration as this test only retries blocks - if rig.after_deneb() && i != 0 { - let _ = rig.expect_blob_parent_request(parent_root); - } - assert!(!rig.failed_chains_contains(&block_root)); + // Blobs are only requested in the previous first iteration as this test only retries blocks + rig.assert_not_failed_chain(block_root); // send the right parent but fail processing rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); rig.parent_block_processed(block_root, BlockError::InvalidSignature.into()); rig.parent_lookup_block_response(id, peer_id, None); - rig.expect_penalty(peer_id); + rig.expect_penalty(peer_id, "lookup_block_processing_failure"); } - assert!(rig.failed_chains_contains(&block_root)); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.assert_not_failed_chain(block_root); + rig.expect_no_active_lookups_empty_network(); } #[test] -fn test_parent_lookup_too_deep() { +fn test_parent_lookup_too_deep_grow_ancestor() { let mut rig = TestRig::test_setup(); - let mut blocks = rig.rand_blockchain(parent_lookup::PARENT_DEPTH_TOLERANCE); + let mut blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE); let peer_id = rig.new_connected_peer(); let trigger_block = blocks.pop().unwrap(); @@ -962,7 +1288,7 @@ fn test_parent_lookup_too_deep() { rig.trigger_unknown_parent_block(peer_id, trigger_block); for block in blocks.into_iter().rev() { - let id = rig.expect_parent_request_block_and_blobs(block.canonical_root()); + let id = rig.expect_block_parent_request(block.canonical_root()); // the block rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); // the stream termination @@ -976,19 +1302,94 @@ fn test_parent_lookup_too_deep() { ) } - rig.expect_penalty(peer_id); - 
assert!(rig.failed_chains_contains(&chain_hash)); + rig.expect_penalty(peer_id, "chain_too_long"); + rig.assert_failed_chain(chain_hash); } #[test] -fn test_parent_lookup_disconnection() { +fn test_parent_lookup_too_deep_grow_tip() { let mut rig = TestRig::test_setup(); + let blocks = rig.rand_blockchain(PARENT_DEPTH_TOLERANCE - 1); let peer_id = rig.new_connected_peer(); - let trigger_block = rig.rand_block(); - rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); + let tip = blocks.last().unwrap().clone(); + + for block in blocks.into_iter() { + let block_root = block.canonical_root(); + rig.trigger_unknown_block_from_attestation(block_root, peer_id); + let id = rig.expect_block_parent_request(block_root); + rig.single_lookup_block_response(id, peer_id, Some(block.clone())); + rig.single_lookup_block_response(id, peer_id, None); + rig.expect_block_process(ResponseType::Block); + rig.single_block_component_processed( + id.lookup_id, + BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), + ); + } + rig.expect_penalty(peer_id, "chain_too_long"); + rig.assert_failed_chain(tip.canonical_root()); +} + +#[test] +fn test_lookup_peer_disconnected_no_peers_left_while_request() { + let mut rig = TestRig::test_setup(); + let peer_id = rig.new_connected_peer(); + let trigger_block = rig.rand_block(); + rig.trigger_unknown_parent_block(peer_id, trigger_block.into()); rig.peer_disconnected(peer_id); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.rpc_error_all_active_requests(peer_id); + // Erroring all rpc requests and disconnecting the peer shouldn't remove the requests + // from the lookups map as they can still progress. + rig.assert_single_lookups_count(2); +} + +#[test] +fn test_lookup_disconnection_peer_left() { + let mut rig = TestRig::test_setup(); + let peer_ids = (0..2).map(|_| rig.new_connected_peer()).collect::>(); + let disconnecting_peer = *peer_ids.first().unwrap(); + let block_root = Hash256::random(); + // lookup should have two peers associated with the same block + for peer_id in peer_ids.iter() { + rig.trigger_unknown_block_from_attestation(block_root, *peer_id); + } + // Disconnect the first peer only, which is the one handling the request + rig.peer_disconnected(disconnecting_peer); + rig.rpc_error_all_active_requests(disconnecting_peer); + rig.assert_single_lookups_count(1); +} + +#[test] +fn test_lookup_add_peers_to_parent() { + let mut r = TestRig::test_setup(); + let peer_id_1 = r.new_connected_peer(); + let peer_id_2 = r.new_connected_peer(); + let blocks = r.rand_blockchain(5); + let last_block_root = blocks.last().unwrap().canonical_root(); + // Create a chain of lookups + for block in &blocks { + r.trigger_unknown_parent_block(peer_id_1, block.clone()); + } + r.trigger_unknown_block_from_attestation(last_block_root, peer_id_2); + for block in blocks.iter().take(blocks.len() - 1) { + // Parent has the original unknown parent event peer + new peer + r.assert_lookup_peers(block.canonical_root(), vec![peer_id_1, peer_id_2]); + } + // Child lookup only has the unknown attestation peer + r.assert_lookup_peers(last_block_root, vec![peer_id_2]); +} + +#[test] +fn test_skip_creating_failed_parent_lookup() { + let mut rig = TestRig::test_setup(); + let (_, block, parent_root, _) = rig.rand_block_and_parent(); + let peer_id = rig.new_connected_peer(); + rig.insert_failed_chain(parent_root); + rig.trigger_unknown_parent_block(peer_id, block.into()); + // Expect single penalty for peer, despite dropping two lookups + 
rig.expect_single_penalty(peer_id, "failed_chain"); + // Both current and parent lookup should be rejected + rig.expect_no_active_lookups(); } #[test] @@ -1000,7 +1401,7 @@ fn test_single_block_lookup_ignored_response() { // Trigger the request rig.trigger_unknown_block_from_attestation(block.canonical_root(), peer_id); - let id = rig.expect_lookup_request_block_and_blobs(block.canonical_root()); + let id = rig.expect_block_lookup_request(block.canonical_root()); // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. @@ -1015,9 +1416,8 @@ fn test_single_block_lookup_ignored_response() { // after processing. rig.single_lookup_block_response(id, peer_id, None); // Send an Ignored response, the request should be dropped - rig.single_block_component_processed(id, BlockProcessingResult::Ignored); - rig.expect_empty_network(); - assert_eq!(rig.active_single_lookups_count(), 0); + rig.single_block_component_processed(id.lookup_id, BlockProcessingResult::Ignored); + rig.expect_no_active_lookups_empty_network(); } #[test] @@ -1028,8 +1428,10 @@ fn test_parent_lookup_ignored_response() { let peer_id = rig.new_connected_peer(); // Trigger the request - rig.trigger_unknown_parent_block(peer_id, block.into()); - let id = rig.expect_parent_request_block_and_blobs(parent_root); + rig.trigger_unknown_parent_block(peer_id, block.clone().into()); + let id = rig.expect_block_parent_request(parent_root); + // Note: single block lookup for current `block` does not trigger any request because it does + // not have blobs, and the block is already cached // Peer sends the right block, it should be sent for processing. Peer should not be penalized. rig.parent_lookup_block_response(id, peer_id, Some(parent.into())); @@ -1039,7 +1441,7 @@ fn test_parent_lookup_ignored_response() { // Return an Ignored result. The request should be dropped rig.parent_block_processed(block_root, BlockProcessingResult::Ignored); rig.expect_empty_network(); - assert_eq!(rig.active_parent_lookups_count(), 0); + rig.expect_no_active_lookups(); } /// This is a regression test. @@ -1056,8 +1458,8 @@ fn test_same_chain_race_condition() { let chain_hash = trigger_block.canonical_root(); rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); - for (i, block) in blocks.into_iter().rev().enumerate() { - let id = rig.expect_parent_request_block_and_blobs(block.canonical_root()); + for (i, block) in blocks.clone().into_iter().rev().enumerate() { + let id = rig.expect_block_parent_request(block.canonical_root()); // the block rig.parent_lookup_block_response(id, peer_id, Some(block.clone())); // the stream termination @@ -1066,41 +1468,137 @@ fn test_same_chain_race_condition() { rig.expect_block_process(ResponseType::Block); // the processing result if i + 2 == depth { - // one block was removed + rig.log(&format!("Block {i} was removed and is already known")); rig.parent_block_processed( chain_hash, BlockError::BlockIsAlreadyKnown(block.canonical_root()).into(), ) } else { + rig.log(&format!("Block {i} ParentUnknown")); rig.parent_block_processed( chain_hash, BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(), ) } - rig.assert_parent_lookups_consistency(); } - // Processing succeeds, now the rest of the chain should be sent for processing. - rig.expect_parent_chain_process(); - // Try to get this block again while the chain is being processed. We should not request it again. 
let peer_id = rig.new_connected_peer(); - rig.trigger_unknown_parent_block(peer_id, trigger_block); - rig.assert_parent_lookups_consistency(); + rig.trigger_unknown_parent_block(peer_id, trigger_block.clone()); + rig.expect_empty_network(); - rig.parent_chain_processed_success(chain_hash); - assert_eq!(rig.active_parent_lookups_count(), 0); + // Add a peer to the tip child lookup which has zero peers + rig.trigger_unknown_block_from_attestation(trigger_block.canonical_root(), peer_id); + + rig.log("Processing succeeds, now the rest of the chain should be sent for processing."); + for block in blocks.iter().skip(1).chain(&[trigger_block]) { + rig.expect_parent_chain_process(); + rig.single_block_component_processed_imported(block.canonical_root()); + } + rig.expect_no_active_lookups_empty_network(); +} + +#[test] +fn block_in_da_checker_skips_download() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_da_checker(block.into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should not trigger block request + let id = r.expect_blob_lookup_request(block_root); + r.expect_empty_network(); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +#[test] +fn block_in_processing_cache_becomes_invalid() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_processing_cache(block.clone().into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); + // Should not trigger block request + r.expect_empty_network(); + // Simulate invalid block, removing it from processing cache + r.simulate_block_gossip_processing_becomes_invalid(block_root); + // Should download block, then issue blobs request + r.complete_lookup_block_download(block); + // Should not trigger block or blob request + r.expect_empty_network(); + r.complete_lookup_block_import_valid(block_root, false); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +#[test] +fn block_in_processing_cache_becomes_valid_imported() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + r.insert_block_to_processing_cache(block.clone().into()); + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should trigger blob request + let id = r.expect_blob_lookup_request(block_root); + // Should not trigger block request + r.expect_empty_network(); + // Resolve the block from processing step + r.simulate_block_gossip_processing_becomes_valid_missing_components(block.into()); + // Should not trigger block or blob request + r.expect_empty_network(); + // Resolve blob and expect lookup completed + r.complete_single_lookup_blob_lookup_valid(id, peer_id, blobs, true); + r.expect_no_active_lookups(); +} + +// IGNORE: wait for change that delays blob fetching to knowing the 
block +#[ignore] +#[test] +fn blobs_in_da_checker_skip_download() { + let Some(mut r) = TestRig::test_setup_after_deneb() else { + return; + }; + let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); + let block_root = block.canonical_root(); + let peer_id = r.new_connected_peer(); + for blob in blobs { + r.insert_blob_to_da_checker(blob); + } + r.trigger_unknown_block_from_attestation(block_root, peer_id); + // Should download and process the block + r.complete_single_lookup_block_valid(block, true); + // Should not trigger blob request + r.expect_empty_network(); + r.expect_no_active_lookups(); } mod deneb_only { use super::*; - use beacon_chain::data_availability_checker::AvailabilityCheckError; + use beacon_chain::{ + block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + }; use ssz_types::VariableList; + use std::collections::VecDeque; struct DenebTester { rig: TestRig, block: Arc>, blobs: Vec>>, + parent_block_roots: Vec, parent_block: VecDeque>>, parent_blobs: VecDeque>>>, unknown_parent_block: Option>>, @@ -1116,16 +1614,16 @@ mod deneb_only { enum RequestTrigger { AttestationUnknownBlock, - GossipUnknownParentBlock { num_parents: usize }, - GossipUnknownParentBlob { num_parents: usize }, + GossipUnknownParentBlock(usize), + GossipUnknownParentBlob(usize), } impl RequestTrigger { fn num_parents(&self) -> usize { match self { RequestTrigger::AttestationUnknownBlock => 0, - RequestTrigger::GossipUnknownParentBlock { num_parents } => *num_parents, - RequestTrigger::GossipUnknownParentBlob { num_parents } => *num_parents, + RequestTrigger::GossipUnknownParentBlock(num_parents) => *num_parents, + RequestTrigger::GossipUnknownParentBlob(num_parents) => *num_parents, } } } @@ -1143,6 +1641,7 @@ mod deneb_only { let num_parents = request_trigger.num_parents(); let mut parent_block_chain = VecDeque::with_capacity(num_parents); let mut parent_blobs_chain = VecDeque::with_capacity(num_parents); + let mut parent_block_roots = vec![]; for _ in 0..num_parents { // Set the current block as the parent. let parent_root = block.canonical_root(); @@ -1150,6 +1649,7 @@ mod deneb_only { let parent_blobs = blobs.clone(); parent_block_chain.push_front(parent_block); parent_blobs_chain.push_front(parent_blobs); + parent_block_roots.push(parent_root); // Create the next block. let (child_block, child_blobs) = @@ -1173,8 +1673,7 @@ mod deneb_only { peer_id, block_root, )); let block_req_id = rig.expect_block_lookup_request(block_root); - let blob_req_id = rig.expect_blob_lookup_request(block_root); - (Some(block_req_id), Some(blob_req_id), None, None) + (Some(block_req_id), None, None, None) } RequestTrigger::GossipUnknownParentBlock { .. } => { rig.send_sync_message(SyncMessage::UnknownParentBlock( @@ -1184,33 +1683,18 @@ mod deneb_only { )); let parent_root = block.parent_root(); - let blob_req_id = rig.expect_blob_lookup_request(block_root); let parent_block_req_id = rig.expect_block_parent_request(parent_root); - let parent_blob_req_id = rig.expect_blob_parent_request(parent_root); rig.expect_empty_network(); // expect no more requests - ( - None, - Some(blob_req_id), - Some(parent_block_req_id), - Some(parent_blob_req_id), - ) + (None, None, Some(parent_block_req_id), None) } RequestTrigger::GossipUnknownParentBlob { .. 
} => { let single_blob = blobs.first().cloned().unwrap(); let parent_root = single_blob.block_parent_root(); rig.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, single_blob)); - let block_req_id = rig.expect_block_lookup_request(block_root); - let blobs_req_id = rig.expect_blob_lookup_request(block_root); let parent_block_req_id = rig.expect_block_parent_request(parent_root); - let parent_blob_req_id = rig.expect_blob_parent_request(parent_root); rig.expect_empty_network(); // expect no more requests - ( - Some(block_req_id), - Some(blobs_req_id), - Some(parent_block_req_id), - Some(parent_blob_req_id), - ) + (None, None, Some(parent_block_req_id), None) } }; @@ -1220,6 +1704,7 @@ mod deneb_only { blobs, parent_block: parent_block_chain, parent_blobs: parent_blobs_chain, + parent_block_roots, unknown_parent_block: None, unknown_parent_blobs: None, peer_id, @@ -1232,6 +1717,18 @@ mod deneb_only { }) } + fn log(self, msg: &str) -> Self { + self.rig.log(msg); + self + } + + fn trigger_unknown_block_from_attestation(mut self) -> Self { + let block_root = self.block.canonical_root(); + self.rig + .trigger_unknown_block_from_attestation(block_root, self.peer_id); + self + } + fn parent_block_response(mut self) -> Self { self.rig.expect_empty_network(); let block = self.parent_block.pop_front().unwrap().clone(); @@ -1242,10 +1739,27 @@ mod deneb_only { Some(block), ); - assert_eq!(self.rig.active_parent_lookups_count(), 1); + self.rig.assert_parent_lookups_count(1); self } + fn parent_block_response_expect_blobs(mut self) -> Self { + self.rig.expect_empty_network(); + let block = self.parent_block.pop_front().unwrap().clone(); + let _ = self.unknown_parent_block.insert(block.clone()); + self.rig.parent_lookup_block_response( + self.parent_block_req_id.expect("parent request id"), + self.peer_id, + Some(block), + ); + + // Expect blobs request after sending block + let s = self.expect_parent_blobs_request(); + + s.rig.assert_parent_lookups_count(1); + s + } + fn parent_blob_response(mut self) -> Self { let blobs = self.parent_blobs.pop_front().unwrap(); let _ = self.unknown_parent_blobs.insert(blobs.clone()); @@ -1258,7 +1772,7 @@ mod deneb_only { assert_eq!(self.rig.active_parent_lookups_count(), 1); } self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("blob request id"), + self.parent_blob_req_id.expect("parent blob request id"), self.peer_id, None, ); @@ -1267,7 +1781,7 @@ mod deneb_only { } fn block_response_triggering_process(self) -> Self { - let mut me = self.block_response(); + let mut me = self.block_response_and_expect_blob_request(); me.rig.expect_block_process(ResponseType::Block); // The request should still be active. @@ -1275,7 +1789,7 @@ mod deneb_only { me } - fn block_response(mut self) -> Self { + fn block_response_and_expect_blob_request(mut self) -> Self { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. self.rig.single_lookup_block_response( @@ -1283,21 +1797,27 @@ mod deneb_only { self.peer_id, Some(self.block.clone()), ); - self.rig.expect_empty_network(); + // After responding with block the node will issue a blob request + let mut s = self.expect_blobs_request(); + + s.rig.expect_empty_network(); // The request should still be active. 
- assert_eq!(self.rig.active_single_lookups_count(), 1); - self + s.rig.assert_lookup_is_active(s.block.canonical_root()); + s } fn blobs_response(mut self) -> Self { + self.rig + .log(&format!("blobs response {}", self.blobs.len())); for blob in &self.blobs { self.rig.single_lookup_blob_response( self.blob_req_id.expect("blob request id"), self.peer_id, Some(blob.clone()), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig + .assert_lookup_is_active(self.block.canonical_root()); } self.rig.single_lookup_blob_response( self.blob_req_id.expect("blob request id"), @@ -1338,21 +1858,35 @@ mod deneb_only { self } - fn empty_parent_block_response(mut self) -> Self { - self.rig.parent_lookup_block_response( - self.parent_block_req_id.expect("block request id"), + fn empty_parent_blobs_response(mut self) -> Self { + self.rig.parent_lookup_blob_response( + self.parent_blob_req_id.expect("blob request id"), self.peer_id, None, ); self } - fn empty_parent_blobs_response(mut self) -> Self { - self.rig.parent_lookup_blob_response( - self.parent_blob_req_id.expect("blob request id"), - self.peer_id, - None, + fn block_missing_components(mut self) -> Self { + self.rig.single_block_component_processed( + self.block_req_id.expect("block request id").lookup_id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + self.block.slot(), + self.block_root, + )), + ); + self.rig.expect_empty_network(); + self.rig.assert_single_lookups_count(1); + self + } + + fn blob_imported(mut self) -> Self { + self.rig.single_blob_component_processed( + self.blob_req_id.expect("blob request id").lookup_id, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), ); + self.rig.expect_empty_network(); + self.rig.assert_single_lookups_count(0); self } @@ -1360,25 +1894,61 @@ mod deneb_only { // Missing blobs should be the request is not removed, the outstanding blobs request should // mean we do not send a new request. 
self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id + .or(self.blob_req_id) + .expect("block request id") + .lookup_id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), ); self.rig.expect_empty_network(); - assert_eq!(self.rig.active_single_lookups_count(), 0); + self.rig.assert_single_lookups_count(0); self } fn parent_block_imported(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_block_imported {parent_root:?}")); self.rig.parent_block_processed( self.block_root, - BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(self.block_root)), + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), ); - self.rig.expect_empty_network(); - assert_eq!(self.rig.active_parent_lookups_count(), 0); + self.rig.expect_no_requests_for(parent_root); + self.rig.assert_parent_lookups_count(0); + self + } + + fn parent_block_missing_components(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_block_missing_components {parent_root:?}")); + self.rig.parent_block_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( + Slot::new(0), + parent_root, + )), + ); + self.rig.expect_no_requests_for(parent_root); + self + } + + fn parent_blob_imported(mut self) -> Self { + let parent_root = *self.parent_block_roots.first().unwrap(); + self.rig + .log(&format!("parent_blob_imported {parent_root:?}")); + self.rig.parent_blob_processed( + self.block_root, + BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(parent_root)), + ); + + self.rig.expect_no_requests_for(parent_root); + self.rig.assert_parent_lookups_count(0); self } fn parent_block_unknown_parent(mut self) -> Self { + self.rig.log("parent_block_unknown_parent"); let block = self.unknown_parent_block.take().unwrap(); // Now this block is the one we expect requests from self.block = block.clone(); @@ -1407,50 +1977,69 @@ mod deneb_only { fn invalid_block_processed(mut self) -> Self { self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id.expect("block request id").lookup_id, BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig.assert_single_lookups_count(1); self } fn invalid_blob_processed(mut self) -> Self { - self.rig.single_block_component_processed( - self.blob_req_id.expect("blob request id"), + self.rig.log("invalid_blob_processed"); + self.rig.single_blob_component_processed( + self.blob_req_id.expect("blob request id").lookup_id, BlockProcessingResult::Err(BlockError::AvailabilityCheck( AvailabilityCheckError::KzgVerificationFailed, )), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + self.rig.assert_single_lookups_count(1); self } fn missing_components_from_block_request(mut self) -> Self { self.rig.single_block_component_processed( - self.block_req_id.expect("block request id"), + self.block_req_id.expect("block request id").lookup_id, BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( self.slot, self.block_root, )), ); - assert_eq!(self.rig.active_single_lookups_count(), 1); + // Add block to da_checker so blobs request can continue + self.rig.insert_block_to_da_checker(self.block.clone()); + + self.rig.assert_single_lookups_count(1); self } - fn 
missing_components_from_blob_request(mut self) -> Self { - self.rig.single_blob_component_processed( - self.blob_req_id.expect("blob request id"), - BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents( - self.slot, - self.block_root, - )), - ); - assert_eq!(self.rig.active_single_lookups_count(), 1); - self + fn complete_current_block_and_blobs_lookup(self) -> Self { + self.expect_block_request() + .block_response_and_expect_blob_request() + .blobs_response() + // TODO: Should send blobs for processing + .expect_block_process() + .block_imported() } - fn expect_penalty(mut self) -> Self { - self.rig.expect_penalty(self.peer_id); + fn parent_block_then_empty_parent_blobs(self) -> Self { + self.log( + " Return empty blobs for parent, block errors with missing components, downscore", + ) + .parent_block_response() + .expect_parent_blobs_request() + .empty_parent_blobs_response() + .expect_penalty("NotEnoughResponsesReturned") + .log("Re-request parent blobs, succeed and import parent") + .expect_parent_blobs_request() + .parent_blob_response() + .expect_block_process() + .parent_block_missing_components() + // Insert new peer into child request before completing parent + .trigger_unknown_block_from_attestation() + .parent_blob_imported() + } + + fn expect_penalty(mut self, expect_penalty_msg: &'static str) -> Self { + self.rig.expect_penalty(self.peer_id, expect_penalty_msg); self } fn expect_no_penalty(mut self) -> Self { @@ -1506,14 +2095,14 @@ mod deneb_only { self.blobs.push(first_blob); self } - fn expect_parent_chain_process(mut self) -> Self { - self.rig.expect_parent_chain_process(); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self } + fn expect_no_active_lookups(self) -> Self { + self.rig.expect_no_active_lookups(); + self + } fn search_parent_dup(mut self) -> Self { self.rig .trigger_unknown_parent_block(self.peer_id, self.block.clone()); @@ -1526,45 +2115,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .block_response_triggering_process() + .block_response_and_expect_blob_request() .blobs_response() + .block_missing_components() // blobs not yet imported .blobs_response_was_valid() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_blobs_returned_first_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .block_response_triggering_process() - .block_imported(); - } - - #[test] - fn single_block_and_blob_lookup_empty_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .empty_block_response() - .expect_penalty() - .expect_block_request() - .expect_no_blobs_request() - .empty_blobs_response() - .expect_empty_beacon_processor() - .expect_no_penalty() - .expect_no_block_request() - .expect_no_blobs_request() - .block_response_triggering_process() - .missing_components_from_block_request(); + .blob_imported(); // now blobs resolve as imported } #[test] @@ -1572,48 +2128,28 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .block_response_triggering_process() + .block_response_and_expect_blob_request() .missing_components_from_block_request() .empty_blobs_response() - .missing_components_from_blob_request() - 
.expect_penalty() + .expect_penalty("NotEnoughResponsesReturned") .expect_blobs_request() .expect_no_block_request(); } - #[test] - fn single_blob_response_then_empty_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .blobs_response() - .blobs_response_was_valid() - .expect_no_penalty_and_no_requests() - .missing_components_from_blob_request() - .empty_block_response() - .expect_penalty() - .expect_block_request() - .expect_no_blobs_request(); - } - #[test] fn single_invalid_block_response_then_blob_response_attestation() { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .invalid_block_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") .expect_block_request() .expect_no_blobs_request() .blobs_response() - .missing_components_from_blob_request() + // blobs not sent for processing until the block is processed .expect_no_penalty_and_no_requests(); } @@ -1622,13 +2158,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .missing_components_from_block_request() .blobs_response() .invalid_blob_processed() - .expect_penalty() + .expect_penalty("lookup_blobs_processing_failure") .expect_blobs_request() .expect_no_block_request(); } @@ -1638,14 +2173,12 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .missing_components_from_block_request() .invalidate_blobs_too_few() .blobs_response() - .missing_components_from_blob_request() - .expect_penalty() + .expect_penalty("NotEnoughResponsesReturned") .expect_blobs_request() .expect_no_block_request(); } @@ -1655,303 +2188,156 @@ mod deneb_only { let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester .block_response_triggering_process() .invalidate_blobs_too_many() .blobs_response() - .expect_penalty() - .expect_blobs_request() + .expect_penalty("TooManyResponses") + // Network context returns "download success" because the request has enough blobs + it + // downscores the peer for returning too many. 
.expect_no_block_request(); } - #[test] - fn too_few_blobs_response_then_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .invalidate_blobs_too_few() - .blobs_response() - .blobs_response_was_valid() - .expect_no_penalty_and_no_requests() - .block_response_triggering_process(); - } - - #[test] - fn too_many_blobs_response_then_block_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - - tester - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty() - .expect_blobs_request() - .expect_no_block_request() - .block_response_triggering_process(); - } - + // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } + // Test peer returning invalid (processing) block, expect retry #[test] fn parent_block_invalid_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() - .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .invalid_parent_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } + // Tests that if a peer does not respond with a block, we downscore and retry the block only #[test] - fn parent_block_and_blob_lookup_parent_returned_first() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { - return; - }; - - tester - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .blobs_response() - .expect_parent_chain_process(); - } - - #[test] - fn parent_block_and_blob_lookup_child_returned_first() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { - return; - }; - - tester - .blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); - } - - #[test] - fn empty_parent_block_then_parent_blob() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + fn empty_block_is_retried() { + let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { return; }; - tester - .empty_parent_block_response() - .expect_penalty() - .expect_parent_block_request() + .empty_block_response() + .expect_penalty("NoResponseReturned") + .expect_block_request() .expect_no_blobs_request() - .parent_blob_response() - .expect_empty_beacon_processor() - 
.parent_block_response() - .expect_block_process() - .parent_block_imported() + .block_response_and_expect_blob_request() .blobs_response() - .expect_parent_chain_process(); + .block_imported() + .expect_no_active_lookups(); } #[test] - fn empty_parent_blobs_then_parent_block() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + fn parent_block_then_empty_parent_blobs() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester - .blobs_response() - .empty_parent_blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .expect_penalty() - .expect_parent_blobs_request() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); + .parent_block_then_empty_parent_blobs() + .log("resolve original block trigger blobs request and import") + // Should not have block request, it is cached + .expect_blobs_request() + // TODO: Should send blobs for processing + .block_imported() + .expect_no_active_lookups(); } #[test] fn parent_blob_unknown_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor(); } #[test] fn parent_blob_invalid_parent() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_block_process() .invalid_parent_processed() - .expect_penalty() + .expect_penalty("lookup_block_processing_failure") .expect_parent_block_request() - .expect_parent_blobs_request() + // blobs are not sent until block is processed .expect_empty_beacon_processor(); } #[test] fn parent_block_and_blob_lookup_parent_returned_first_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { - return; - }; - - tester - .parent_block_response() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .block_response() - .expect_parent_chain_process(); - } - - #[test] - fn parent_block_and_blob_lookup_child_returned_first_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() - .expect_no_penalty_and_no_requests() .parent_block_response() + .expect_parent_blobs_request() .parent_blob_response() .expect_block_process() + .trigger_unknown_block_from_attestation() .parent_block_imported() - .expect_parent_chain_process(); - } - - #[test] - fn empty_parent_block_then_parent_blob_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { - return; - }; - - tester - .empty_parent_block_response() - 
.expect_penalty() - .expect_parent_block_request() - .expect_no_blobs_request() - .parent_blob_response() - .expect_empty_beacon_processor() - .parent_block_response() - .expect_block_process() - .parent_block_imported() - .block_response() - .expect_parent_chain_process(); + .complete_current_block_and_blobs_lookup() + .expect_no_active_lookups(); } #[test] - fn empty_parent_blobs_then_parent_block_blob_trigger() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + fn parent_block_then_empty_parent_blobs_blob_trigger() { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester - .block_response() - .empty_parent_blobs_response() - .expect_no_penalty_and_no_requests() - .parent_block_response() - .expect_penalty() - .expect_parent_blobs_request() - .parent_blob_response() - .expect_block_process() - .parent_block_imported() - .expect_parent_chain_process(); + .parent_block_then_empty_parent_blobs() + .log("resolve original block trigger blobs request and import") + .complete_current_block_and_blobs_lookup() + .expect_no_active_lookups(); } #[test] fn parent_blob_unknown_parent_chain() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 2 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(2)) else { return; }; - tester - .block_response() .expect_empty_beacon_processor() - .parent_block_response() + .parent_block_response_expect_blobs() .parent_blob_response() .expect_no_penalty() .expect_block_process() .parent_block_unknown_parent() .expect_parent_block_request() - .expect_parent_blobs_request() .expect_empty_beacon_processor() .parent_block_response() + .expect_parent_blobs_request() .parent_blob_response() .expect_no_penalty() .expect_block_process(); @@ -1959,12 +2345,9 @@ mod deneb_only { #[test] fn unknown_parent_block_dup() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlock { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlock(1)) else { return; }; - tester .search_parent_dup() .expect_no_blobs_request() @@ -1973,18 +2356,18 @@ mod deneb_only { #[test] fn unknown_parent_blob_dup() { - let Some(tester) = - DenebTester::new(RequestTrigger::GossipUnknownParentBlob { num_parents: 1 }) - else { + let Some(tester) = DenebTester::new(RequestTrigger::GossipUnknownParentBlob(1)) else { return; }; - tester .search_parent_dup() .expect_no_blobs_request() .expect_no_block_request(); } + // This test no longer applies, we don't issue requests for child lookups + // Keep for after updating rules on fetching blocks only first + #[ignore] #[test] fn no_peer_penalty_when_rpc_response_already_known_from_gossip() { let Some(mut r) = TestRig::test_setup_after_deneb() else { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 6a3b568c1c4..f31f2921ea2 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,4 +1,5 @@ use beacon_chain::block_verification_types::RpcBlock; +use lighthouse_network::PeerId; use ssz_types::VariableList; use std::{collections::VecDeque, sync::Arc}; use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; @@ -17,16 +18,19 @@ pub struct BlocksAndBlobsRequestInfo { is_sidecars_stream_terminated: bool, /// Used to determine if this accumulator should 
wait for a sidecars stream termination
     request_type: ByRangeRequestType,
+    /// The peer the request was made to.
+    pub(crate) peer_id: PeerId,
 }

 impl<E: EthSpec> BlocksAndBlobsRequestInfo<E> {
-    pub fn new(request_type: ByRangeRequestType) -> Self {
+    pub fn new(request_type: ByRangeRequestType, peer_id: PeerId) -> Self {
         Self {
             accumulated_blocks: <_>::default(),
             accumulated_sidecars: <_>::default(),
             is_blocks_stream_terminated: <_>::default(),
             is_sidecars_stream_terminated: <_>::default(),
             request_type,
+            peer_id,
         }
     }
@@ -109,12 +113,14 @@ mod tests {
     use super::BlocksAndBlobsRequestInfo;
     use crate::sync::range_sync::ByRangeRequestType;
     use beacon_chain::test_utils::{generate_rand_block_and_blobs, NumBlobs};
+    use lighthouse_network::PeerId;
     use rand::SeedableRng;
     use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E};

     #[test]
     fn no_blobs_into_responses() {
-        let mut info = BlocksAndBlobsRequestInfo::<E>::new(ByRangeRequestType::Blocks);
+        let peer_id = PeerId::random();
+        let mut info = BlocksAndBlobsRequestInfo::<E>::new(ByRangeRequestType::Blocks, peer_id);
         let mut rng = XorShiftRng::from_seed([42; 16]);
         let blocks = (0..4)
            .map(|_| generate_rand_block_and_blobs::<E>(ForkName::Base, NumBlobs::None, &mut rng).0)
            .collect::<Vec<_>>();
@@ -130,4 +136,32 @@
         assert!(info.is_finished());
         info.into_responses().unwrap();
     }
+
+    #[test]
+    fn empty_blobs_into_responses() {
+        let peer_id = PeerId::random();
+        let mut info =
+            BlocksAndBlobsRequestInfo::<E>::new(ByRangeRequestType::BlocksAndBlobs, peer_id);
+        let mut rng = XorShiftRng::from_seed([42; 16]);
+        let blocks = (0..4)
+            .map(|_| {
+                // Always generate some blobs.
+                generate_rand_block_and_blobs::<E>(ForkName::Deneb, NumBlobs::Number(3), &mut rng).0
+            })
+            .collect::<Vec<_>>();
+
+        // Send blocks and the stream terminator
+        for block in blocks {
+            info.add_block_response(Some(block.into()));
+        }
+        info.add_block_response(None);
+        // Expect no blobs returned
+        info.add_sidecar_response(None);
+
+        // Assert the response is finished and RpcBlocks can be constructed, even if blobs weren't returned.
+        // This makes sure we don't expect blobs here when they have expired. Checking this logic should
+        // be handled elsewhere.
+        assert!(info.is_finished());
+        info.into_responses().unwrap();
+    }
 }
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index 15b96c52b10..dd4fa56d537 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -34,7 +34,6 @@
 //! search for the block and subsequently search for parents if needed.
use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; -use super::block_lookups::common::LookupType; use super::block_lookups::BlockLookups; use super::network_context::{BlockOrBlob, RangeRequestId, RpcEvent, SyncNetworkContext}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; @@ -42,26 +41,28 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::block_lookups::{BlobRequestState, BlockRequestState}; +use crate::sync::block_lookups::{ + BlobRequestState, BlockComponent, BlockRequestState, DownloadResult, +}; use crate::sync::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::data_availability_checker::ChildComponents; +use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, }; use futures::StreamExt; use lighthouse_network::rpc::RPCError; +use lighthouse_network::service::api_types::{Id, SingleLookupReqId, SyncRequestId}; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; use lighthouse_network::{PeerAction, PeerId}; -use slog::{crit, debug, error, info, trace, warn, Logger}; -use std::ops::IndexMut; +use lru_cache::LRUTimeCache; +use slog::{crit, debug, error, info, o, trace, warn, Logger}; use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::blob_sidecar::FixedBlobSidecarList; use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync @@ -73,25 +74,10 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// blocks for. pub const SLOT_IMPORT_TOLERANCE: usize = 32; -pub type Id = u32; - -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct SingleLookupReqId { - pub id: Id, - pub req_counter: Id, - pub lookup_type: LookupType, -} - -/// Id of rpc requests sent by sync to the network. -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub enum RequestId { - /// Request searching for a block given a hash. - SingleBlock { id: SingleLookupReqId }, - /// Request searching for a set of blobs given a hash. - SingleBlob { id: SingleLookupReqId }, - /// Range request that is composed by both a block range request and a blob range request. - RangeBlockAndBlobs { id: Id }, -} +/// Suppress duplicated `UnknownBlockHashFromAttestation` events for some duration of time. In +/// practice peers are likely to send the same root during a single slot. 30 seconds is a rather +/// arbitrary number that covers a full slot, but allows recovery if sync get stuck for a few slots. +const NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS: u64 = 30; #[derive(Debug)] /// A message that can be sent to the sync manager thread. @@ -101,7 +87,7 @@ pub enum SyncMessage { /// A block has been received from the RPC. RpcBlock { - request_id: RequestId, + request_id: SyncRequestId, peer_id: PeerId, beacon_block: Option>>, seen_timestamp: Duration, @@ -109,7 +95,7 @@ pub enum SyncMessage { /// A blob has been received from the RPC. 
    RpcBlob {
-        request_id: RequestId,
+        request_id: SyncRequestId,
         peer_id: PeerId,
         blob_sidecar: Option<Arc<BlobSidecar<E>>>,
         seen_timestamp: Duration,
@@ -131,7 +117,7 @@ pub enum SyncMessage {
     /// An RPC Error has occurred on a request.
     RpcError {
         peer_id: PeerId,
-        request_id: RequestId,
+        request_id: SyncRequestId,
         error: RPCError,
     },
@@ -146,6 +132,9 @@ pub enum SyncMessage {
         process_type: BlockProcessType,
         result: BlockProcessingResult<E>,
     },
+
+    /// A block from gossip has completed processing.
+    GossipBlockProcessResult { block_root: Hash256, imported: bool },
 }

 /// The type of processing specified for a received block.
@@ -153,7 +142,6 @@ pub enum SyncMessage {
 pub enum BlockProcessType {
     SingleBlock { id: Id },
     SingleBlob { id: Id },
-    ParentLookup { chain_hash: Hash256 },
 }

 #[derive(Debug)]
@@ -168,11 +156,12 @@ pub enum BlockProcessingResult {
 pub enum BatchProcessResult {
     /// The batch was completed successfully. It carries whether the sent batch contained blocks.
     Success {
-        was_non_empty: bool,
+        sent_blocks: usize,
+        imported_blocks: usize,
     },
     /// The batch processing failed. It carries whether the processing imported any block.
     FaultyFailure {
-        imported_blocks: bool,
+        imported_blocks: usize,
         penalty: PeerAction,
     },
     NonFaultyFailure,
@@ -199,6 +188,10 @@ pub struct SyncManager {
     backfill_sync: BackFillSync<T>,
     block_lookups: BlockLookups<T>,
+    /// Debounce duplicated `UnknownBlockHashFromAttestation` events for the same (root, peer)
+    /// tuple. A peer may forward us thousands of attestations, each one triggering an individual
+    /// event. Only one event is useful; the rest generate log noise and wasted cycles.
+    notified_unknown_roots: LRUTimeCache<(PeerId, Hash256)>,
     /// The logger for the import manager.
     log: Logger,
@@ -252,29 +245,45 @@ impl SyncManager {
                 beacon_chain.clone(),
                 log.clone(),
             ),
-            range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
-            backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()),
-            block_lookups: BlockLookups::new(
-                beacon_chain.data_availability_checker.clone(),
-                log.clone(),
+            range_sync: RangeSync::new(
+                beacon_chain.clone(),
+                log.new(o!("service" => "range_sync")),
+            ),
+            backfill_sync: BackFillSync::new(
+                beacon_chain.clone(),
+                network_globals,
+                log.new(o!("service" => "backfill_sync")),
             ),
+            block_lookups: BlockLookups::new(log.new(o!("service"=> "lookup_sync"))),
+            notified_unknown_roots: LRUTimeCache::new(Duration::from_secs(
+                NOTIFIED_UNKNOWN_ROOT_EXPIRY_SECONDS,
+            )),
             log: log.clone(),
         }
     }

     #[cfg(test)]
-    pub(crate) fn active_single_lookups(&self) -> Vec {
+    pub(crate) fn active_single_lookups(&self) -> Vec {
         self.block_lookups.active_single_lookups()
     }

     #[cfg(test)]
-    pub(crate) fn active_parent_lookups(&self) -> Vec {
-        self.block_lookups.active_parent_lookups()
+    pub(crate) fn active_parent_lookups(&self) -> Vec<Vec<Hash256>> {
+        self.block_lookups
+            .active_parent_lookups()
+            .iter()
+            .map(|c| c.chain.clone())
+            .collect()
     }

     #[cfg(test)]
-    pub(crate) fn failed_chains_contains(&mut self, chain_hash: &Hash256) -> bool {
-        self.block_lookups.failed_chains_contains(chain_hash)
+    pub(crate) fn get_failed_chains(&mut self) -> Vec<Hash256> {
+        self.block_lookups.get_failed_chains()
+    }
+
+    #[cfg(test)]
+    pub(crate) fn insert_failed_chain(&mut self, block_root: Hash256) {
+        self.block_lookups.insert_failed_chain(block_root);
     }

     fn network_globals(&self) -> &NetworkGlobals<T::EthSpec> {
@@ -316,16 +325,16 @@ impl SyncManager {
     }

     /// Handles RPC errors related to requests that were emitted from the sync manager.
- fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); match request_id { - RequestId::SingleBlock { id } => { + SyncRequestId::SingleBlock { id } => { self.on_single_block_response(id, peer_id, RpcEvent::RPCError(error)) } - RequestId::SingleBlob { id } => { + SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } - RequestId::RangeBlockAndBlobs { id } => { + SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { RangeRequestId::RangeSync { chain_id, batch_id } => { @@ -346,19 +355,39 @@ impl SyncManager { Err(_) => self.update_sync_state(), }, } + } else { + debug!( + self.log, + "RPC error for range request has no associated entry in network context, ungraceful disconnect"; + "peer_id" => %peer_id, + "request_id" => %id, + "error" => ?error, + ); } } } } + /// Handles a peer disconnect. + /// + /// It is important that a peer disconnect retries all the batches/lookups as + /// there is no way to guarantee that libp2p always emits a error along with + /// the disconnect. fn peer_disconnect(&mut self, peer_id: &PeerId) { + // Inject a Disconnected error on all requests associated with the disconnected peer + // to retry all batches/lookups + for request_id in self.network.peer_disconnected(peer_id) { + self.inject_error(*peer_id, request_id, RPCError::Disconnected); + } + + // Remove peer from all data structures self.range_sync.peer_disconnect(&mut self.network, peer_id); - self.block_lookups - .peer_disconnected(peer_id, &mut self.network); - // Regardless of the outcome, we update the sync status. let _ = self .backfill_sync .peer_disconnected(peer_id, &mut self.network); + self.block_lookups.peer_disconnected(peer_id); + + // Regardless of the outcome, we update the sync status. self.update_sync_state(); } @@ -537,6 +566,13 @@ impl SyncManager { futures::stream::iter(ee_responsiveness_watch.await).flatten() }; + // min(LOOKUP_MAX_DURATION_*) is 15 seconds. The cost of calling prune_lookups more often is + // one iteration over the single lookups HashMap. This map is supposed to be very small < 10 + // unless there is a bug. + let mut prune_lookups_interval = tokio::time::interval(Duration::from_secs(15)); + + let mut register_metrics_interval = tokio::time::interval(Duration::from_secs(5)); + // process any inbound messages loop { tokio::select! 
{ @@ -546,6 +582,12 @@ impl SyncManager { Some(engine_state) = check_ee_stream.next(), if check_ee => { self.handle_new_execution_engine_state(engine_state); } + _ = prune_lookups_interval.tick() => { + self.block_lookups.prune_lookups(); + } + _ = register_metrics_interval.tick() => { + self.network.register_metrics(); + } } } } @@ -578,32 +620,38 @@ impl SyncManager { block_root, parent_root, block_slot, - block.into(), + BlockComponent::Block(DownloadResult { + value: block.block_cloned(), + block_root, + seen_timestamp: timestamp_now(), + peer_id, + }), ); } SyncMessage::UnknownParentBlob(peer_id, blob) => { let blob_slot = blob.slot(); let block_root = blob.block_root(); let parent_root = blob.block_parent_root(); - let blob_index = blob.index; - if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { - warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); - return; - } - let mut blobs = FixedBlobSidecarList::default(); - *blobs.index_mut(blob_index as usize) = Some(blob); debug!(self.log, "Received unknown parent blob message"; "block_root" => %block_root, "parent_root" => %parent_root); self.handle_unknown_parent( peer_id, block_root, parent_root, blob_slot, - ChildComponents::new(block_root, None, Some(blobs)), + BlockComponent::Blob(DownloadResult { + value: blob, + block_root, + seen_timestamp: timestamp_now(), + peer_id, + }), ); } SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => { - debug!(self.log, "Received unknown block hash message"; "block_root" => %block_root); - self.handle_unknown_block_root(peer_id, block_root); + if !self.notified_unknown_roots.contains(&(peer_id, block_root)) { + self.notified_unknown_roots.insert((peer_id, block_root)); + debug!(self.log, "Received unknown block hash message"; "block_root" => ?block_root, "peer" => ?peer_id); + self.handle_unknown_block_root(peer_id, block_root); + } } SyncMessage::Disconnect(peer_id) => { debug!(self.log, "Received disconnected message"; "peer_id" => %peer_id); @@ -617,25 +665,17 @@ impl SyncManager { SyncMessage::BlockComponentProcessed { process_type, result, - } => match process_type { - BlockProcessType::SingleBlock { id } => self - .block_lookups - .single_block_component_processed::( - id, - result, - &mut self.network, - ), - BlockProcessType::SingleBlob { id } => self - .block_lookups - .single_block_component_processed::( - id, - result, - &mut self.network, - ), - BlockProcessType::ParentLookup { chain_hash } => self - .block_lookups - .parent_block_processed(chain_hash, result, &mut self.network), - }, + } => self + .block_lookups + .on_processing_result(process_type, result, &mut self.network), + SyncMessage::GossipBlockProcessResult { + block_root, + imported, + } => self.block_lookups.on_external_processing_result( + block_root, + imported, + &mut self.network, + ), SyncMessage::BatchProcessed { sync_type, result } => match sync_type { ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { self.range_sync.handle_block_process_result( @@ -661,9 +701,6 @@ impl SyncManager { } } } - ChainSegmentProcessId::ParentLookup(chain_hash) => self - .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), }, } } @@ -674,23 +711,16 @@ impl SyncManager { block_root: Hash256, parent_root: Hash256, slot: Slot, - child_components: ChildComponents, + block_component: BlockComponent, ) { match self.should_search_for_block(Some(slot), &peer_id) { Ok(_) => { - self.block_lookups.search_parent( - slot, + 
self.block_lookups.search_child_and_parent( block_root, - parent_root, + block_component, peer_id, &mut self.network, ); - self.block_lookups.search_child_block( - block_root, - child_components, - &[peer_id], - &mut self.network, - ); } Err(reason) => { debug!(self.log, "Ignoring unknown parent request"; "block_root" => %block_root, "parent_root" => %parent_root, "reason" => reason); @@ -702,7 +732,7 @@ impl SyncManager { match self.should_search_for_block(None, &peer_id) { Ok(_) => { self.block_lookups - .search_block(block_root, &[peer_id], &mut self.network); + .search_unknown_block(block_root, &[peer_id], &mut self.network); } Err(reason) => { debug!(self.log, "Ignoring unknown block request"; "block_root" => %block_root, "reason" => reason); @@ -774,11 +804,6 @@ impl SyncManager { let dropped_single_blocks_requests = self.block_lookups.drop_single_block_requests(); - // - Parent lookups: - // Disabled while in this state. We drop current requests and don't search for new - // blocks. - let dropped_parent_chain_requests = self.block_lookups.drop_parent_chain_requests(); - // - Range: // We still send found peers to range so that it can keep track of potential chains // with respect to our current peers. Range will stop processing batches in the @@ -787,10 +812,9 @@ impl SyncManager { // - Backfill: Not affected by ee states, nothing to do. // Some logs. - if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { + if dropped_single_blocks_requests > 0 { debug!(self.log, "Execution engine not online. Dropping active requests."; "dropped_single_blocks_requests" => dropped_single_blocks_requests, - "dropped_parent_chain_requests" => dropped_parent_chain_requests, ); } } @@ -799,13 +823,13 @@ impl SyncManager { fn rpc_block_received( &mut self, - request_id: RequestId, + request_id: SyncRequestId, peer_id: PeerId, block: Option>>, seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { id } => self.on_single_block_response( + SyncRequestId::SingleBlock { id } => self.on_single_block_response( id, peer_id, match block { @@ -813,10 +837,10 @@ impl SyncManager { None => RpcEvent::StreamTermination, }, ), - RequestId::SingleBlob { .. } => { + SyncRequestId::SingleBlob { .. 
} => { crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); } - RequestId::RangeBlockAndBlobs { id } => { + SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, block.into()) } } @@ -828,62 +852,29 @@ impl SyncManager { peer_id: PeerId, block: RpcEvent>>, ) { - if let Some(resp) = self.network.on_single_block_response(id, block) { - match resp { - Ok((block, seen_timestamp)) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::( - id, - peer_id, - block, - seen_timestamp, - &mut self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::( - id, - peer_id, - block, - seen_timestamp, - &mut self.network, - ), - }, - Err(error) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ), - }, - } + if let Some(resp) = self.network.on_single_block_response(id, peer_id, block) { + self.block_lookups + .on_download_response::>( + id, + peer_id, + resp, + &mut self.network, + ) } } fn rpc_blob_received( &mut self, - request_id: RequestId, + request_id: SyncRequestId, peer_id: PeerId, blob: Option>>, seen_timestamp: Duration, ) { match request_id { - RequestId::SingleBlock { .. } => { + SyncRequestId::SingleBlock { .. } => { crit!(self.log, "Single blob received during block request"; "peer_id" => %peer_id ); } - RequestId::SingleBlob { id } => self.on_single_blob_response( + SyncRequestId::SingleBlob { id } => self.on_single_blob_response( id, peer_id, match blob { @@ -891,7 +882,7 @@ impl SyncManager { None => RpcEvent::StreamTermination, }, ), - RequestId::RangeBlockAndBlobs { id } => { + SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, blob.into()) } } @@ -903,48 +894,14 @@ impl SyncManager { peer_id: PeerId, blob: RpcEvent>>, ) { - if let Some(resp) = self.network.on_single_blob_response(id, blob) { - match resp { - Ok((blobs, seen_timestamp)) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_lookup_response::( - id, - peer_id, - blobs, - seen_timestamp, - &mut self.network, - ), - LookupType::Parent => self - .block_lookups - .parent_lookup_response::( - id, - peer_id, - blobs, - seen_timestamp, - &mut self.network, - ), - }, - - Err(error) => match id.lookup_type { - LookupType::Current => self - .block_lookups - .single_block_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ), - LookupType::Parent => { - self.block_lookups.parent_lookup_failed::( - id, - &peer_id, - &mut self.network, - error, - ) - } - }, - } + if let Some(resp) = self.network.on_single_blob_response(id, peer_id, blob) { + self.block_lookups + .on_download_response::>( + id, + peer_id, + resp, + &mut self.network, + ) } } @@ -962,39 +919,32 @@ impl SyncManager { { match resp.responses { Ok(blocks) => { - for block in blocks - .into_iter() - .map(Some) - // chain the stream terminator - .chain(vec![None]) - { - match resp.sender_id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - self.range_sync.blocks_by_range_response( - &mut self.network, - peer_id, - chain_id, - batch_id, - id, - block, - ); - self.update_sync_state(); - } - RangeRequestId::BackfillSync { batch_id } => { - match self.backfill_sync.on_block_response( - &mut self.network, - batch_id, 
- &peer_id, - id, - block, - ) { - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Ok(ProcessResult::Successful) => {} - Err(_error) => { - // The backfill sync has failed, errors are reported - // within. - self.update_sync_state(); - } + match resp.sender_id { + RangeRequestId::RangeSync { chain_id, batch_id } => { + self.range_sync.blocks_by_range_response( + &mut self.network, + peer_id, + chain_id, + batch_id, + id, + blocks, + ); + self.update_sync_state(); + } + RangeRequestId::BackfillSync { batch_id } => { + match self.backfill_sync.on_block_response( + &mut self.network, + batch_id, + &peer_id, + id, + blocks, + ) { + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Ok(ProcessResult::Successful) => {} + Err(_error) => { + // The backfill sync has failed, errors are reported + // within. + self.update_sync_state(); } } } @@ -1005,7 +955,7 @@ impl SyncManager { self.network.insert_range_blocks_and_blobs_request( id, resp.sender_id, - BlocksAndBlobsRequestInfo::new(resp.request_type), + BlocksAndBlobsRequestInfo::new(resp.request_type, peer_id), ); // inform range that the request needs to be treated as failed // With time we will want to downgrade this log @@ -1016,7 +966,7 @@ impl SyncManager { "sender_id" => ?resp.sender_id, "error" => e.clone() ); - let id = RequestId::RangeBlockAndBlobs { id }; + let id = SyncRequestId::RangeBlockAndBlobs { id }; self.network.report_peer( peer_id, PeerAction::MidToleranceError, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index fc91270c1dc..df8be9f6d59 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -4,27 +4,28 @@ use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; pub use self::requests::{BlobsByRootSingleBlockRequest, BlocksByRootSingleRequest}; use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo; -use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::service::{NetworkMessage, RequestId}; +use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::manager::SingleLookupReqId; +use crate::sync::block_lookups::SingleLookupId; +use crate::sync::manager::BlockProcessType; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::validator_monitor::timestamp_now; -use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SingleLookupReqId, SyncRequestId}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; pub use requests::LookupVerifyError; -use slog::{debug, trace, warn}; +use slog::{debug, error, trace, warn}; use std::collections::hash_map::Entry; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock}; mod requests; @@ -52,34 +53,62 @@ pub enum RpcEvent { RPCError(RPCError), } -pub type RpcProcessingResult = Option>; +pub type 
RpcResponseResult = Result<(T, Duration), RpcResponseError>; -pub enum LookupFailure { +pub enum RpcResponseError { RpcError(RPCError), - LookupVerifyError(LookupVerifyError), + VerifyError(LookupVerifyError), } -impl std::fmt::Display for LookupFailure { +#[derive(Debug, PartialEq, Eq)] +pub enum RpcRequestSendError { + /// Network channel send failed + NetworkSendError, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum SendErrorProcessor { + SendError, + ProcessorNotAvailable, +} + +impl std::fmt::Display for RpcResponseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - LookupFailure::RpcError(e) => write!(f, "RPC Error: {:?}", e), - LookupFailure::LookupVerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), + RpcResponseError::RpcError(e) => write!(f, "RPC Error: {:?}", e), + RpcResponseError::VerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), } } } -impl From for LookupFailure { +impl From for RpcResponseError { fn from(e: RPCError) -> Self { - LookupFailure::RpcError(e) + RpcResponseError::RpcError(e) } } -impl From for LookupFailure { +impl From for RpcResponseError { fn from(e: LookupVerifyError) -> Self { - LookupFailure::LookupVerifyError(e) + RpcResponseError::VerifyError(e) } } +/// Sequential ID that uniquely identifies ReqResp outgoing requests +pub type ReqId = u32; + +pub enum LookupRequestResult { + /// A request is sent. Sync MUST receive an event from the network in the future for either: + /// completed response or failed request + RequestSent(ReqId), + /// No request is sent, and no further action is necessary to consider this request completed + NoRequestNeeded, + /// No request is sent, but the request is not completed. Sync MUST receive some future event + /// that makes progress on the request. For example: request is processing from a different + /// source (i.e. block received from gossip) and sync MUST receive an event with that processing + /// result. + Pending(&'static str), +} + /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. @@ -149,6 +178,46 @@ impl SyncNetworkContext { } } + /// Returns the ids of all the requests made to the given peer_id. 
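+    /// Used by the sync manager when a peer disconnects: every id returned here is failed with an
+    /// `RPCError::Disconnected` so that the corresponding lookups and range batches are retried.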
+ pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { + let failed_range_ids = + self.range_blocks_and_blobs_requests + .iter() + .filter_map(|(id, request)| { + if request.1.peer_id == *peer_id { + Some(SyncRequestId::RangeBlockAndBlobs { id: *id }) + } else { + None + } + }); + + let failed_block_ids = self + .blocks_by_root_requests + .iter() + .filter_map(|(id, request)| { + if request.peer_id == *peer_id { + Some(SyncRequestId::SingleBlock { id: *id }) + } else { + None + } + }); + let failed_blob_ids = self + .blobs_by_root_requests + .iter() + .filter_map(|(id, request)| { + if request.peer_id == *peer_id { + Some(SyncRequestId::SingleBlob { id: *id }) + } else { + None + } + }); + + failed_range_ids + .chain(failed_block_ids) + .chain(failed_blob_ids) + .collect() + } + pub fn network_globals(&self) -> &NetworkGlobals { &self.network_beacon_processor.network_globals } @@ -178,7 +247,7 @@ impl SyncNetworkContext { ); let request = Request::Status(status_message.clone()); - let request_id = RequestId::Router; + let request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, @@ -193,7 +262,7 @@ impl SyncNetworkContext { peer_id: PeerId, batch_type: ByRangeRequestType, request: BlocksByRangeRequest, - ) -> Result { + ) -> Result { let id = self.next_id(); trace!( self.log, @@ -202,11 +271,13 @@ impl SyncNetworkContext { "count" => request.count(), "peer" => %peer_id, ); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlocksByRange(request.clone()), - request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - })?; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlocksByRange(request.clone()), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { debug!( @@ -218,14 +289,16 @@ impl SyncNetworkContext { ); // Create the blob request based on the blocks request. 
-            self.send_network_msg(NetworkMessage::SendRequest {
-                peer_id,
-                request: Request::BlobsByRange(BlobsByRangeRequest {
-                    start_slot: *request.start_slot(),
-                    count: *request.count(),
-                }),
-                request_id: RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }),
-            })?;
+            self.network_send
+                .send(NetworkMessage::SendRequest {
+                    peer_id,
+                    request: Request::BlobsByRange(BlobsByRangeRequest {
+                        start_slot: *request.start_slot(),
+                        count: *request.count(),
+                    }),
+                    request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }),
+                })
+                .map_err(|_| RpcRequestSendError::NetworkSendError)?;
         }

         Ok(id)
@@ -238,10 +311,15 @@ impl SyncNetworkContext {
         batch_type: ByRangeRequestType,
         request: BlocksByRangeRequest,
         sender_id: RangeRequestId,
-    ) -> Result<Id, &'static str> {
+    ) -> Result<Id, RpcRequestSendError> {
         let id = self.blocks_by_range_request(peer_id, batch_type, request)?;
-        self.range_blocks_and_blobs_requests
-            .insert(id, (sender_id, BlocksAndBlobsRequestInfo::new(batch_type)));
+        self.range_blocks_and_blobs_requests.insert(
+            id,
+            (
+                sender_id,
+                BlocksAndBlobsRequestInfo::new(batch_type, peer_id),
+            ),
+        );
         Ok(id)
     }
@@ -271,83 +349,177 @@ impl SyncNetworkContext {
         request_id: Id,
         block_or_blob: BlockOrBlob<T::EthSpec>,
     ) -> Option<BlocksAndBlobsByRangeResponse<T::EthSpec>> {
-        match self.range_blocks_and_blobs_requests.entry(request_id) {
-            Entry::Occupied(mut entry) => {
-                let (_, info) = entry.get_mut();
-                match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-                    BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
-                }
-                if info.is_finished() {
-                    // If the request is finished, dequeue everything
-                    let (sender_id, info) = entry.remove();
-                    let request_type = info.get_request_type();
-                    Some(BlocksAndBlobsByRangeResponse {
-                        sender_id,
-                        request_type,
-                        responses: info.into_responses(),
-                    })
-                } else {
-                    None
-                }
-            }
-            Entry::Vacant(_) => None,
+        let Entry::Occupied(mut entry) = self.range_blocks_and_blobs_requests.entry(request_id)
+        else {
+            metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]);
+            return None;
+        };
+
+        let (_, info) = entry.get_mut();
+        match block_or_blob {
+            BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+        }
+        if info.is_finished() {
+            // If the request is finished, dequeue everything
+            let (sender_id, info) = entry.remove();
+            let request_type = info.get_request_type();
+            Some(BlocksAndBlobsByRangeResponse {
+                sender_id,
+                request_type,
+                responses: info.into_responses(),
+            })
+        } else {
+            None
         }
     }

+    /// Request block of `block_root` if necessary by checking:
+    /// - If the da_checker has a pending block from gossip or a previous request
+    ///
+    /// Returns `NoRequestNeeded` or `Pending` if no request needs to be made, e.g. because the
+    /// block is already being processed or imported.
     pub fn block_lookup_request(
         &mut self,
-        id: SingleLookupReqId,
+        lookup_id: SingleLookupId,
         peer_id: PeerId,
-        request: BlocksByRootSingleRequest,
-    ) -> Result<(), &'static str> {
+        block_root: Hash256,
+    ) -> Result<LookupRequestResult, RpcRequestSendError> {
+        match self.chain.get_block_process_status(&block_root) {
+            // Unknown block, continue request to download
+            BlockProcessStatus::Unknown => {}
+            // Block is known and currently processing; expect a future event with the result of
+            // processing.
+            BlockProcessStatus::NotValidated { .. } => {
+                // Lookup sync event safety: If the block is currently in the processing cache, we
+                // are guaranteed to receive a `SyncMessage::GossipBlockProcessResult` that will
+                // make progress on this lookup
+                return Ok(LookupRequestResult::Pending("block in processing cache"));
+            }
+            // Block is fully validated. If it's not yet imported it's waiting for missing block
+            // components. Consider this request completed and do nothing.
+            BlockProcessStatus::ExecutionValidated { .. } => {
+                return Ok(LookupRequestResult::NoRequestNeeded)
+            }
+        }
+
+        let req_id = self.next_id();
+        let id = SingleLookupReqId { lookup_id, req_id };
+
         debug!(
             self.log,
             "Sending BlocksByRoot Request";
             "method" => "BlocksByRoot",
-            "block_root" => ?request.0,
+            "block_root" => ?block_root,
             "peer" => %peer_id,
             "id" => ?id
         );
-        self.send_network_msg(NetworkMessage::SendRequest {
-            peer_id,
-            request: Request::BlocksByRoot(request.into_request(&self.chain.spec)),
-            request_id: RequestId::Sync(SyncRequestId::SingleBlock { id }),
-        })?;
+        let request = BlocksByRootSingleRequest(block_root);
+
+        // Lookup sync event safety: If network_send.send() returns Ok(_) we are guaranteed that
+        // eventually at least one of these 3 events will be received:
+        // - StreamTermination(request_id): handled by `Self::on_single_block_response`
+        // - RPCError(request_id): handled by `Self::on_single_block_response`
+        // - Disconnect(peer_id): handled by `Self::peer_disconnected`, which converts it to a
+        //   `RPCError(request_id)` event handled by the above method
+        self.network_send
+            .send(NetworkMessage::SendRequest {
+                peer_id,
+                request: Request::BlocksByRoot(request.into_request(&self.chain.spec)),
+                request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }),
+            })
+            .map_err(|_| RpcRequestSendError::NetworkSendError)?;

         self.blocks_by_root_requests
-            .insert(id, ActiveBlocksByRootRequest::new(request));
+            .insert(id, ActiveBlocksByRootRequest::new(request, peer_id));

-        Ok(())
+        Ok(LookupRequestResult::RequestSent(req_id))
     }

+    /// Request necessary blobs for `block_root`. Requests only the necessary blobs by checking:
+    /// - If we have a downloaded but not yet processed block
+    /// - If the da_checker has a pending block
+    /// - If the da_checker has pending blobs from gossip
+    ///
+    /// Returns `NoRequestNeeded` if no request needs to be made because we don't need to import
+    /// (more) blobs.
     pub fn blob_lookup_request(
         &mut self,
-        id: SingleLookupReqId,
+        lookup_id: SingleLookupId,
         peer_id: PeerId,
-        request: BlobsByRootSingleBlockRequest,
-    ) -> Result<(), &'static str> {
+        block_root: Hash256,
+        downloaded_block_expected_blobs: Option<usize>,
+    ) -> Result<LookupRequestResult, RpcRequestSendError> {
+        let Some(expected_blobs) = downloaded_block_expected_blobs.or_else(|| {
+            // If the block is already being processed or fully validated, retrieve how many blobs
+            // it expects. Consider any stage of the block. If the block root has been validated, we
+            // can assert that this is the correct value of `blob_kzg_commitments_count`.
+            match self.chain.get_block_process_status(&block_root) {
+                BlockProcessStatus::Unknown => None,
+                BlockProcessStatus::NotValidated(block)
+                | BlockProcessStatus::ExecutionValidated(block) => Some(block.num_expected_blobs()),
+            }
+        }) else {
+            // Wait to download the block before downloading blobs. Then we can be sure that the
+            // block has data, so there's no need to do "blind" requests for all possible blobs and
+            // later handle the case where the peer sent no blobs and should be penalized.
+            // - if `downloaded_block_expected_blobs` is Some = block is downloading or processing.
+ // - if `num_expected_blobs` returns Some = block is processed. + // + // Lookup sync event safety: Reaching this code means that a block is not in any pre-import + // cache nor in the request state of this lookup. Therefore, the block must either: (1) not + // be downloaded yet or (2) the block is already imported into the fork-choice. + // In case (1) the lookup must either successfully download the block or get dropped. + // In case (2) the block will be downloaded, processed, reach `BlockIsAlreadyKnown` and + // get dropped as completed. + return Ok(LookupRequestResult::Pending("waiting for block download")); + }; + + let imported_blob_indexes = self + .chain + .data_availability_checker + .imported_blob_indexes(&block_root) + .unwrap_or_default(); + // Include only the blob indexes not yet imported (received through gossip) + let indices = (0..expected_blobs as u64) + .filter(|index| !imported_blob_indexes.contains(index)) + .collect::>(); + + if indices.is_empty() { + // No blobs required, do not issue any request + return Ok(LookupRequestResult::NoRequestNeeded); + } + + let req_id = self.next_id(); + let id = SingleLookupReqId { lookup_id, req_id }; + debug!( self.log, "Sending BlobsByRoot Request"; "method" => "BlobsByRoot", - "block_root" => ?request.block_root, - "blob_indices" => ?request.indices, + "block_root" => ?block_root, + "blob_indices" => ?indices, "peer" => %peer_id, "id" => ?id ); - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: RequestId::Sync(SyncRequestId::SingleBlob { id }), - })?; + let request = BlobsByRootSingleBlockRequest { + block_root, + indices, + }; + + // Lookup sync event safety: Refer to `Self::block_lookup_request` `network_send.send` call + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: Request::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request)); + .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); - Ok(()) + Ok(LookupRequestResult::RequestSent(req_id)) } pub fn is_execution_engine_online(&self) -> bool { @@ -457,13 +629,15 @@ impl SyncNetworkContext { pub fn on_single_block_response( &mut self, request_id: SingleLookupReqId, + peer_id: PeerId, block: RpcEvent>>, - ) -> RpcProcessingResult>> { + ) -> Option>>> { let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); return None; }; - Some(match block { + let resp = match block { RpcEvent::Response(block, seen_timestamp) => { match request.get_mut().add_response(block) { Ok(block) => Ok((block, seen_timestamp)), @@ -482,43 +656,142 @@ impl SyncNetworkContext { request.remove(); Err(e.into()) } - }) + }; + + if let Err(RpcResponseError::VerifyError(e)) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); + } + Some(resp) } pub fn on_single_blob_response( &mut self, request_id: SingleLookupReqId, + peer_id: PeerId, blob: RpcEvent>>, - ) -> RpcProcessingResult> { + ) -> Option>> { let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); return None; }; - Some(match 
blob { - RpcEvent::Response(blob, _) => match request.get_mut().add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, timestamp_now())) - .map_err(Into::into), - Ok(None) => return None, - Err(e) => { - request.remove(); - Err(e.into()) + let resp = match blob { + RpcEvent::Response(blob, seen_timestamp) => { + let request = request.get_mut(); + match request.add_response(blob) { + Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) + .map(|blobs| (blobs, seen_timestamp)) + .map_err(|e| (e.into(), request.resolve())), + Ok(None) => return None, + Err(e) => Err((e.into(), request.resolve())), } + } + RpcEvent::StreamTermination => match request.remove().terminate() { + Ok(_) => return None, + // (err, false = not resolved) because terminate returns Ok() if resolved + Err(e) => Err((e.into(), false)), }, - RpcEvent::StreamTermination => { - // Stream terminator - match request.remove().terminate() { - Some(blobs) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, timestamp_now())) - .map_err(Into::into), - None => return None, + RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), + }; + + match resp { + Ok(resp) => Some(Ok(resp)), + // Track if this request has already returned some value downstream. Ensure that + // downstream code only receives a single Result per request. If the serving peer does + // multiple penalizable actions per request, downscore and return None. This allows to + // catch if a peer is returning more blobs than requested or if the excess blobs are + // invalid. + Err((e, resolved)) => { + if let RpcResponseError::VerifyError(e) = &e { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); + } + if resolved { + None + } else { + Some(Err(e)) } } - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }) + } + } + + pub fn send_block_for_processing( + &self, + id: Id, + block_root: Hash256, + block: RpcBlock, + duration: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending block for processing"; "block" => ?block_root, "id" => id); + // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` with this process type + beacon_processor + .send_rpc_beacon_block( + block_root, + block, + duration, + BlockProcessType::SingleBlock { id }, + ) + .map_err(|e| { + error!( + self.log, + "Failed to send sync block to processor"; + "error" => ?e + ); + SendErrorProcessor::SendError + }) + } + + pub fn send_blobs_for_processing( + &self, + id: Id, + block_root: Hash256, + blobs: FixedBlobSidecarList, + duration: Duration, + ) -> Result<(), SendErrorProcessor> { + let beacon_processor = self + .beacon_processor_if_enabled() + .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + + debug!(self.log, "Sending blobs for processing"; "block" => ?block_root, "id" => id); + // Lookup sync event safety: If `beacon_processor.send_rpc_blobs` returns Ok() sync + // must receive a single `SyncMessage::BlockComponentProcessed` event with this process type + beacon_processor + .send_rpc_blobs( + block_root, + blobs, + duration, + BlockProcessType::SingleBlob { id }, + ) + .map_err(|e| { + error!( + self.log, + "Failed to send sync blobs to processor"; + "error" => ?e + ); + SendErrorProcessor::SendError + }) + } + + pub(crate) fn register_metrics(&self) { + 
metrics::set_gauge_vec( + &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, + &["blocks_by_root"], + self.blocks_by_root_requests.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, + &["blobs_by_root"], + self.blobs_by_root_requests.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, + &["range_blocks"], + self.range_blocks_and_blobs_requests.len() as i64, + ); } } diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0522b7fa384..8387e9b0e1a 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,5 +1,8 @@ use beacon_chain::get_block_root; -use lighthouse_network::rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}; +use lighthouse_network::{ + rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}, + PeerId, +}; use std::sync::Arc; use strum::IntoStaticStr; use types::{ @@ -9,6 +12,7 @@ use types::{ #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { NoResponseReturned, + NotEnoughResponsesReturned { expected: usize, actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedBlobIndex(u64), @@ -19,13 +23,15 @@ pub enum LookupVerifyError { pub struct ActiveBlocksByRootRequest { request: BlocksByRootSingleRequest, resolved: bool, + pub(crate) peer_id: PeerId, } impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest) -> Self { + pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { Self { request, resolved: false, + peer_id, } } @@ -93,14 +99,16 @@ pub struct ActiveBlobsByRootRequest { request: BlobsByRootSingleBlockRequest, blobs: Vec>>, resolved: bool, + pub(crate) peer_id: PeerId, } impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { + pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { Self { request, blobs: vec![], resolved: false, + peer_id, } } @@ -119,7 +127,7 @@ impl ActiveBlobsByRootRequest { if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - if !blob.verify_blob_sidecar_inclusion_proof().unwrap_or(false) { + if !blob.verify_blob_sidecar_inclusion_proof() { return Err(LookupVerifyError::InvalidInclusionProof); } if !self.request.indices.contains(&blob.index) { @@ -139,11 +147,20 @@ impl ActiveBlobsByRootRequest { } } - pub fn terminate(self) -> Option>>> { + pub fn terminate(self) -> Result<(), LookupVerifyError> { if self.resolved { - None + Ok(()) } else { - Some(self.blobs) + Err(LookupVerifyError::NotEnoughResponsesReturned { + expected: self.request.indices.len(), + actual: self.blobs.len(), + }) } } + + /// Mark request as resolved (= has returned something downstream) while marking this status as + /// true for future calls. 
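// Aside (not part of the diff): the `resolve` helper just below is a
// "set-and-return-previous" flag built on `std::mem::replace`. The first call
// returns false (the request had not produced output yet) and flips the flag,
// so later error paths can tell whether a result was already sent downstream.
// Minimal sketch of the same pattern, with illustrative names only:
struct Tracker {
    resolved: bool,
}

impl Tracker {
    fn resolve(&mut self) -> bool {
        std::mem::replace(&mut self.resolved, true)
    }
}

fn main() {
    let mut t = Tracker { resolved: false };
    assert!(!t.resolve()); // first resolution: report downstream
    assert!(t.resolve()); // already resolved: callers can suppress duplicates
}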
+ pub fn resolve(&mut self) -> bool { + std::mem::replace(&mut self.resolved, true) + } } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 75cb49d176d..49e3ac3a817 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,10 +1,11 @@ -use crate::sync::manager::Id; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use lighthouse_network::rpc::methods::BlocksByRangeRequest; +use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; +use std::time::{Duration, Instant}; use strum::Display; use types::{Epoch, EthSpec, Slot}; @@ -116,9 +117,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>), + AwaitingProcessing(PeerId, Vec>, Instant), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -199,7 +200,7 @@ impl BatchInfo { /// Verifies if an incoming block belongs to this batch. pub fn is_expecting_block(&self, peer_id: &PeerId, request_id: &Id) -> bool { - if let BatchState::Downloading(expected_peer, _, expected_id) = &self.state { + if let BatchState::Downloading(expected_peer, expected_id) = &self.state { return peer_id == expected_peer && expected_id == request_id; } false @@ -209,14 +210,27 @@ impl BatchInfo { pub fn current_peer(&self) -> Option<&PeerId> { match &self.state { BatchState::AwaitingDownload | BatchState::Failed => None, - BatchState::Downloading(peer_id, _, _) - | BatchState::AwaitingProcessing(peer_id, _) + BatchState::Downloading(peer_id, _) + | BatchState::AwaitingProcessing(peer_id, _, _) | BatchState::Processing(Attempt { peer_id, .. }) | BatchState::AwaitingValidation(Attempt { peer_id, .. }) => Some(peer_id), BatchState::Poisoned => unreachable!("Poisoned batch"), } } + /// Returns the count of stored pending blocks if in awaiting processing state + pub fn pending_blocks(&self) -> usize { + match &self.state { + BatchState::AwaitingProcessing(_, blocks, _) => blocks.len(), + BatchState::AwaitingDownload + | BatchState::Downloading { .. } + | BatchState::Processing { .. } + | BatchState::AwaitingValidation { .. } + | BatchState::Poisoned + | BatchState::Failed => 0, + } + } + /// Returns a BlocksByRange request associated with the batch. pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( @@ -250,36 +264,18 @@ impl BatchInfo { &self.failed_processing_attempts } - /// Adds a block to a downloading batch. - pub fn add_block(&mut self, block: RpcBlock) -> Result<(), WrongState> { - match self.state.poison() { - BatchState::Downloading(peer, mut blocks, req_id) => { - blocks.push(block); - self.state = BatchState::Downloading(peer, blocks, req_id); - Ok(()) - } - BatchState::Poisoned => unreachable!("Poisoned batch"), - other => { - self.state = other; - Err(WrongState(format!( - "Add block for batch in wrong state {:?}", - self.state - ))) - } - } - } - /// Marks the batch as ready to be processed if the blocks are in the range. 
The number of /// received blocks is returned, or the wrong batch end on failure #[must_use = "Batch may have failed"] pub fn download_completed( &mut self, + blocks: Vec>, ) -> Result< usize, /* Received blocks */ Result<(Slot, Slot, BatchOperationOutcome), WrongState>, > { match self.state.poison() { - BatchState::Downloading(peer, blocks, _request_id) => { + BatchState::Downloading(peer, _request_id) => { // verify that blocks are in range if let Some(last_slot) = blocks.last().map(|b| b.slot()) { // the batch is non-empty @@ -311,7 +307,7 @@ impl BatchInfo { } let received = blocks.len(); - self.state = BatchState::AwaitingProcessing(peer, blocks); + self.state = BatchState::AwaitingProcessing(peer, blocks, Instant::now()); Ok(received) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -336,7 +332,7 @@ impl BatchInfo { mark_failed: bool, ) -> Result { match self.state.poison() { - BatchState::Downloading(peer, _, _request_id) => { + BatchState::Downloading(peer, _request_id) => { // register the attempt and check if the batch can be tried again if mark_failed { self.failed_download_attempts.push(peer); @@ -369,7 +365,7 @@ impl BatchInfo { ) -> Result<(), WrongState> { match self.state.poison() { BatchState::AwaitingDownload => { - self.state = BatchState::Downloading(peer, Vec::new(), request_id); + self.state = BatchState::Downloading(peer, request_id); Ok(()) } BatchState::Poisoned => unreachable!("Poisoned batch"), @@ -383,11 +379,11 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>, WrongState> { + pub fn start_processing(&mut self) -> Result<(Vec>, Duration), WrongState> { match self.state.poison() { - BatchState::AwaitingProcessing(peer, blocks) => { + BatchState::AwaitingProcessing(peer, blocks, start_instant) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); - Ok(blocks) + Ok((blocks, start_instant.elapsed())) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -533,16 +529,12 @@ impl std::fmt::Debug for BatchState { }) => write!(f, "AwaitingValidation({})", peer_id), BatchState::AwaitingDownload => f.write_str("AwaitingDownload"), BatchState::Failed => f.write_str("Failed"), - BatchState::AwaitingProcessing(ref peer, ref blocks) => { + BatchState::AwaitingProcessing(ref peer, ref blocks, _) => { write!(f, "AwaitingProcessing({}, {} blocks)", peer, blocks.len()) } - BatchState::Downloading(peer, blocks, request_id) => write!( - f, - "Downloading({}, {} blocks, {})", - peer, - blocks.len(), - request_id - ), + BatchState::Downloading(peer, request_id) => { + write!(f, "Downloading({}, {})", peer, request_id) + } BatchState::Poisoned => f.write_str("Poisoned"), } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index c60cdb2cc9f..d63b2f95d80 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,17 +1,19 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; +use super::RangeSyncType; +use crate::metrics; use crate::network_beacon_processor::ChainSegmentProcessId; use crate::sync::network_context::RangeRequestId; -use crate::sync::{ - manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, -}; +use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; +use 
lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; -use rand::seq::SliceRandom; +use rand::{seq::SliceRandom, Rng}; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; +use strum::IntoStaticStr; use types::{Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -54,6 +56,13 @@ pub struct KeepChain; pub type ChainId = u64; pub type BatchId = Epoch; +#[derive(Debug, Copy, Clone, IntoStaticStr)] +pub enum SyncingChainType { + Head, + Finalized, + Backfill, +} + /// A chain of blocks that need to be downloaded. Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the /// chain. @@ -61,6 +70,9 @@ pub struct SyncingChain { /// A random id used to identify this chain. id: ChainId, + /// SyncingChain type + pub chain_type: SyncingChainType, + /// The start of the chain segment. Any epoch previous to this one has been validated. pub start_epoch: Epoch, @@ -127,6 +139,7 @@ impl SyncingChain { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, + chain_type: SyncingChainType, log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); @@ -136,6 +149,7 @@ impl SyncingChain { SyncingChain { id, + chain_type, start_epoch, target_head_slot, target_head_root, @@ -172,6 +186,14 @@ impl SyncingChain { self.validated_batches * EPOCHS_PER_BATCH } + /// Returns the total count of pending blocks in all the batches of this chain + pub fn pending_blocks(&self) -> usize { + self.batches + .values() + .map(|batch| batch.pending_blocks()) + .sum() + } + /// Removes a peer from the chain. /// If the peer has active batches, those are considered failed and re-requested. pub fn remove_peer( @@ -180,7 +202,7 @@ impl SyncingChain { network: &mut SyncNetworkContext, ) -> ProcessingResult { if let Some(batch_ids) = self.peers.remove(peer_id) { - // fail the batches + // fail the batches. for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { if let BatchOperationOutcome::Failed { blacklist } = @@ -222,7 +244,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + blocks: Vec>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -243,18 +265,14 @@ impl SyncingChain { } }; - if let Some(block) = beacon_block { - // This is not a stream termination, simply add the block to the request - batch.add_block(block)?; - Ok(KeepChain) - } else { + { // A stream termination has been sent. This batch has ended. Process a completed batch. // Remove the request from the peer's active batches self.peers .get_mut(peer_id) .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed() { + match batch.download_completed(blocks) { Ok(received) => { let awaiting_batches = batch_id .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) @@ -310,7 +328,12 @@ impl SyncingChain { // result callback. This is done, because an empty batch could end a chain and the logic // for removing chains and checking completion is in the callback. 
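// Aside (not part of the diff): the change just below times how long a batch
// sits in `AwaitingProcessing` by stamping `Instant::now()` when the download
// completes and reading `elapsed()` when processing starts. Dependency-free
// sketch of that "time spent in a queue state" pattern, with illustrative
// names only:
use std::time::{Duration, Instant};

enum State {
    AwaitingProcessing(Vec<u64>, Instant),
    Processing,
}

fn start_processing(state: &mut State) -> Option<(Vec<u64>, Duration)> {
    match std::mem::replace(state, State::Processing) {
        State::AwaitingProcessing(blocks, queued_at) => Some((blocks, queued_at.elapsed())),
        other => {
            *state = other; // wrong state: put it back and report nothing
            None
        }
    }
}

fn main() {
    let mut state = State::AwaitingProcessing(vec![1, 2, 3], Instant::now());
    let (blocks, waited) = start_processing(&mut state).unwrap();
    assert_eq!(blocks.len(), 3);
    // `waited` would be exported as a histogram observation in the real code.
    assert!(waited >= Duration::ZERO);
}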
- let blocks = batch.start_processing()?; + let (blocks, duration_in_awaiting_processing) = batch.start_processing()?; + metrics::observe_duration( + &metrics::SYNCING_CHAIN_BATCH_AWAITING_PROCESSING, + duration_in_awaiting_processing, + ); + let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); self.current_processing_batch = Some(batch_id); @@ -474,10 +497,27 @@ impl SyncingChain { // We consider three cases. Batch was successfully processed, Batch failed processing due // to a faulty peer, or batch failed processing but the peer can't be deemed faulty. match result { - BatchProcessResult::Success { was_non_empty } => { + BatchProcessResult::Success { + sent_blocks, + imported_blocks, + } => { + if sent_blocks > imported_blocks { + let ignored_blocks = sent_blocks - imported_blocks; + metrics::inc_counter_vec_by( + &metrics::SYNCING_CHAINS_IGNORED_BLOCKS, + &[self.chain_type.into()], + ignored_blocks as u64, + ); + } + metrics::inc_counter_vec( + &metrics::SYNCING_CHAINS_PROCESSED_BATCHES, + &[self.chain_type.into()], + ); + batch.processing_completed(BatchProcessingResult::Success)?; - if *was_non_empty { + // was not empty = sent_blocks > 0 + if *sent_blocks > 0 { // If the processed batch was not empty, we can validate previous unvalidated // blocks. self.advance_chain(network, batch_id); @@ -520,7 +560,7 @@ impl SyncingChain { match batch.processing_completed(BatchProcessingResult::FaultyFailure)? { BatchOperationOutcome::Continue => { // Chain can continue. Check if it can be moved forward. - if *imported_blocks { + if *imported_blocks > 0 { // At least one block was successfully verified and imported, so we can be sure all // previous batches are valid and we only need to download the current failed // batch. @@ -878,16 +918,20 @@ impl SyncingChain { // Find a peer to request the batch let failed_peers = batch.failed_peers(); - let new_peer = { - let mut priorized_peers = self - .peers - .iter() - .map(|(peer, requests)| (failed_peers.contains(peer), requests.len(), *peer)) - .collect::>(); + let new_peer = self + .peers + .iter() + .map(|(peer, requests)| { + ( + failed_peers.contains(peer), + requests.len(), + rand::thread_rng().gen::(), + *peer, + ) + }) // Sort peers prioritizing unrelated peers with less active requests. 
- priorized_peers.sort_unstable(); - priorized_peers.first().map(|&(_, _, peer)| peer) - }; + .min() + .map(|(_, _, _, peer)| peer); if let Some(peer) = new_peer { self.send_batch(network, batch_id, peer) @@ -945,7 +989,7 @@ impl SyncingChain { Err(e) => { // NOTE: under normal conditions this shouldn't happen but we handle it anyway warn!(self.log, "Could not send batch request"; - "batch_id" => batch_id, "error" => e, &batch); + "batch_id" => batch_id, "error" => ?e, &batch); // register the failed download and check if the batch can be retried batch.start_downloading_from_peer(peer, 1)?; // fake request_id is not relevant self.peers @@ -1143,3 +1187,12 @@ impl RemoveChain { ) } } + +impl From for SyncingChainType { + fn from(value: RangeSyncType) -> Self { + match value { + RangeSyncType::Head => Self::Head, + RangeSyncType::Finalized => Self::Finalized, + } + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 364514a3582..3621a6605af 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -64,8 +64,8 @@ impl ChainCollection { /// Updates the Syncing state of the collection after a chain is removed. fn on_chain_removed(&mut self, id: &ChainId, was_syncing: bool, sync_type: RangeSyncType) { - let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) - .map(|m| m.dec()); + metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_REMOVED, &[sync_type.as_str()]); + self.update_metrics(); match self.state { RangeSyncState::Finalized(ref syncing_id) => { @@ -493,15 +493,28 @@ impl ChainCollection { target_head_slot, target_head_root, peer, + sync_type.into(), &self.log, ); debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); entry.insert(new_chain); - let _ = - metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) - .map(|m| m.inc()); + metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_ADDED, &[sync_type.as_str()]); + self.update_metrics(); } } } + + fn update_metrics(&self) { + metrics::set_gauge_vec( + &metrics::SYNCING_CHAINS_COUNT, + &[RangeSyncType::Finalized.as_str()], + self.finalized_chains.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::SYNCING_CHAINS_COUNT, + &[RangeSyncType::Head.as_str()], + self.head_chains.len() as i64, + ); + } } diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index c8e82666840..4213771d483 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -43,13 +43,14 @@ use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; use super::chain_collection::ChainCollection; use super::sync_type::RangeSyncType; +use crate::metrics; use crate::status::ToStatusMessage; -use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; use crate::sync::BatchProcessResult; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; +use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use lru_cache::LRUTimeCache; @@ -210,11 +211,11 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: 
Option>, + blocks: Vec>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { - chain.on_block_response(network, batch_id, &peer_id, request_id, beacon_block) + chain.on_block_response(network, batch_id, &peer_id, request_id, blocks) }) { Ok((removed_chain, sync_type)) => { if let Some((removed_chain, remove_reason)) = removed_chain { @@ -346,6 +347,12 @@ where } } + metrics::inc_counter_vec_by( + &metrics::SYNCING_CHAINS_DROPPED_BLOCKS, + &[sync_type.as_str()], + chain.pending_blocks() as u64, + ); + network.status_peers(self.beacon_chain.as_ref(), chain.peers()); let status = self.beacon_chain.status_message(); @@ -380,7 +387,6 @@ where #[cfg(test)] mod tests { use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::service::RequestId; use crate::NetworkMessage; use super::*; @@ -391,7 +397,10 @@ mod tests { use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::EngineState; use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; + use lighthouse_network::service::api_types::SyncRequestId; + use lighthouse_network::{ + rpc::StatusMessage, service::api_types::AppRequestId, NetworkGlobals, + }; use slog::{o, Drain}; use slot_clock::TestingSlotClock; use std::collections::HashSet; @@ -517,7 +526,7 @@ mod tests { &mut self, expected_peer: &PeerId, fork_name: ForkName, - ) -> (RequestId, Option) { + ) -> (AppRequestId, Option) { let block_req_id = if let Ok(NetworkMessage::SendRequest { peer_id, request: _, @@ -550,12 +559,12 @@ mod tests { fn complete_range_block_and_blobs_response( &mut self, - block_req: RequestId, - blob_req_opt: Option, + block_req: AppRequestId, + blob_req_opt: Option, ) -> (ChainId, BatchId, Id) { if blob_req_opt.is_some() { match block_req { - RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { let _ = self .cx .range_block_and_blob_response(id, BlockOrBlob::Block(None)); @@ -571,7 +580,7 @@ mod tests { } } else { match block_req { - RequestId::Sync(crate::sync::manager::RequestId::RangeBlockAndBlobs { id }) => { + AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { let response = self .cx .range_block_and_blob_response(id, BlockOrBlob::Block(None)) @@ -796,7 +805,7 @@ mod tests { rig.cx.update_execution_engine_state(EngineState::Offline); // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, None); + range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); @@ -810,7 +819,7 @@ mod tests { rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, None); + range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 5c6f684e722..97d0583e345 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,8 +1,8 @@ -use crate::attestation_storage::AttestationRef; +use crate::attestation_storage::{CompactAttestationRef, CompactIndexedAttestation}; use 
crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; use state_processing::common::{ - base, get_attestation_participation_flag_indices, get_attesting_indices, + attesting_indices_base::get_attesting_indices, base, get_attestation_participation_flag_indices, }; use std::collections::HashMap; use types::{ @@ -14,14 +14,14 @@ use types::{ #[derive(Debug, Clone)] pub struct AttMaxCover<'a, E: EthSpec> { /// Underlying attestation. - pub att: AttestationRef<'a, E>, + pub att: CompactAttestationRef<'a, E>, /// Mapping of validator indices and their rewards. pub fresh_validators_rewards: HashMap, } impl<'a, E: EthSpec> AttMaxCover<'a, E> { pub fn new( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, @@ -36,7 +36,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { /// Initialise an attestation cover object for base/phase0 hard fork. pub fn new_for_base( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, base_state: &BeaconStateBase, total_active_balance: u64, @@ -69,7 +69,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { /// Initialise an attestation cover object for Altair or later. pub fn new_for_altair_deneb( - att: AttestationRef<'a, E>, + att: CompactAttestationRef<'a, E>, state: &BeaconState, reward_cache: &'a RewardCache, spec: &ChainSpec, @@ -83,7 +83,7 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { let fresh_validators_rewards = att .indexed - .attesting_indices + .attesting_indices() .iter() .filter_map(|&index| { if reward_cache @@ -119,14 +119,14 @@ impl<'a, E: EthSpec> AttMaxCover<'a, E> { impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { type Object = Attestation; - type Intermediate = AttestationRef<'a, E>; + type Intermediate = CompactAttestationRef<'a, E>; type Set = HashMap; - fn intermediate(&self) -> &AttestationRef<'a, E> { + fn intermediate(&self) -> &CompactAttestationRef<'a, E> { &self.att } - fn convert_to_object(att_ref: &AttestationRef<'a, E>) -> Attestation { + fn convert_to_object(att_ref: &CompactAttestationRef<'a, E>) -> Attestation { att_ref.clone_as_attestation() } @@ -144,9 +144,16 @@ impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { /// because including two attestations on chain to satisfy different participation bits is /// impossible without the validator double voting. I.e. it is only suboptimal in the presence /// of slashable voting, which is rare. + /// + /// Post-Electra this optimisation is still OK. The `self.att.data.index` will always be 0 for + /// all Electra attestations, so when a new attestation is added to the solution, we will + /// remove its validators from all attestations at the same slot. It may happen that the + /// included attestation and the attestation being updated have no validators in common, in + /// which case the `retain` will be a no-op. We could consider optimising this in future by only + /// executing the `retain` when the `committee_bits` of the two attestations intersect. fn update_covering_set( &mut self, - best_att: &AttestationRef<'a, E>, + best_att: &CompactAttestationRef<'a, E>, covered_validators: &HashMap, ) { if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { @@ -170,12 +177,16 @@ impl<'a, E: EthSpec> MaxCover for AttMaxCover<'a, E> { /// /// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading. 
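// Aside (not part of the diff): `update_covering_set` a little above implements
// the greedy max-cover update: once an attestation is picked, the validators it
// covers stop contributing to the scores of the remaining candidates at the
// same slot/index. Minimal sketch over plain HashMaps (illustrative names, not
// the Lighthouse types):
use std::collections::HashMap;

fn update_covering_set(
    candidate_rewards: &mut HashMap<u64, u64>,
    covered_rewards: &HashMap<u64, u64>,
) {
    candidate_rewards.retain(|validator, _| !covered_rewards.contains_key(validator));
}

fn main() {
    let mut candidate: HashMap<u64, u64> = HashMap::from([(1, 10), (2, 10), (3, 10)]);
    let covered: HashMap<u64, u64> = HashMap::from([(2, 10)]);
    update_covering_set(&mut candidate, &covered);
    // Validator 2 is already covered, so the candidate's score drops to 20.
    assert_eq!(candidate.values().sum::<u64>(), 20);
}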
pub fn earliest_attestation_validators( - attestation: &AttestationRef, + attestation: &CompactAttestationRef, state: &BeaconState, base_state: &BeaconStateBase, ) -> BitList { // Bitfield of validators whose attestations are new/fresh. - let mut new_validators = attestation.indexed.aggregation_bits.clone(); + let mut new_validators = match attestation.indexed { + CompactIndexedAttestation::Base(indexed_att) => indexed_att.aggregation_bits.clone(), + // This code path is obsolete post altair fork, so we just return an empty bitlist here. + CompactIndexedAttestation::Electra(_) => return BitList::with_capacity(0).unwrap(), + }; let state_attestations = if attestation.checkpoint.target_epoch == state.current_epoch() { &base_state.current_epoch_attestations diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 43fdf3923bd..4de9d351f3c 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,9 +1,10 @@ use crate::AttestationStats; use itertools::Itertools; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use types::{ - AggregateSignature, Attestation, AttestationData, BeaconState, BitList, Checkpoint, Epoch, - EthSpec, Hash256, Slot, + attestation::{AttestationBase, AttestationElectra}, + superstruct, AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, + Checkpoint, Epoch, EthSpec, Hash256, Slot, Unsigned, }; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] @@ -20,11 +21,17 @@ pub struct CompactAttestationData { pub target_root: Hash256, } +#[superstruct(variants(Base, Electra), variant_attributes(derive(Debug, PartialEq,)))] #[derive(Debug, PartialEq)] pub struct CompactIndexedAttestation { pub attesting_indices: Vec, + #[superstruct(only(Base), partial_getter(rename = "aggregation_bits_base"))] pub aggregation_bits: BitList, + #[superstruct(only(Electra), partial_getter(rename = "aggregation_bits_electra"))] + pub aggregation_bits: BitList, pub signature: AggregateSignature, + #[superstruct(only(Electra))] + pub committee_bits: BitVector, } #[derive(Debug)] @@ -35,7 +42,7 @@ pub struct SplitAttestation { } #[derive(Debug, Clone)] -pub struct AttestationRef<'a, E: EthSpec> { +pub struct CompactAttestationRef<'a, E: EthSpec> { pub checkpoint: &'a CheckpointKey, pub data: &'a CompactAttestationData, pub indexed: &'a CompactIndexedAttestation, @@ -54,20 +61,34 @@ pub struct AttestationDataMap { impl SplitAttestation { pub fn new(attestation: Attestation, attesting_indices: Vec) -> Self { let checkpoint = CheckpointKey { - source: attestation.data.source, - target_epoch: attestation.data.target.epoch, + source: attestation.data().source, + target_epoch: attestation.data().target.epoch, }; let data = CompactAttestationData { - slot: attestation.data.slot, - index: attestation.data.index, - beacon_block_root: attestation.data.beacon_block_root, - target_root: attestation.data.target.root, + slot: attestation.data().slot, + index: attestation.data().index, + beacon_block_root: attestation.data().beacon_block_root, + target_root: attestation.data().target.root, }; - let indexed = CompactIndexedAttestation { - attesting_indices, - aggregation_bits: attestation.aggregation_bits, - signature: attestation.signature, + + let indexed = match attestation.clone() { + Attestation::Base(attn) => { + CompactIndexedAttestation::Base(CompactIndexedAttestationBase { + attesting_indices, + aggregation_bits: 
attn.aggregation_bits, + signature: attestation.signature().clone(), + }) + } + Attestation::Electra(attn) => { + CompactIndexedAttestation::Electra(CompactIndexedAttestationElectra { + attesting_indices, + aggregation_bits: attn.aggregation_bits, + signature: attestation.signature().clone(), + committee_bits: attn.committee_bits, + }) + } }; + Self { checkpoint, data, @@ -75,8 +96,8 @@ impl SplitAttestation { } } - pub fn as_ref(&self) -> AttestationRef { - AttestationRef { + pub fn as_ref(&self) -> CompactAttestationRef { + CompactAttestationRef { checkpoint: &self.checkpoint, data: &self.data, indexed: &self.indexed, @@ -84,7 +105,7 @@ impl SplitAttestation { } } -impl<'a, E: EthSpec> AttestationRef<'a, E> { +impl<'a, E: EthSpec> CompactAttestationRef<'a, E> { pub fn attestation_data(&self) -> AttestationData { AttestationData { slot: self.data.slot, @@ -99,10 +120,20 @@ impl<'a, E: EthSpec> AttestationRef<'a, E> { } pub fn clone_as_attestation(&self) -> Attestation { - Attestation { - aggregation_bits: self.indexed.aggregation_bits.clone(), - data: self.attestation_data(), - signature: self.indexed.signature.clone(), + match self.indexed { + CompactIndexedAttestation::Base(indexed_att) => Attestation::Base(AttestationBase { + aggregation_bits: indexed_att.aggregation_bits.clone(), + data: self.attestation_data(), + signature: indexed_att.signature.clone(), + }), + CompactIndexedAttestation::Electra(indexed_att) => { + Attestation::Electra(AttestationElectra { + aggregation_bits: indexed_att.aggregation_bits.clone(), + data: self.attestation_data(), + signature: indexed_att.signature.clone(), + committee_bits: indexed_att.committee_bits.clone(), + }) + } } } } @@ -125,7 +156,37 @@ impl CheckpointKey { } impl CompactIndexedAttestation { - pub fn signers_disjoint_from(&self, other: &Self) -> bool { + pub fn should_aggregate(&self, other: &Self) -> bool { + match (self, other) { + (CompactIndexedAttestation::Base(this), CompactIndexedAttestation::Base(other)) => { + this.should_aggregate(other) + } + ( + CompactIndexedAttestation::Electra(this), + CompactIndexedAttestation::Electra(other), + ) => this.should_aggregate(other), + _ => false, + } + } + + /// Returns `true` if aggregated, otherwise `false`. + pub fn aggregate(&mut self, other: &Self) -> bool { + match (self, other) { + (CompactIndexedAttestation::Base(this), CompactIndexedAttestation::Base(other)) => { + this.aggregate(other); + true + } + ( + CompactIndexedAttestation::Electra(this), + CompactIndexedAttestation::Electra(other), + ) => this.aggregate_same_committee(other), + _ => false, + } + } +} + +impl CompactIndexedAttestationBase { + pub fn should_aggregate(&self, other: &Self) -> bool { self.aggregation_bits .intersection(&other.aggregation_bits) .is_zero() @@ -143,13 +204,108 @@ impl CompactIndexedAttestation { } } +impl CompactIndexedAttestationElectra { + pub fn should_aggregate(&self, other: &Self) -> bool { + // For Electra, only aggregate attestations in the same committee. + self.committee_bits == other.committee_bits + && self + .aggregation_bits + .intersection(&other.aggregation_bits) + .is_zero() + } + + /// Returns `true` if aggregated, otherwise `false`. + pub fn aggregate_same_committee(&mut self, other: &Self) -> bool { + if self.committee_bits != other.committee_bits { + return false; + } + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.attesting_indices = self + .attesting_indices + .drain(..) 
+ .merge(other.attesting_indices.iter().copied()) + .dedup() + .collect(); + self.signature.add_assign_aggregate(&other.signature); + true + } + + pub fn aggregate_with_disjoint_committees(&mut self, other: &Self) -> Option<()> { + if !self + .committee_bits + .intersection(&other.committee_bits) + .is_zero() + { + return None; + } + // The attestation being aggregated in must only have 1 committee bit set. + if other.committee_bits.num_set_bits() != 1 { + return None; + } + + // Check we are aggregating in increasing committee index order (so we can append + // aggregation bits). + if self.committee_bits.highest_set_bit() >= other.committee_bits.highest_set_bit() { + return None; + } + + self.committee_bits = self.committee_bits.union(&other.committee_bits); + if let Some(agg_bits) = bitlist_extend(&self.aggregation_bits, &other.aggregation_bits) { + self.aggregation_bits = agg_bits; + + self.attesting_indices = self + .attesting_indices + .drain(..) + .merge(other.attesting_indices.iter().copied()) + .dedup() + .collect(); + self.signature.add_assign_aggregate(&other.signature); + + return Some(()); + } + + None + } + + pub fn committee_index(&self) -> Option { + self.get_committee_indices().first().copied() + } + + pub fn get_committee_indices(&self) -> Vec { + self.committee_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect() + } +} + +// TODO(electra): upstream this or a more efficient implementation +fn bitlist_extend(list1: &BitList, list2: &BitList) -> Option> { + let new_length = list1.len() + list2.len(); + let mut list = BitList::::with_capacity(new_length).ok()?; + + // Copy bits from list1. + for (i, bit) in list1.iter().enumerate() { + list.set(i, bit).ok()?; + } + + // Copy bits from list2, starting from the end of list1. + let offset = list1.len(); + for (i, bit) in list2.iter().enumerate() { + list.set(offset + i, bit).ok()?; + } + + Some(list) +} + impl AttestationMap { pub fn insert(&mut self, attestation: Attestation, attesting_indices: Vec) { let SplitAttestation { checkpoint, data, indexed, - } = SplitAttestation::new(attestation, attesting_indices); + } = SplitAttestation::new(attestation.clone(), attesting_indices); let attestation_map = self.checkpoint_map.entry(checkpoint).or_default(); let attestations = attestation_map.attestations.entry(data).or_default(); @@ -158,10 +314,10 @@ impl AttestationMap { // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique // aggregation. let mut aggregated = false; + for existing_attestation in attestations.iter_mut() { - if existing_attestation.signers_disjoint_from(&indexed) { - existing_attestation.aggregate(&indexed); - aggregated = true; + if existing_attestation.should_aggregate(&indexed) { + aggregated = existing_attestation.aggregate(&indexed); } else if *existing_attestation == indexed { aggregated = true; } @@ -172,11 +328,93 @@ impl AttestationMap { } } + /// Aggregate Electra attestations for the same attestation data signed by different + /// committees. + /// + /// Non-Electra attestations are left as-is. 
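// Aside (not part of the diff): `bitlist_extend` above concatenates two
// variable-length bitfields by appending the second after the first, which is
// how aggregation bits from disjoint committees are joined. Dependency-free
// sketch over Vec<bool> (the real code uses ssz_types::BitList and can fail
// when the combined length exceeds the list's maximum):
fn bitlist_extend(list1: &[bool], list2: &[bool]) -> Vec<bool> {
    let mut out = Vec::with_capacity(list1.len() + list2.len());
    out.extend_from_slice(list1);
    out.extend_from_slice(list2);
    out
}

fn main() {
    // Aggregation bits of committee 0 followed by those of committee 1.
    let joined = bitlist_extend(&[true, false], &[false, true, true]);
    assert_eq!(joined, vec![true, false, false, true, true]);
}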
+ pub fn aggregate_across_committees(&mut self, checkpoint_key: CheckpointKey) { + let Some(attestation_map) = self.checkpoint_map.get_mut(&checkpoint_key) else { + return; + }; + for compact_indexed_attestations in attestation_map.attestations.values_mut() { + let unaggregated_attestations = std::mem::take(compact_indexed_attestations); + let mut aggregated_attestations: Vec> = vec![]; + + // Aggregate the best attestations for each committee and leave the rest. + let mut best_attestations_by_committee: BTreeMap< + u64, + CompactIndexedAttestationElectra, + > = BTreeMap::new(); + + for committee_attestation in unaggregated_attestations { + let mut electra_attestation = match committee_attestation { + CompactIndexedAttestation::Electra(att) + if att.committee_bits.num_set_bits() == 1 => + { + att + } + CompactIndexedAttestation::Electra(att) => { + // Aggregate already covers multiple committees, leave it as-is. + aggregated_attestations.push(CompactIndexedAttestation::Electra(att)); + continue; + } + CompactIndexedAttestation::Base(att) => { + // Leave as-is. + aggregated_attestations.push(CompactIndexedAttestation::Base(att)); + continue; + } + }; + if let Some(committee_index) = electra_attestation.committee_index() { + if let Some(existing_attestation) = + best_attestations_by_committee.get_mut(&committee_index) + { + // Search for the best (most aggregation bits) attestation for this committee + // index. + if electra_attestation.aggregation_bits.num_set_bits() + > existing_attestation.aggregation_bits.num_set_bits() + { + // New attestation is better than the previously known one for this + // committee. Replace it. + std::mem::swap(existing_attestation, &mut electra_attestation); + } + // Put the inferior attestation into the list of aggregated attestations + // without performing any cross-committee aggregation. + aggregated_attestations + .push(CompactIndexedAttestation::Electra(electra_attestation)); + } else { + // First attestation seen for this committee. Place it in the map + // provisionally. + best_attestations_by_committee.insert(committee_index, electra_attestation); + } + } + } + + if let Some(on_chain_aggregate) = + Self::compute_on_chain_aggregate(best_attestations_by_committee) + { + aggregated_attestations + .push(CompactIndexedAttestation::Electra(on_chain_aggregate)); + } + + *compact_indexed_attestations = aggregated_attestations; + } + } + + pub fn compute_on_chain_aggregate( + mut attestations_by_committee: BTreeMap>, + ) -> Option> { + let (_, mut on_chain_aggregate) = attestations_by_committee.pop_first()?; + for (_, attestation) in attestations_by_committee { + on_chain_aggregate.aggregate_with_disjoint_committees(&attestation); + } + Some(on_chain_aggregate) + } + /// Iterate all attestations matching the given `checkpoint_key`. pub fn get_attestations<'a>( &'a self, checkpoint_key: &'a CheckpointKey, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.checkpoint_map .get(checkpoint_key) .into_iter() @@ -184,7 +422,7 @@ impl AttestationMap { } /// Iterate all attestations in the map. 
- pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl Iterator> { self.checkpoint_map .iter() .flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key)) @@ -215,9 +453,9 @@ impl AttestationDataMap { pub fn iter<'a>( &'a self, checkpoint_key: &'a CheckpointKey, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { self.attestations.iter().flat_map(|(data, vec_indexed)| { - vec_indexed.iter().map(|indexed| AttestationRef { + vec_indexed.iter().map(|indexed| CompactAttestationRef { checkpoint: checkpoint_key, data, indexed, diff --git a/beacon_node/operation_pool/src/attester_slashing.rs b/beacon_node/operation_pool/src/attester_slashing.rs index 725d4d2a857..c2411d4d726 100644 --- a/beacon_node/operation_pool/src/attester_slashing.rs +++ b/beacon_node/operation_pool/src/attester_slashing.rs @@ -1,17 +1,17 @@ use crate::max_cover::MaxCover; use state_processing::per_block_processing::get_slashable_indices_modular; use std::collections::{HashMap, HashSet}; -use types::{AttesterSlashing, BeaconState, EthSpec}; +use types::{AttesterSlashing, AttesterSlashingRef, BeaconState, EthSpec}; #[derive(Debug, Clone)] pub struct AttesterSlashingMaxCover<'a, E: EthSpec> { - slashing: &'a AttesterSlashing, + slashing: AttesterSlashingRef<'a, E>, effective_balances: HashMap, } impl<'a, E: EthSpec> AttesterSlashingMaxCover<'a, E> { pub fn new( - slashing: &'a AttesterSlashing, + slashing: AttesterSlashingRef<'a, E>, proposer_slashing_indices: &HashSet, state: &BeaconState, ) -> Option { @@ -39,16 +39,16 @@ impl<'a, E: EthSpec> AttesterSlashingMaxCover<'a, E> { impl<'a, E: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, E> { /// The result type, of which we would eventually like a collection of maximal quality. type Object = AttesterSlashing; - type Intermediate = AttesterSlashing; + type Intermediate = AttesterSlashingRef<'a, E>; /// The type used to represent sets. type Set = HashMap; - fn intermediate(&self) -> &AttesterSlashing { - self.slashing + fn intermediate(&self) -> &AttesterSlashingRef<'a, E> { + &self.slashing } - fn convert_to_object(slashing: &AttesterSlashing) -> AttesterSlashing { - slashing.clone() + fn convert_to_object(slashing: &AttesterSlashingRef<'a, E>) -> AttesterSlashing { + slashing.clone_as_attester_slashing() } /// Get the set of elements covered. @@ -58,7 +58,7 @@ impl<'a, E: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, E> { /// Update the set of items covered, for the inclusion of some object in the solution. 
fn update_covering_set( &mut self, - _best_slashing: &AttesterSlashing, + _best_slashing: &AttesterSlashingRef<'a, E>, covered_validator_indices: &HashMap, ) { self.effective_balances diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 6e744ccf62a..a1c9ada03a0 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -11,11 +11,10 @@ mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; -pub use attestation_storage::{AttestationRef, SplitAttestation}; +pub use attestation_storage::{CompactAttestationRef, SplitAttestation}; pub use max_cover::MaxCover; pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, - PersistedOperationPoolV15, PersistedOperationPoolV5, + PersistedOperationPool, PersistedOperationPoolV15, PersistedOperationPoolV20, }; pub use reward_cache::RewardCache; use state_processing::epoch_cache::is_epoch_cache_initialized; @@ -228,7 +227,7 @@ impl OperationPool { state: &'a BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, - validity_filter: impl FnMut(&AttestationRef<'a, E>) -> bool + Send, + validity_filter: impl FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, spec: &'a ChainSpec, ) -> impl Iterator> + Send { all_attestations @@ -252,10 +251,11 @@ impl OperationPool { pub fn get_attestations( &self, state: &BeaconState, - prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send, - curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, E>) -> bool + Send, + prev_epoch_validity_filter: impl for<'a> FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, + curr_epoch_validity_filter: impl for<'a> FnMut(&CompactAttestationRef<'a, E>) -> bool + Send, spec: &ChainSpec, ) -> Result>, OpPoolError> { + let fork_name = state.fork_name_unchecked(); if !matches!(state, BeaconState::Base(_)) { // Epoch cache must be initialized to fetch base reward values in the max cover `score` // function. Currently max cover ignores items on errors. If epoch cache is not @@ -267,7 +267,6 @@ impl OperationPool { // Attestations for the current fork, which may be from the current or previous epoch. let (prev_epoch_key, curr_epoch_key) = CheckpointKey::keys_for_state(state); - let all_attestations = self.attestations.read(); let total_active_balance = state .get_total_active_balance() .map_err(OpPoolError::GetAttestationsTotalBalanceError)?; @@ -284,6 +283,16 @@ impl OperationPool { let mut num_prev_valid = 0_i64; let mut num_curr_valid = 0_i64; + // TODO(electra): Work out how to do this more elegantly. This is a bit of a hack. 
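// Aside (not part of the diff): the TODO above refers to the code just below,
// which takes the attestation pool's write lock to run cross-committee
// aggregation in place and then downgrades to a read guard for packing,
// avoiding a release/re-acquire gap. Sketch of that parking_lot pattern
// (assumes the parking_lot crate, which is already a Lighthouse dependency):
use parking_lot::{RwLock, RwLockWriteGuard};

fn main() {
    let pool = RwLock::new(vec![1u64, 2, 3]);

    let mut write_guard = pool.write();
    write_guard.push(4); // mutate while holding exclusive access

    // Keep the data locked for reading without letting writers sneak in between.
    let read_guard = RwLockWriteGuard::downgrade(write_guard);
    assert_eq!(read_guard.len(), 4);
}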
+ let mut all_attestations = self.attestations.write(); + + if fork_name.electra_enabled() { + all_attestations.aggregate_across_committees(prev_epoch_key); + all_attestations.aggregate_across_committees(curr_epoch_key); + } + + let all_attestations = parking_lot::RwLockWriteGuard::downgrade(all_attestations); + let prev_epoch_att = self .get_valid_attestations_for_epoch( &prev_epoch_key, @@ -307,6 +316,11 @@ impl OperationPool { ) .inspect(|_| num_curr_valid += 1); + let curr_epoch_limit = if fork_name.electra_enabled() { + E::MaxAttestationsElectra::to_usize() + } else { + E::MaxAttestations::to_usize() + }; let prev_epoch_limit = if let BeaconState::Base(base_state) = state { std::cmp::min( E::MaxPendingAttestations::to_usize() @@ -314,7 +328,7 @@ impl OperationPool { E::MaxAttestations::to_usize(), ) } else { - E::MaxAttestations::to_usize() + curr_epoch_limit }; let (prev_cover, curr_cover) = rayon::join( @@ -329,11 +343,7 @@ impl OperationPool { }, move || { let _timer = metrics::start_timer(&metrics::ATTESTATION_CURR_EPOCH_PACKING_TIME); - maximum_cover( - curr_epoch_att, - E::MaxAttestations::to_usize(), - "curr_epoch_attestations", - ) + maximum_cover(curr_epoch_att, curr_epoch_limit, "curr_epoch_attestations") }, ); @@ -343,7 +353,7 @@ impl OperationPool { Ok(max_cover::merge_solutions( curr_cover, prev_cover, - E::MaxAttestations::to_usize(), + curr_epoch_limit, )) } @@ -428,7 +438,7 @@ impl OperationPool { let relevant_attester_slashings = reader.iter().flat_map(|slashing| { if slashing.signature_is_still_valid(&state.fork()) { - AttesterSlashingMaxCover::new(slashing.as_inner(), to_be_slashed, state) + AttesterSlashingMaxCover::new(slashing.as_inner().to_ref(), to_be_slashed, state) } else { None } @@ -442,7 +452,7 @@ impl OperationPool { .into_iter() .map(|cover| { to_be_slashed.extend(cover.covering_set().keys()); - cover.intermediate().clone() + AttesterSlashingMaxCover::convert_to_object(cover.intermediate()) }) .collect() } @@ -463,16 +473,19 @@ impl OperationPool { // Check that the attestation's signature is still valid wrt the fork version. let signature_ok = slashing.signature_is_still_valid(&head_state.fork()); // Slashings that don't slash any validators can also be dropped. - let slashing_ok = - get_slashable_indices_modular(head_state, slashing.as_inner(), |_, validator| { + let slashing_ok = get_slashable_indices_modular( + head_state, + slashing.as_inner().to_ref(), + |_, validator| { // Declare that a validator is still slashable if they have not exited prior // to the finalized epoch. // // We cannot check the `slashed` field since the `head` is not finalized and // a fork could un-slash someone. 
validator.exit_epoch > head_state.finalized_checkpoint().epoch - }) - .map_or(false, |indices| !indices.is_empty()); + }, + ) + .map_or(false, |indices| !indices.is_empty()); signature_ok && slashing_ok }); @@ -602,7 +615,7 @@ impl OperationPool { }) }, |address_change| address_change.as_inner().clone(), - usize::max_value(), + usize::MAX, ); changes.shuffle(&mut thread_rng()); changes @@ -891,7 +904,7 @@ mod release_tests { ); for (atts, aggregate) in &attestations { - let att2 = aggregate.as_ref().unwrap().message.aggregate.clone(); + let att2 = aggregate.as_ref().unwrap().message().aggregate().clone(); let att1 = atts .into_iter() @@ -899,7 +912,7 @@ mod release_tests { .take(2) .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { - a.aggregate(&new_att); + a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) @@ -907,13 +920,13 @@ mod release_tests { }) .unwrap(); - let att1_indices = get_attesting_indices_from_state(&state, &att1).unwrap(); - let att2_indices = get_attesting_indices_from_state(&state, &att2).unwrap(); + let att1_indices = get_attesting_indices_from_state(&state, att1.to_ref()).unwrap(); + let att2_indices = get_attesting_indices_from_state(&state, att2).unwrap(); let att1_split = SplitAttestation::new(att1.clone(), att1_indices); - let att2_split = SplitAttestation::new(att2.clone(), att2_indices); + let att2_split = SplitAttestation::new(att2.clone_as_attestation(), att2_indices); assert_eq!( - att1.aggregation_bits.num_set_bits(), + att1.num_set_aggregation_bits(), earliest_attestation_validators( &att1_split.as_ref(), &state, @@ -927,8 +940,8 @@ mod release_tests { .unwrap() .current_epoch_attestations .push(PendingAttestation { - aggregation_bits: att1.aggregation_bits.clone(), - data: att1.data.clone(), + aggregation_bits: att1.aggregation_bits_base().unwrap().clone(), + data: att1.data().clone(), inclusion_delay: 0, proposer_index: 0, }) @@ -981,7 +994,8 @@ mod release_tests { for (atts, _) in attestations { for (att, _) in atts { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let attesting_indices = + get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); op_pool.insert_attestation(att, attesting_indices).unwrap(); } } @@ -1007,7 +1021,7 @@ mod release_tests { let agg_att = &block_attestations[0]; assert_eq!( - agg_att.aggregation_bits.num_set_bits(), + agg_att.num_set_aggregation_bits(), spec.target_committee_size as usize ); @@ -1050,12 +1064,15 @@ mod release_tests { ); for (_, aggregate) in attestations { - let att = aggregate.unwrap().message.aggregate; - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let agg = aggregate.unwrap(); + let att = agg.message().aggregate(); + let attesting_indices = get_attesting_indices_from_state(&state, att).unwrap(); + op_pool + .insert_attestation(att.clone_as_attestation(), attesting_indices.clone()) + .unwrap(); op_pool - .insert_attestation(att.clone(), attesting_indices.clone()) + .insert_attestation(att.clone_as_attestation(), attesting_indices) .unwrap(); - op_pool.insert_attestation(att, attesting_indices).unwrap(); } assert_eq!(op_pool.num_attestations(), committees.len()); @@ -1104,7 +1121,7 @@ mod release_tests { None, |att, new_att| { if let Some(mut a) = att { - a.aggregate(new_att); + a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) @@ -1127,7 +1144,7 @@ mod release_tests { None, |att, new_att| { if let Some(mut a) = att { - a.aggregate(new_att); + a.aggregate(new_att.to_ref()); 
Some(a) } else { Some(new_att.clone()) @@ -1139,7 +1156,8 @@ mod release_tests { .collect::>(); for att in aggs1.into_iter().chain(aggs2.into_iter()) { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let attesting_indices = + get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); op_pool.insert_attestation(att, attesting_indices).unwrap(); } } @@ -1203,7 +1221,7 @@ mod release_tests { .fold::, _>( att_0.clone(), |mut att, new_att| { - att.aggregate(new_att); + att.aggregate(new_att.to_ref()); att }, ) @@ -1211,7 +1229,8 @@ mod release_tests { .collect::>(); for att in aggs { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let attesting_indices = + get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1228,7 +1247,17 @@ mod release_tests { let num_big = target_committee_size / big_step_size; let stats = op_pool.attestation_stats(); - assert_eq!(stats.num_attestation_data, committees.len()); + let fork_name = state.fork_name_unchecked(); + + match fork_name { + ForkName::Electra => { + assert_eq!(stats.num_attestation_data, 1); + } + _ => { + assert_eq!(stats.num_attestation_data, committees.len()); + } + }; + assert_eq!( stats.num_attestations, (num_small + num_big) * committees.len() @@ -1239,11 +1268,25 @@ mod release_tests { let best_attestations = op_pool .get_attestations(&state, |_| true, |_| true, spec) .expect("should have best attestations"); - assert_eq!(best_attestations.len(), max_attestations); + match fork_name { + ForkName::Electra => { + assert_eq!(best_attestations.len(), 8); + } + _ => { + assert_eq!(best_attestations.len(), max_attestations); + } + }; // All the best attestations should be signed by at least `big_step_size` (4) validators. 
for att in &best_attestations { - assert!(att.aggregation_bits.num_set_bits() >= big_step_size); + match fork_name { + ForkName::Electra => { + assert!(att.num_set_aggregation_bits() >= small_step_size); + } + _ => { + assert!(att.num_set_aggregation_bits() >= big_step_size); + } + }; } } @@ -1298,7 +1341,7 @@ mod release_tests { .fold::, _>( att_0.clone(), |mut att, new_att| { - att.aggregate(new_att); + att.aggregate(new_att.to_ref()); att }, ) @@ -1306,7 +1349,8 @@ mod release_tests { .collect::>(); for att in aggs { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let attesting_indices = + get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1321,11 +1365,20 @@ mod release_tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; + let fork_name = state.fork_name_unchecked(); + + match fork_name { + ForkName::Electra => { + assert_eq!(op_pool.attestation_stats().num_attestation_data, 1); + } + _ => { + assert_eq!( + op_pool.attestation_stats().num_attestation_data, + committees.len() + ); + } + }; - assert_eq!( - op_pool.attestation_stats().num_attestation_data, - committees.len() - ); assert_eq!( op_pool.num_attestations(), (num_small + num_big) * committees.len() @@ -1336,20 +1389,28 @@ mod release_tests { let best_attestations = op_pool .get_attestations(&state, |_| true, |_| true, spec) .expect("should have valid best attestations"); - assert_eq!(best_attestations.len(), max_attestations); + + match fork_name { + ForkName::Electra => { + assert_eq!(best_attestations.len(), 8); + } + _ => { + assert_eq!(best_attestations.len(), max_attestations); + } + }; let total_active_balance = state.get_total_active_balance().unwrap(); // Set of indices covered by previous attestations in `best_attestations`. let mut seen_indices = BTreeSet::::new(); // Used for asserting that rewards are in decreasing order. - let mut prev_reward = u64::max_value(); + let mut prev_reward = u64::MAX; let mut reward_cache = RewardCache::default(); reward_cache.update(&state).unwrap(); for att in best_attestations { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, att.to_ref()).unwrap(); let split_attestation = SplitAttestation::new(att, attesting_indices); let mut fresh_validators_rewards = AttMaxCover::new( split_attestation.as_ref(), diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index ef749a220db..79509e5f6cc 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,4 +1,3 @@ -use crate::attestation_id::AttestationId; use crate::attestation_storage::AttestationMap; use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use crate::sync_aggregate_id::SyncAggregateId; @@ -12,6 +11,7 @@ use state_processing::SigVerifiedOp; use std::collections::HashSet; use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; +use types::attestation::AttestationOnDisk; use types::*; type PersistedSyncContributions = Vec<(SyncAggregateId, Vec>)>; @@ -21,7 +21,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec = Vec<(SyncAggregateId, Vec { - /// [DEPRECATED] Mapping from attestation ID to attestation mappings. 
- #[superstruct(only(V5))] - pub attestations_v5: Vec<(AttestationId, Vec>)>, + #[superstruct(only(V15))] + pub attestations_v15: Vec<(AttestationBase, Vec)>, /// Attestations and their attesting indices. - #[superstruct(only(V12, V14, V15))] - pub attestations: Vec<(Attestation, Vec)>, + #[superstruct(only(V20))] + pub attestations: Vec<(AttestationOnDisk, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. pub sync_contributions: PersistedSyncContributions, - /// [DEPRECATED] Attester slashings. - #[superstruct(only(V5))] - pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, + #[superstruct(only(V15))] + pub attester_slashings_v15: Vec, E>>, /// Attester slashings. - #[superstruct(only(V12, V14, V15))] + #[superstruct(only(V20))] pub attester_slashings: Vec, E>>, - /// [DEPRECATED] Proposer slashings. - #[superstruct(only(V5))] - pub proposer_slashings_v5: Vec, /// Proposer slashings with fork information. - #[superstruct(only(V12, V14, V15))] pub proposer_slashings: Vec>, - /// [DEPRECATED] Voluntary exits. - #[superstruct(only(V5))] - pub voluntary_exits_v5: Vec, /// Voluntary exits with fork information. - #[superstruct(only(V12, V14, V15))] pub voluntary_exits: Vec>, /// BLS to Execution Changes - #[superstruct(only(V14, V15))] pub bls_to_execution_changes: Vec>, /// Validator indices with BLS to Execution Changes to be broadcast at the /// Capella fork. - #[superstruct(only(V15))] pub capella_bls_change_broadcast_indices: Vec, } @@ -75,8 +63,8 @@ impl PersistedOperationPool { .iter() .map(|att| { ( - att.clone_as_attestation(), - att.indexed.attesting_indices.clone(), + AttestationOnDisk::from(att.clone_as_attestation()), + att.indexed.attesting_indices().clone(), ) }) .collect(); @@ -123,7 +111,7 @@ impl PersistedOperationPool { .copied() .collect(); - PersistedOperationPool::V15(PersistedOperationPoolV15 { + PersistedOperationPool::V20(PersistedOperationPoolV20 { attestations, sync_contributions, attester_slashings, @@ -136,56 +124,86 @@ impl PersistedOperationPool { /// Reconstruct an `OperationPool`. pub fn into_operation_pool(mut self) -> Result, OpPoolError> { - let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); + let attester_slashings = match &self { + PersistedOperationPool::V15(pool_v15) => RwLock::new( + pool_v15 + .attester_slashings_v15 + .iter() + .map(|slashing| slashing.clone().into()) + .collect(), + ), + PersistedOperationPool::V20(pool_v20) => { + RwLock::new(pool_v20.attester_slashings.iter().cloned().collect()) + } + }; + let proposer_slashings = RwLock::new( - self.proposer_slashings()? + self.proposer_slashings() .iter() .cloned() .map(|slashing| (slashing.as_inner().proposer_index(), slashing)) .collect(), ); let voluntary_exits = RwLock::new( - self.voluntary_exits()? 
+ self.voluntary_exits() .iter() .cloned() .map(|exit| (exit.as_inner().message.validator_index, exit)) .collect(), ); let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); - let attestations = match self { - PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => { - return Err(OpPoolError::IncorrectOpPoolVariant) + let attestations = match &self { + PersistedOperationPool::V15(pool_v15) => { + let mut map = AttestationMap::default(); + for (att, attesting_indices) in + pool_v15 + .attestations_v15 + .iter() + .map(|(att, attesting_indices)| { + (Attestation::Base(att.clone()), attesting_indices.clone()) + }) + { + map.insert(att, attesting_indices); + } + RwLock::new(map) } - PersistedOperationPool::V14(_) | PersistedOperationPool::V15(_) => { + PersistedOperationPool::V20(pool_v20) => { let mut map = AttestationMap::default(); - for (att, attesting_indices) in self.attestations()?.clone() { + for (att, attesting_indices) in + pool_v20 + .attestations + .iter() + .map(|(att, attesting_indices)| { + ( + AttestationRef::from(att.to_ref()).clone_as_attestation(), + attesting_indices.clone(), + ) + }) + { map.insert(att, attesting_indices); } RwLock::new(map) } }; - let mut bls_to_execution_changes = BlsToExecutionChanges::default(); - if let Ok(persisted_changes) = self.bls_to_execution_changes_mut() { - let persisted_changes = mem::take(persisted_changes); - let broadcast_indices = - if let Ok(indices) = self.capella_bls_change_broadcast_indices_mut() { - mem::take(indices).into_iter().collect() - } else { - HashSet::new() - }; + let mut bls_to_execution_changes = BlsToExecutionChanges::default(); + let persisted_changes = mem::take(self.bls_to_execution_changes_mut()); + let broadcast_indices: HashSet<_> = + mem::take(self.capella_bls_change_broadcast_indices_mut()) + .into_iter() + .collect(); - for bls_to_execution_change in persisted_changes { - let received_pre_capella = if broadcast_indices - .contains(&bls_to_execution_change.as_inner().message.validator_index) - { - ReceivedPreCapella::Yes - } else { - ReceivedPreCapella::No - }; - bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); - } + for bls_to_execution_change in persisted_changes { + let received_pre_capella = if broadcast_indices + .contains(&bls_to_execution_change.as_inner().message.validator_index) + { + ReceivedPreCapella::Yes + } else { + ReceivedPreCapella::No + }; + bls_to_execution_changes.insert(bls_to_execution_change, received_pre_capella); } + let op_pool = OperationPool { attestations, sync_contributions, @@ -200,35 +218,7 @@ impl PersistedOperationPool { } } -impl StoreItem for PersistedOperationPoolV5 { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV5::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -impl StoreItem for PersistedOperationPoolV12 { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -impl StoreItem for PersistedOperationPoolV14 { +impl StoreItem for PersistedOperationPoolV15 { fn db_column() -> DBColumn { DBColumn::OpPool } @@ -238,11 +228,11 @@ impl StoreItem for PersistedOperationPoolV14 { } fn from_store_bytes(bytes: &[u8]) -> Result { - 
PersistedOperationPoolV14::from_ssz_bytes(bytes).map_err(Into::into) + PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) } } -impl StoreItem for PersistedOperationPoolV15 { +impl StoreItem for PersistedOperationPoolV20 { fn db_column() -> DBColumn { DBColumn::OpPool } @@ -252,7 +242,7 @@ impl StoreItem for PersistedOperationPoolV15 { } fn from_store_bytes(bytes: &[u8]) -> Result { - PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into) + PersistedOperationPoolV20::from_ssz_bytes(bytes).map_err(Into::into) } } @@ -268,8 +258,8 @@ impl StoreItem for PersistedOperationPool { fn from_store_bytes(bytes: &[u8]) -> Result { // Default deserialization to the latest variant. - PersistedOperationPoolV15::from_ssz_bytes(bytes) - .map(Self::V15) + PersistedOperationPoolV20::from_ssz_bytes(bytes) + .map(Self::V20) .map_err(Into::into) } } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 818cdbd460f..c32c5e7ec6f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,12 +1,16 @@ -use clap::{App, Arg, ArgGroup}; +use std::time::Duration; + +use clap::{builder::ArgPredicate, crate_version, Arg, ArgAction, ArgGroup, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use strum::VariantNames; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("beacon_node") - .visible_aliases(&["b", "bn", "beacon"]) +pub fn cli_app() -> Command { + Command::new("beacon_node") + .display_order(0) + .visible_aliases(["b", "bn", "beacon"]) .version(crate_version!()) .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .about("The primary component which connects to the Ethereum 2.0 P2P network and \ downloads, verifies and stores blocks. Provides a HTTP API for querying \ the beacon chain and publishing messages to the network.") @@ -14,68 +18,91 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * Configuration directory locations. */ .arg( - Arg::with_name("network-dir") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("network-dir") .long("network-dir") .value_name("DIR") .help("Data directory for network keys. Defaults to network/ inside the beacon node \ dir.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("freezer-dir") + Arg::new("freezer-dir") .long("freezer-dir") .value_name("DIR") .help("Data directory for the freezer database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("blobs-dir") + Arg::new("blobs-dir") .long("blobs-dir") .value_name("DIR") .help("Data directory for the blobs database.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Network parameters. */ .arg( - Arg::with_name("subscribe-all-subnets") + Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Subscribe to all subnets regardless of validator count. \ This will also advertise the beacon node as being long-lived subscribed to all subnets.") - .takes_value(false), + .display_order(0) ) .arg( - Arg::with_name("import-all-attestations") + Arg::new("import-all-attestations") .long("import-all-attestations") .help("Import and aggregate all attestations, regardless of validator subscriptions. 
\ This will only import attestations from already-subscribed subnets, use with \ --subscribe-all-subnets to ensure all attestations are received for import.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") .long("disable-packet-filter") .help("Disables the discovery packet filter. Useful for testing in smaller networks") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("shutdown-after-sync") + Arg::new("shutdown-after-sync") .long("shutdown-after-sync") .help("Shutdown beacon node as soon as sync is completed. Backfill sync will \ not be performed before shutdown.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("zero-ports") + Arg::new("zero-ports") .long("zero-ports") - .short("z") + .short('z') .help("Sets all listening TCP/UDP ports to 0, allowing the OS to choose some \ arbitrary free ports.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. To listen \ @@ -86,13 +113,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6. The order of the given addresses is not relevant. However, \ multiple IPv4, or multiple IPv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(0..=2) .default_value("0.0.0.0") - .takes_value(true) + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The TCP/UDP ports to listen on. There are two UDP ports. \ @@ -100,134 +127,153 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { --discovery-port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 and IPv6 the --port flag \ will apply to the IPv4 address and --port6 to the IPv6 address.") .default_value("9000") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ IPv6. Defaults to 9090 when required. The Quic UDP port will be set to this value + 1.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port") + Arg::new("discovery-port") .long("discovery-port") .value_name("PORT") .help("The UDP port that discovery will listen on. Defaults to `port`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port") + Arg::new("quic-port") .long("quic-port") .value_name("PORT") .help("The UDP port that quic will listen on. Defaults to `port` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("discovery-port6") + Arg::new("discovery-port6") .long("discovery-port6") .value_name("PORT") .help("The UDP port that discovery will listen on over IPv6 if listening over \ both IPv4 and IPv6. 
Defaults to `port6`") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("quic-port6") + Arg::new("quic-port6") .long("quic-port6") .value_name("PORT") .help("The UDP port that quic will listen on over IPv6 if listening over \ both IPv4 and IPv6. Defaults to `port6` + 1") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("target-peers") + Arg::new("target-peers") .long("target-peers") .help("The target number of peers.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR/MULTIADDR LIST") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("network-load") + Arg::new("network-load") .long("network-load") .value_name("INTEGER") .help("Lighthouse's network can be tuned for bandwidth/performance. Setting this to a high value, will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can increase profit of validators marginally by receiving messages faster on the network. Lower values decrease bandwidth usage, but makes communication slower which can lead to validator performance reduction. Values are in the range [1,5].") .default_value("3") - .set(clap::ArgSettings::Hidden) - .takes_value(true), + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-upnp") + Arg::new("disable-upnp") .long("disable-upnp") .help("Disables UPnP support. Setting this will prevent Lighthouse from attempting to automatically establish external port mappings.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("private") + Arg::new("private") .long("private") .help("Prevents sending various client identification information.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-udp-port") .value_name("PORT") .help("The UDP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic-port") + Arg::new("enr-quic-port") .long("enr-quic-port") .value_name("PORT") .help("The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-quic6-port") + Arg::new("enr-quic6-port") .long("enr-quic6-port") .value_name("PORT") .help("The quic UDP6 port that will be set on the local ENR. 
Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp-port") + Arg::new("enr-tcp-port") .long("enr-tcp-port") .value_name("PORT") .help("The TCP4 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv4. The --port flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-tcp6-port") + Arg::new("enr-tcp6-port") .long("enr-tcp6-port") .value_name("PORT") .help("The TCP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IPv6. The --port6 flag is \ used if this is not set.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-address") + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -236,76 +282,115 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) - .takes_value(true), + .action(ArgAction::Append) + .num_args(1..=2) + .display_order(0) ) .arg( - Arg::with_name("enr-match") - .short("e") + Arg::new("enr-match") + .short('e') .long("enr-match") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Sets the local ENR IP address and port to match those set for lighthouse. \ Specifically, the IP address will be the value of --listen-address and the \ UDP port will be --discovery-port.") + .display_order(0) ) .arg( - Arg::with_name("disable-enr-auto-update") - .short("x") + Arg::new("disable-enr-auto-update") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-enr-auto-update") .help("Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. \ - This disables this feature, fixing the ENR's IP/PORT to those specified on boot."), + This disables this feature, fixing the ENR's IP/PORT to those specified on boot.") + .display_order(0) ) .arg( - Arg::with_name("libp2p-addresses") + Arg::new("libp2p-addresses") .long("libp2p-addresses") .value_name("MULTIADDR") .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer \ without an ENR.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) - // NOTE: This is hidden because it is primarily a developer feature for testnets and + // NOTE: This is hide because it is primarily a developer feature for testnets and // debugging. We remove it from the list to avoid clutter. .arg( - Arg::with_name("disable-discovery") + Arg::new("disable-discovery") .long("disable-discovery") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the discv5 discovery protocol. The node will not search for new peers or participate in the discovery protocol.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("disable-quic") + Arg::new("disable-quic") .long("disable-quic") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disables the quic transport. 
The node will rely solely on the TCP transport for libp2p connections.") + .display_order(0) ) .arg( - Arg::with_name("disable-peer-scoring") + Arg::new("disable-peer-scoring") .long("disable-peer-scoring") .help("Disables peer scoring in lighthouse. WARNING: This is a dev only flag is only meant to be used in local testing scenarios \ Using this flag on a real network may cause your node to become eclipsed and see a different view of the network") - .takes_value(false) - .hidden(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("trusted-peers") + Arg::new("trusted-peers") .long("trusted-peers") .value_name("TRUSTED_PEERS") .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) + .display_order(0) ) .arg( - Arg::with_name("genesis-backfill") + Arg::new("genesis-backfill") .long("genesis-backfill") .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("enable-private-discovery") + Arg::new("enable-private-discovery") .long("enable-private-discovery") .help("Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("self-limiter") + Arg::new("self-limiter") .long("self-limiter") + .help("This flag is deprecated and has no effect.") + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("disable-self-limiter") + .long("disable-self-limiter") + .help( + "Disables the outbound rate limiter (requests sent by this node)." + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("self-limiter-protocols") + .long("self-limiter-protocols") .help( "Enables the outbound rate limiter (requests made by this node).\ \ @@ -315,69 +400,89 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { present in the configuration, the quotas used for the inbound rate limiter will be \ used." ) - .min_values(0) - .hidden(true) + .action(ArgAction::Append) + .value_delimiter(';') + .conflicts_with("disable-self-limiter") + .display_order(0) ) .arg( - Arg::with_name("proposer-only") + Arg::new("proposer-only") .long("proposer-only") .help("Sets this beacon node at be a block proposer only node. \ This will run the beacon node in a minimal configuration that is sufficient for block publishing only. This flag should be used \ for a beacon node being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("inbound-rate-limiter") - .long("inbound-rate-limiter") + Arg::new("disable-inbound-rate-limiter") + .long("disable-inbound-rate-limiter") + .help( + "Disables the inbound rate limiter (requests received by this node)." 
+ ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("inbound-rate-limiter-protocols") + .long("inbound-rate-limiter-protocols") .help( "Configures the inbound rate limiter (requests received by this node).\ \ Rate limit quotas per protocol can be set in the form of \ :/. To set quotas for multiple protocols, \ - separate them by ';'. If the inbound rate limiter is enabled and a protocol is not \ - present in the configuration, the default quotas will be used. \ + separate them by ';'. \ \ - This is enabled by default, using default quotas. To disable rate limiting pass \ - `disabled` to this option instead." + This is enabled by default, using default quotas. To disable rate limiting use \ + the disable-inbound-rate-limiter flag instead." ) - .takes_value(true) - .hidden(true) + .action(ArgAction::Set) + .conflicts_with("disable-inbound-rate-limiter") + .display_order(0) ) .arg( - Arg::with_name("disable-backfill-rate-limiting") + Arg::new("disable-backfill-rate-limiting") .long("disable-backfill-rate-limiting") .help("Disable the backfill sync rate-limiting. This allow users to just sync the entire chain as fast \ as possible, however it can result in resource contention which degrades staking performance. Stakers \ should generally choose to avoid this flag since backfill sync is not required for staking.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("enable_http") .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "127.0.0.1") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("enable_http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("enable_http", None, "5052") - .takes_value(true), + .default_value_if("enable_http", ArgPredicate::IsPresent, "5052") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("enable_http") .value_name("ORIGIN") @@ -385,71 +490,82 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5052).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-spec-fork") + Arg::new("http-spec-fork") .long("http-spec-fork") .requires("enable_http") .value_name("FORK") .help("This flag is deprecated and has no effect.") - .takes_value(true) - .hidden(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-enable-tls") + Arg::new("http-enable-tls") .long("http-enable-tls") .help("Serves the RESTful HTTP API server over TLS. 
This feature is currently \ experimental.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("http-tls-cert") .requires("http-tls-key") + .display_order(0) ) .arg( - Arg::with_name("http-tls-cert") + Arg::new("http-tls-cert") .long("http-tls-cert") .requires("enable_http") .help("The path of the certificate to be used when serving the HTTP API server \ over TLS.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-tls-key") + Arg::new("http-tls-key") .long("http-tls-key") .requires("enable_http") .help("The path of the private key to be used when serving the HTTP API server \ over TLS. Must not be password-protected.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-sync-stalled") + Arg::new("http-allow-sync-stalled") .long("http-allow-sync-stalled") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .requires("enable_http") .help("This flag is deprecated and has no effect.") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("http-sse-capacity-multiplier") + Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "1") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "1") .value_name("N") .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \ Increasing this value can prevent messages from being dropped.") + .display_order(0) ) .arg( - Arg::with_name("http-duplicate-block-status") + Arg::new("http-duplicate-block-status") .long("http-duplicate-block-status") .requires("enable_http") - .takes_value(true) - .default_value_if("enable_http", None, "202") + .action(ArgAction::Set) + .default_value_if("enable_http", ArgPredicate::IsPresent, "202") .value_name("STATUS_CODE") .help("Status code to send when a block that is already known is POSTed to the \ HTTP API.") + .display_order(0) ) .arg( - Arg::with_name("http-enable-beacon-processor") + Arg::new("http-enable-beacon-processor") .long("http-enable-beacon-processor") .requires("enable_http") .value_name("BOOLEAN") @@ -457,36 +573,41 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { DoS protection. When set to \"true\", HTTP API requests will be queued and scheduled \ alongside other tasks. When set to \"false\", HTTP API responses will be executed \ immediately.") - .takes_value(true) - .default_value_if("enable_http", None, "true") + .action(ArgAction::Set) + .display_order(0) + .default_value_if("enable_http", ArgPredicate::IsPresent, "true") ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .value_name("ADDRESS") .requires("metrics") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5054") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5054") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .value_name("ORIGIN") .requires("metrics") @@ -494,15 +615,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5054).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("shuffling-cache-size") + Arg::new("shuffling-cache-size") .long("shuffling-cache-size") .help("Some HTTP API requests can be optimised by caching the shufflings at each epoch. \ This flag allows the user to set the shuffling cache size in epochs. \ Shufflings are dependent on validator count and setting this value to a large number can consume a large amount of memory.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* @@ -510,7 +633,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ @@ -519,16 +642,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) /* @@ -536,122 +661,143 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { */ .arg( - Arg::with_name("staking") + Arg::new("staking") .long("staking") .help("Standard option for a staking beacon node. This will enable the HTTP server \ on localhost:5052 and import deposit logs from the execution node. This is \ equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Eth1 Integration */ .arg( - Arg::with_name("eth1") + Arg::new("eth1") .long("eth1") .help("If present the node will connect to an eth1 node. 
This is required for \ block production, you must use this flag if you wish to serve a validator.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("dummy-eth1") + Arg::new("dummy-eth1") .long("dummy-eth1") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .conflicts_with("eth1") .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") + .display_order(0) ) .arg( - Arg::with_name("eth1-purge-cache") + Arg::new("eth1-purge-cache") .long("eth1-purge-cache") .value_name("PURGE-CACHE") .help("Purges the eth1 block and deposit caches") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("eth1-blocks-per-log-query") + Arg::new("eth1-blocks-per-log-query") .long("eth1-blocks-per-log-query") .value_name("BLOCKS") .help("Specifies the number of blocks that a deposit log query should span. \ This will reduce the size of responses from the Eth1 endpoint.") .default_value("1000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("eth1-cache-follow-distance") + Arg::new("eth1-cache-follow-distance") .long("eth1-cache-follow-distance") .value_name("BLOCKS") .help("Specifies the distance between the Eth1 chain head and the last block which \ should be imported into the cache. Setting this value lower can help \ compensate for irregular Proof-of-Work block times, but setting it too low \ can make the node vulnerable to re-orgs.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slots-per-restore-point") + Arg::new("slots-per-restore-point") .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ Cannot be changed after initialization. \ [default: 8192 (mainnet) or 64 (minimal)]") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("epochs-per-migration") + Arg::new("epochs-per-migration") .long("epochs-per-migration") .value_name("N") .help("The number of epochs to wait between running the migration of data from the \ hot DB to the cold DB. 
Less frequent runs can be useful for minimizing disk \ writes") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("block-cache-size") + Arg::new("block-cache-size") .long("block-cache-size") .value_name("SIZE") - .help("Specifies how many blocks the database should cache in memory [default: 5]") - .takes_value(true) + .help("Specifies how many blocks the database should cache in memory") + .default_value("5") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("historic-state-cache-size") + Arg::new("historic-state-cache-size") .long("historic-state-cache-size") .value_name("SIZE") - .help("Specifies how many states from the freezer database should cache in memory [default: 1]") - .takes_value(true) + .help("Specifies how many states from the freezer database should cache in memory") + .default_value("1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("state-cache-size") + Arg::new("state-cache-size") .long("state-cache-size") .value_name("STATE_CACHE_SIZE") - .help("Specifies the size of the snapshot cache [default: 3]") - .takes_value(true) + .help("Specifies the size of the state cache") + .default_value("128") + .action(ArgAction::Set) + .display_order(0) ) /* * Execution Layer Integration */ .arg( - Arg::with_name("execution-endpoint") + Arg::new("execution-endpoint") .long("execution-endpoint") .value_name("EXECUTION-ENDPOINT") .alias("execution-endpoints") .help("Server endpoint for an execution layer JWT-authenticated HTTP \ JSON-RPC connection. Uses the same endpoint to populate the \ deposit cache.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt") + Arg::new("execution-jwt") .long("execution-jwt") .value_name("EXECUTION-JWT") .alias("jwt-secrets") .help("File path which contains the hex-encoded JWT secret for the \ execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-secret-key") + Arg::new("execution-jwt-secret-key") .long("execution-jwt-secret-key") .value_name("EXECUTION-JWT-SECRET-KEY") .alias("jwt-secret-key") @@ -659,10 +805,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { execution endpoint provided in the --execution-endpoint flag.") .requires("execution-endpoint") .conflicts_with("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-id") + Arg::new("execution-jwt-id") .long("execution-jwt-id") .value_name("EXECUTION-JWT-ID") .alias("jwt-id") @@ -670,10 +817,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-jwt-version") + Arg::new("execution-jwt-version") .long("execution-jwt-version") .value_name("EXECUTION-JWT-VERSION") .alias("jwt-version") @@ -681,119 +829,162 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { during JWT authentication. 
It corresponds to the 'clv' field in the JWT claims object.\ Set to empty by default") .requires("execution-jwt") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .value_name("SUGGESTED-FEE-RECIPIENT") .help("Emergency fallback fee recipient for use in case the validator client does \ not have one configured. You should set this flag on the validator \ client instead of (or in addition to) setting it here.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder") + Arg::new("builder") .long("builder") .alias("payload-builder") .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") .requires("execution-endpoint") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("execution-timeout-multiplier") + Arg::new("execution-timeout-multiplier") .long("execution-timeout-multiplier") .value_name("NUM") .help("Unsigned integer to multiply the default execution timeouts by.") .default_value("1") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("builder-header-timeout") + .long("builder-header-timeout") + .value_name("MILLISECONDS") + .help("Defines a timeout value (in milliseconds) to use when \ + fetching a block header from the builder API.") + .default_value("1000") + .value_parser(|timeout: &str| { + match timeout + .parse::() + .ok() + .map(Duration::from_millis) + { + Some(val) => { + if val > Duration::from_secs(3) { + return Err("builder-header-timeout cannot exceed 3000ms") + } + Ok(timeout.to_string()) + }, + None => Err("builder-header-timeout must be a number"), + } + }) + .requires("builder") + .action(ArgAction::Set) + .display_order(0) ) /* Deneb settings */ .arg( - Arg::with_name("trusted-setup-file-override") + Arg::new("trusted-setup-file-override") .long("trusted-setup-file-override") .value_name("FILE") .help("Path to a json file containing the trusted setup params. \ NOTE: This will override the trusted setup that is generated \ from the mainnet kzg ceremony. Use with caution") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Database purging and compaction. */ .arg( - Arg::with_name("purge-db") + Arg::new("purge-db") .long("purge-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the chain database will be deleted. Use with caution.") + .display_order(0) ) .arg( - Arg::with_name("compact-db") + Arg::new("compact-db") .long("compact-db") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, apply compaction to the database on start-up. Use with caution. \ It is generally not recommended unless auto-compaction is disabled.") + .display_order(0) ) .arg( - Arg::with_name("auto-compact-db") + Arg::new("auto-compact-db") .long("auto-compact-db") .help("Enable or disable automatic compaction of the database on finalization.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-payloads") + Arg::new("prune-payloads") .long("prune-payloads") .help("Prune execution payloads from Lighthouse's database. 
This saves space but \ imposes load on the execution client, as payloads need to be \ reconstructed and sent to syncing peers.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("prune-blobs") + Arg::new("prune-blobs") .long("prune-blobs") .value_name("BOOLEAN") .help("Prune blobs from Lighthouse's database when they are older than the data \ data availability boundary relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .display_order(0) ) .arg( - Arg::with_name("epochs-per-blob-prune") + Arg::new("epochs-per-blob-prune") .long("epochs-per-blob-prune") .value_name("EPOCHS") .help("The epoch interval with which to prune blobs from Lighthouse's \ database when they are older than the data availability boundary \ relative to the current epoch.") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") + .display_order(0) ) .arg( - Arg::with_name("blob-prune-margin-epochs") + Arg::new("blob-prune-margin-epochs") .long("blob-prune-margin-epochs") .value_name("EPOCHS") .help("The margin for blob pruning in epochs. The oldest blobs are pruned \ up until data_availability_boundary - blob_prune_margin_epochs.") - .takes_value(true) + .action(ArgAction::Set) .default_value("0") + .display_order(0) ) /* * Misc. */ .arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help( "Specify your custom graffiti to be included in blocks. \ Defaults to the current version and commit, truncated to fit in 32 bytes. " ) .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("max-skip-slots") + Arg::new("max-skip-slots") .long("max-skip-slots") .help( "Refuse to skip more than this many slots when processing an attestation. \ @@ -801,43 +992,48 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { but could also cause unnecessary consensus failures, so is disabled by default." ) .value_name("NUM_SLOTS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) /* * Slasher. */ .arg( - Arg::with_name("slasher") + Arg::new("slasher") .long("slasher") .help( "Run a slasher alongside the beacon node. It is currently only recommended for \ expert users because of the immaturity of the slasher UX and the extra \ resources required." ) - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("slasher-dir") + Arg::new("slasher-dir") .long("slasher-dir") .help( "Set the slasher's database directory." ) .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .requires("slasher") + .display_order(0) ) .arg( - Arg::with_name("slasher-update-period") + Arg::new("slasher-update-period") .long("slasher-update-period") .help( "Configure how often the slasher runs batch processing." ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-slot-offset") + Arg::new("slasher-slot-offset") .long("slasher-slot-offset") .help( "Set the delay from the start of the slot at which the slasher should ingest \ @@ -846,10 +1042,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("SECONDS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-history-length") + Arg::new("slasher-history-length") .long("slasher-history-length") .help( "Configure how many epochs of history the slasher keeps. 
Immutable after \ @@ -857,65 +1054,74 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-max-db-size") + Arg::new("slasher-max-db-size") .long("slasher-max-db-size") .help( "Maximum size of the MDBX database used by the slasher." ) .value_name("GIGABYTES") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-att-cache-size") + Arg::new("slasher-att-cache-size") .long("slasher-att-cache-size") .help("Set the maximum number of attestation roots for the slasher to cache") .value_name("COUNT") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-chunk-size") + Arg::new("slasher-chunk-size") .long("slasher-chunk-size") .help( "Number of epochs per validator per chunk stored on disk." ) .value_name("EPOCHS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-validator-chunk-size") + Arg::new("slasher-validator-chunk-size") .long("slasher-validator-chunk-size") .help( "Number of validators per chunk stored on disk." ) .value_name("NUM_VALIDATORS") .requires("slasher") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("slasher-broadcast") + Arg::new("slasher-broadcast") .long("slasher-broadcast") .help("Broadcast slashings found by the slasher to the rest of the network \ [Enabled by default].") - .takes_value(true) + .action(ArgAction::Set) .default_value("true") + .num_args(0..=1) + .default_missing_value("true") + .display_order(0) ) .arg( - Arg::with_name("slasher-backend") + Arg::new("slasher-backend") .long("slasher-backend") .value_name("DATABASE") .help("Set the database backend to be used by the slasher.") - .takes_value(true) - .possible_values(slasher::DatabaseBackend::VARIANTS) + .action(ArgAction::Set) + .value_parser(slasher::DatabaseBackend::VARIANTS.to_vec()) .requires("slasher") + .display_order(0) ) .arg( - Arg::with_name("wss-checkpoint") + Arg::new("wss-checkpoint") .long("wss-checkpoint") .help( "Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify \ @@ -924,94 +1130,109 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { state use --checkpoint-sync-url." ) .value_name("WSS_CHECKPOINT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("checkpoint-state") + Arg::new("checkpoint-state") .long("checkpoint-state") .help("Set a checkpoint state to start syncing from. Must be aligned and match \ --checkpoint-block. Using --checkpoint-sync-url instead is recommended.") .value_name("STATE_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-block") + Arg::new("checkpoint-block") .long("checkpoint-block") .help("Set a checkpoint block to start syncing from. Must be aligned and match \ --checkpoint-state. Using --checkpoint-sync-url instead is recommended.") .value_name("BLOCK_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-blobs") + Arg::new("checkpoint-blobs") .long("checkpoint-blobs") .help("Set the checkpoint blobs to start syncing from. Must be aligned and match \ --checkpoint-block. 
Using --checkpoint-sync-url instead is recommended.") .value_name("BLOBS_SSZ") - .takes_value(true) + .action(ArgAction::Set) .requires("checkpoint-block") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url") + Arg::new("checkpoint-sync-url") .long("checkpoint-sync-url") .help("Set the remote beacon node HTTP endpoint to use for checkpoint sync.") .value_name("BEACON_NODE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("checkpoint-state") + .display_order(0) ) .arg( - Arg::with_name("checkpoint-sync-url-timeout") + Arg::new("checkpoint-sync-url-timeout") .long("checkpoint-sync-url-timeout") .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.") .value_name("SECONDS") - .takes_value(true) + .action(ArgAction::Set) .default_value("180") + .display_order(0) ) .arg( - Arg::with_name("allow-insecure-genesis-sync") + Arg::new("allow-insecure-genesis-sync") .long("allow-insecure-genesis-sync") .help("Enable syncing from genesis, which is generally insecure and incompatible with data availability checks. \ Checkpoint syncing is the preferred method for syncing a node. \ Only use this flag when testing. DO NOT use on mainnet!") .conflicts_with("checkpoint-sync-url") .conflicts_with("checkpoint-state") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("reconstruct-historic-states") + Arg::new("reconstruct-historic-states") .long("reconstruct-historic-states") .help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-auto") + Arg::new("validator-monitor-auto") .long("validator-monitor-auto") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Enables the automatic detection and monitoring of validators connected to the \ HTTP API and using the subnet subscription endpoint. This generally has the \ effect of providing additional logging and metrics for locally controlled \ validators.") + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-pubkeys") + Arg::new("validator-monitor-pubkeys") .long("validator-monitor-pubkeys") .help("A comma-separated list of 0x-prefixed validator public keys. \ These validators will receive special monitoring and additional \ logging.") .value_name("PUBKEYS") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-file") + Arg::new("validator-monitor-file") .long("validator-monitor-file") .help("As per --validator-monitor-pubkeys, but the comma-separated list is \ contained within a file at the given path.") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validator-monitor-individual-tracking-threshold") + Arg::new("validator-monitor-individual-tracking-threshold") .long("validator-monitor-individual-tracking-threshold") .help("Once the validator monitor reaches this number of local validators \ it will stop collecting per-validator Prometheus metrics and issuing \ @@ -1019,59 +1240,73 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { This avoids infeasibly high cardinality in the Prometheus database and \ high log volume when using many validators. 
Defaults to 64.") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-lock-timeouts") + Arg::new("disable-lock-timeouts") .long("disable-lock-timeouts") .help("Disable the timeouts applied to some internal locks by default. This can \ lead to less spurious failures on slow hardware but is considered \ experimental as it may obscure performance issues.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-proposer-reorgs") + Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") .help("Do not attempt to reorg late blocks from other validators when proposing.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-threshold") + Arg::new("proposer-reorg-threshold") .long("proposer-reorg-threshold") + .action(ArgAction::Set) .value_name("PERCENT") .help("Percentage of head vote weight below which to attempt a proposer reorg. \ Default: 20%") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-parent-threshold") + Arg::new("proposer-reorg-parent-threshold") .long("proposer-reorg-parent-threshold") .value_name("PERCENT") .help("Percentage of parent vote weight above which to attempt a proposer reorg. \ Default: 160%") .conflicts_with("disable-proposer-reorgs") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-epochs-since-finalization") + Arg::new("proposer-reorg-epochs-since-finalization") .long("proposer-reorg-epochs-since-finalization") + .action(ArgAction::Set) .value_name("EPOCHS") .help("Maximum number of epochs since finalization at which proposer reorgs are \ allowed. Default: 2") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-cutoff") + Arg::new("proposer-reorg-cutoff") .long("proposer-reorg-cutoff") .value_name("MILLISECONDS") + .action(ArgAction::Set) .help("Maximum delay after the start of the slot at which to propose a reorging \ block. Lower values can prevent failed reorgs by ensuring the block has \ ample time to propagate and be processed by the network. The default is \ 1/12th of a slot (1 second on mainnet)") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("proposer-reorg-disallowed-offsets") + Arg::new("proposer-reorg-disallowed-offsets") .long("proposer-reorg-disallowed-offsets") + .action(ArgAction::Set) .value_name("N1,N2,...") .help("Comma-separated list of integer offsets which can be used to avoid \ proposing reorging blocks at certain slots. An offset of N means that \ @@ -1080,66 +1315,75 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { avoided. Any offsets supplied with this flag will impose additional \ restrictions.") .conflicts_with("disable-proposer-reorgs") + .display_order(0) ) .arg( - Arg::with_name("prepare-payload-lookahead") + Arg::new("prepare-payload-lookahead") .long("prepare-payload-lookahead") .value_name("MILLISECONDS") .help("The time before the start of a proposal slot at which payload attributes \ should be sent. Low values are useful for execution nodes which don't \ improve their payload after the first call, and high values are useful \ for ensuring the EL is given ample notice. 
Default: 1/3 of a slot.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("always-prepare-payload") + Arg::new("always-prepare-payload") .long("always-prepare-payload") .help("Send payload attributes with every fork choice update. This is intended for \ use by block builders, relays and developers. You should set a fee \ recipient on this BN and also consider adjusting the \ --prepare-payload-lookahead flag.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("fork-choice-before-proposal-timeout") + Arg::new("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") .help("Set the maximum number of milliseconds to wait for fork choice before \ proposing a block. You can prevent waiting at all by setting the timeout \ to 0, however you risk proposing atop the wrong parent block.") .default_value("250") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("paranoid-block-proposal") + Arg::new("paranoid-block-proposal") .long("paranoid-block-proposal") .help("Paranoid enough to be reading the source? Nice. This flag reverts some \ block proposal optimisations and forces the node to check every attestation \ it includes super thoroughly. This may be useful in an emergency, but not \ otherwise.") - .hidden(true) - .takes_value(false) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips") + Arg::new("builder-fallback-skips") .long("builder-fallback-skips") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in a row, it will NOT query any connected builders, \ and will use the local execution engine for payload construction.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-skips-per-epoch") + Arg::new("builder-fallback-skips-per-epoch") .long("builder-fallback-skips-per-epoch") .help("If this node is proposing a block and has seen this number of skip slots \ on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ any connected builders, and will use the local execution engine for \ payload construction.") .default_value("8") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-epochs-since-finalization") + Arg::new("builder-fallback-epochs-since-finalization") .long("builder-fallback-epochs-since-finalization") .help("If this node is proposing a block and the chain has not finalized within \ this number of epochs, it will NOT query any connected builders, \ @@ -1149,152 +1393,180 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { if there are skips slots at the start of an epoch, right before this node \ is set to propose.") .default_value("3") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-fallback-disable-checks") + Arg::new("builder-fallback-disable-checks") .long("builder-fallback-disable-checks") .help("This flag disables all checks related to chain health. 
This means the builder \ API will always be used for payload construction, regardless of recent chain \ conditions.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-profit-threshold") + Arg::new("builder-profit-threshold") .long("builder-profit-threshold") .value_name("WEI_VALUE") .help("This flag is deprecated and has no effect.") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-user-agent") + Arg::new("builder-user-agent") .long("builder-user-agent") .value_name("STRING") .help("The HTTP user agent to send alongside requests to the builder URL. The \ default is Lighthouse's version string.") .requires("builder") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("reset-payload-statuses") + Arg::new("reset-payload-statuses") .long("reset-payload-statuses") .help("When present, Lighthouse will forget the payload statuses of any \ already-imported blocks. This can assist in the recovery from a consensus \ failure caused by the execution layer.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-deposit-contract-sync") + Arg::new("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") .help("Explicitly disables syncing of deposit logs from the execution node. \ This overrides any previous option that depends on it. \ Useful if you intend to run a non-validating beacon node.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-optimistic-finalized-sync") + Arg::new("disable-optimistic-finalized-sync") .long("disable-optimistic-finalized-sync") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Force Lighthouse to verify every execution block hash with the execution \ client during finalized sync. By default block hashes will be checked in \ Lighthouse and only passed to the EL if initial verification fails.") + .display_order(0) ) .arg( - Arg::with_name("light-client-server") + Arg::new("light-client-server") .long("light-client-server") .help("Act as a full node supporting light clients on the p2p network \ [experimental]") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("gui") + Arg::new("gui") .long("gui") .help("Enable the graphical user interface and all its requirements. \ This enables --http and --validator-monitor-auto and enables SSE logging.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("always-prefer-builder-payload") + Arg::new("always-prefer-builder-payload") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("always-prefer-builder-payload") .help("This flag is deprecated and has no effect.") + .display_order(0) ) .arg( - Arg::with_name("invalid-gossip-verified-blocks-path") + Arg::new("invalid-gossip-verified-blocks-path") + .action(ArgAction::Set) .long("invalid-gossip-verified-blocks-path") .value_name("PATH") .help("If a block succeeds gossip validation whilst failing full validation, store \ the block SSZ as a file at this path. This feature is only recommended for \ developers. 
This directory is not pruned, users should be careful to avoid \ filling up their disks.") + .display_order(0) ) .arg( - Arg::with_name("progressive-balances") + Arg::new("progressive-balances") .long("progressive-balances") .value_name("MODE") .help("Deprecated. This optimisation is now the default and cannot be disabled.") - .takes_value(true) - .possible_values(&["fast", "disabled", "checked", "strict"]) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-max-workers") + Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") .value_name("INTEGER") .help("Specifies the maximum concurrent tasks for the task scheduler. Increasing \ this value may increase resource consumption. Reducing the value \ may result in decreased resource usage and diminished performance. The \ default value is the number of logical CPU cores on the host.") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-work-queue-len") + Arg::new("beacon-processor-work-queue-len") .long("beacon-processor-work-queue-len") .value_name("INTEGER") .help("Specifies the length of the inbound event queue. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") .default_value("16384") - .hidden(true) - .takes_value(true) + .hide(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-reprocess-queue-len") + Arg::new("beacon-processor-reprocess-queue-len") .long("beacon-processor-reprocess-queue-len") .value_name("INTEGER") .help("Specifies the length of the queue for messages requiring delayed processing. \ Higher values may prevent messages from being dropped while lower values \ may help protect the node from becoming overwhelmed.") - .hidden(true) + .hide(true) .default_value("12288") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-attestation-batch-size") + Arg::new("beacon-processor-attestation-batch-size") .long("beacon-processor-attestation-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip attestations in a signature verification batch. \ Higher values may reduce CPU usage in a healthy network whilst lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("beacon-processor-aggregate-batch-size") + Arg::new("beacon-processor-aggregate-batch-size") .long("beacon-processor-aggregate-batch-size") .value_name("INTEGER") .help("Specifies the number of gossip aggregate attestations in a signature \ verification batch. 
\ Higher values may reduce CPU usage in a healthy network while lower values may \ increase CPU usage in an unhealthy or hostile network.") - .hidden(true) + .hide(true) .default_value("64") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("disable-duplicate-warn-logs") + Arg::new("disable-duplicate-warn-logs") .long("disable-duplicate-warn-logs") .help("This flag is deprecated and has no effect.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) - .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) + .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9a1d7df124c..f0d02f6c512 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,9 +5,9 @@ use beacon_chain::chain_config::{ }; use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; -use clap::ArgMatches; +use clap::{parser::ValueSource, ArgMatches, Id}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; -use clap_utils::parse_required; +use clap_utils::{parse_flag, parse_required}; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; @@ -50,7 +50,7 @@ pub fn get_config( client_config.set_data_dir(get_data_dir(cli_args)); // If necessary, remove any existing database and configuration - if client_config.data_dir().exists() && cli_args.is_present("purge-db") { + if client_config.data_dir().exists() && cli_args.get_flag("purge-db") { // Remove the chain_db. let chain_db = client_config.get_db_path(); if chain_db.exists() { @@ -96,7 +96,7 @@ pub fn get_config( * Note: the config values set here can be overwritten by other more specific cli params */ - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { client_config.http_api.enabled = true; client_config.sync_eth1_chain = true; } @@ -105,22 +105,22 @@ pub fn get_config( * Http API server */ - if cli_args.is_present("enable_http") { + if cli_args.get_one::("enable_http").is_some() { client_config.http_api.enabled = true; - if let Some(address) = cli_args.value_of("http-address") { + if let Some(address) = cli_args.get_one::("http-address") { client_config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::("http-port") { client_config.http_api.listen_port = port .parse::() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. 
hyper::header::HeaderValue::from_str(allow_origin) @@ -129,7 +129,7 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-spec-fork") { + if cli_args.get_one::("http-spec-fork").is_some() { warn!( log, "Ignoring --http-spec-fork"; @@ -137,22 +137,22 @@ pub fn get_config( ); } - if cli_args.is_present("http-enable-tls") { + if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args - .value_of("http-tls-cert") + .get_one::("http-tls-cert") .ok_or("--http-tls-cert was not provided.")? .parse::() .map_err(|_| "http-tls-cert is not a valid path name.")?, key: cli_args - .value_of("http-tls-key") + .get_one::("http-tls-key") .ok_or("--http-tls-key was not provided.")? .parse::() .map_err(|_| "http-tls-key is not a valid path name.")?, }); } - if cli_args.is_present("http-allow-sync-stalled") { + if cli_args.get_flag("http-allow-sync-stalled") { warn!( log, "Ignoring --http-allow-sync-stalled"; @@ -170,10 +170,10 @@ pub fn get_config( parse_required(cli_args, "http-duplicate-block-status")?; client_config.http_api.enable_light_client_server = - cli_args.is_present("light-client-server"); + cli_args.get_flag("light-client-server"); } - if cli_args.is_present("light-client-server") { + if cli_args.get_flag("light-client-server") { client_config.chain.enable_light_client_server = true; } @@ -185,23 +185,23 @@ pub fn get_config( * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { client_config.http_metrics.enabled = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::("metrics-address") { client_config.http_metrics.listen_addr = address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::("metrics-port") { client_config.http_metrics.listen_port = port .parse::() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -213,7 +213,7 @@ pub fn get_config( /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; @@ -227,7 +227,7 @@ pub fn get_config( // Log a warning indicating an open HTTP server if it wasn't specified explicitly // (e.g. using the --staking flag). - if cli_args.is_present("staking") { + if cli_args.get_flag("staking") { warn!( log, "Running HTTP server on port {}", client_config.http_api.listen_port @@ -235,7 +235,7 @@ pub fn get_config( } // Do not scrape for malloc metrics if we've disabled tuning malloc as it may cause panics. - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { client_config.http_metrics.allocator_metrics_enabled = false; } @@ -246,24 +246,24 @@ pub fn get_config( // When present, use an eth1 backend that generates deterministic junk. 
// // Useful for running testnets without the overhead of a deposit contract. - if cli_args.is_present("dummy-eth1") { + if cli_args.get_flag("dummy-eth1") { client_config.dummy_eth1_backend = true; } // When present, attempt to sync to an eth1 node. // // Required for block production. - if cli_args.is_present("eth1") { + if cli_args.get_flag("eth1") { client_config.sync_eth1_chain = true; } - if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { + if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { client_config.eth1.blocks_per_log_query = val .parse() .map_err(|_| "eth1-blocks-per-log-query is not a valid integer".to_string())?; } - if cli_args.is_present("eth1-purge-cache") { + if cli_args.get_flag("eth1-purge-cache") { client_config.eth1.purge_cache = true; } @@ -273,7 +273,7 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if let Some(endpoints) = cli_args.value_of("execution-endpoint") { + if let Some(endpoints) = cli_args.get_one::("execution-endpoint") { let mut el_config = execution_layer::Config::default(); // Always follow the deposit contract when there is an execution endpoint. @@ -296,13 +296,14 @@ pub fn get_config( let secret_file: PathBuf; // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied. - if let Some(secret_files) = cli_args.value_of("execution-jwt") { + if let Some(secret_files) = cli_args.get_one::("execution-jwt") { secret_file = parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?; // Check if the JWT secret key is passed directly via cli flag and persist it to the default // file location. - } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") { + } else if let Some(jwt_secret_key) = cli_args.get_one::("execution-jwt-secret-key") + { use std::fs::File; use std::io::Write; secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE); @@ -321,23 +322,27 @@ pub fn get_config( } // Parse and set the payload builder, if any. - if let Some(endpoint) = cli_args.value_of("builder") { + if let Some(endpoint) = cli_args.get_one::("builder") { let payload_builder = parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; el_config.builder_url = Some(payload_builder); el_config.builder_user_agent = clap_utils::parse_optional(cli_args, "builder-user-agent")?; + + el_config.builder_header_timeout = + clap_utils::parse_optional(cli_args, "builder-header-timeout")? 
+ .map(Duration::from_millis); } - if cli_args.is_present("builder-profit-threshold") { + if parse_flag(cli_args, "builder-profit-threshold") { warn!( log, "Ignoring --builder-profit-threshold"; "info" => "this flag is deprecated and will be removed" ); } - if cli_args.is_present("always-prefer-builder-payload") { + if cli_args.get_flag("always-prefer-builder-payload") { warn!( log, "Ignoring --always-prefer-builder-payload"; @@ -380,7 +385,8 @@ pub fn get_config( .map_err(|e| format!("Unable to read trusted setup file: {}", e))?; // Override default trusted setup file if required - if let Some(trusted_setup_file_path) = cli_args.value_of("trusted-setup-file-override") { + if let Some(trusted_setup_file_path) = cli_args.get_one::("trusted-setup-file-override") + { let file = std::fs::File::open(trusted_setup_file_path) .map_err(|e| format!("Failed to open trusted setup file: {}", e))?; let trusted_setup: TrustedSetup = serde_json::from_reader(file) @@ -388,36 +394,42 @@ pub fn get_config( client_config.trusted_setup = Some(trusted_setup); } - if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { + if let Some(freezer_dir) = cli_args.get_one::("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } - if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") { + if let Some(blobs_db_dir) = cli_args.get_one::("blobs-dir") { client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir)); } - let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; + let (sprp, sprp_explicit) = get_slots_per_restore_point::(clap_utils::parse_optional( + cli_args, + "slots-per-restore-point", + )?)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - if let Some(block_cache_size) = cli_args.value_of("block-cache-size") { + if let Some(block_cache_size) = cli_args.get_one::("block-cache-size") { client_config.store.block_cache_size = block_cache_size .parse() .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } - if let Some(cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { - client_config.store.state_cache_size = cache_size; + if let Some(cache_size) = cli_args.get_one::("state-cache-size") { + client_config.store.state_cache_size = cache_size + .parse() + .map_err(|_| "state-cache-size is not a valid integer".to_string())?; } - if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + if let Some(historic_state_cache_size) = cli_args.get_one::("historic-state-cache-size") + { client_config.store.historic_state_cache_size = historic_state_cache_size .parse() .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; } - client_config.store.compact_on_init = cli_args.is_present("compact-db"); - if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { + client_config.store.compact_on_init = cli_args.get_flag("compact-db"); + if let Some(compact_on_prune) = cli_args.get_one::("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune .parse() .map_err(|_| "auto-compact-db takes a boolean".to_string())?; @@ -458,7 +470,7 @@ pub fn get_config( * from lighthouse. * Discovery address is set to localhost by default. 
*/ - if cli_args.is_present("zero-ports") { + if cli_args.get_flag("zero-ports") { client_config.http_api.listen_port = 0; client_config.http_metrics.listen_port = 0; } @@ -524,14 +536,14 @@ pub fn get_config( None }; - client_config.allow_insecure_genesis_sync = cli_args.is_present("allow-insecure-genesis-sync"); + client_config.allow_insecure_genesis_sync = cli_args.get_flag("allow-insecure-genesis-sync"); client_config.genesis = if eth2_network_config.genesis_state_is_known() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. if let (Some(initial_state_path), Some(initial_block_path), opt_initial_blobs_path) = ( - cli_args.value_of("checkpoint-state"), - cli_args.value_of("checkpoint-block"), - cli_args.value_of("checkpoint-blobs"), + cli_args.get_one::("checkpoint-state"), + cli_args.get_one::("checkpoint-block"), + cli_args.get_one::("checkpoint-blobs"), ) { let read = |path: &str| { use std::fs::File; @@ -547,14 +559,14 @@ pub fn get_config( let anchor_state_bytes = read(initial_state_path)?; let anchor_block_bytes = read(initial_block_path)?; - let anchor_blobs_bytes = opt_initial_blobs_path.map(read).transpose()?; + let anchor_blobs_bytes = opt_initial_blobs_path.map(|s| read(s)).transpose()?; ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, anchor_block_bytes, anchor_blobs_bytes, } - } else if let Some(remote_bn_url) = cli_args.value_of("checkpoint-sync-url") { + } else if let Some(remote_bn_url) = cli_args.get_one::("checkpoint-sync-url") { let url = SensitiveUrl::parse(remote_bn_url) .map_err(|e| format!("Invalid checkpoint sync URL: {:?}", e))?; @@ -563,7 +575,7 @@ pub fn get_config( ClientGenesis::GenesisState } } else { - if cli_args.is_present("checkpoint-state") || cli_args.is_present("checkpoint-sync-url") { + if parse_flag(cli_args, "checkpoint-state") || parse_flag(cli_args, "checkpoint-sync-url") { return Err( "Checkpoint sync is not available for this network as no genesis state is known" .to_string(), @@ -572,14 +584,14 @@ pub fn get_config( ClientGenesis::DepositContract }; - if cli_args.is_present("reconstruct-historic-states") { + if cli_args.get_flag("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; client_config.chain.genesis_backfill = true; } - let beacon_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { + let beacon_graffiti = if let Some(graffiti) = cli_args.get_one::("graffiti") { GraffitiOrigin::UserSpecified(GraffitiString::from_str(graffiti)?.into()) - } else if cli_args.is_present("private") { + } else if cli_args.get_flag("private") { // When 'private' flag is present, use a zero-initialized bytes array. 
GraffitiOrigin::UserSpecified(GraffitiString::empty().into()) } else { @@ -588,7 +600,7 @@ pub fn get_config( }; client_config.beacon_graffiti = beacon_graffiti; - if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { + if let Some(wss_checkpoint) = cli_args.get_one::("wss-checkpoint") { let mut split = wss_checkpoint.split(':'); let root_str = split .next() @@ -623,8 +635,8 @@ pub fn get_config( client_config.chain.weak_subjectivity_checkpoint = Some(Checkpoint { epoch, root }) } - if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") { - client_config.chain.import_max_skip_slots = match max_skip_slots { + if let Some(max_skip_slots) = cli_args.get_one::("max-skip-slots") { + client_config.chain.import_max_skip_slots = match max_skip_slots.as_str() { "none" => None, n => Some( n.parse() @@ -638,8 +650,8 @@ pub fn get_config( spec.gossip_max_size as usize, ); - if cli_args.is_present("slasher") { - let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { + if cli_args.get_flag("slasher") { + let slasher_dir = if let Some(slasher_dir) = cli_args.get_one::("slasher-dir") { PathBuf::from(slasher_dir) } else { client_config.data_dir().join("slasher_db") @@ -704,11 +716,11 @@ pub fn get_config( client_config.slasher = Some(slasher_config); } - if cli_args.is_present("validator-monitor-auto") { + if cli_args.get_flag("validator-monitor-auto") { client_config.validator_monitor.auto_register = true; } - if let Some(pubkeys) = cli_args.value_of("validator-monitor-pubkeys") { + if let Some(pubkeys) = cli_args.get_one::("validator-monitor-pubkeys") { let pubkeys = pubkeys .split(',') .map(PublicKeyBytes::from_str) @@ -720,7 +732,7 @@ pub fn get_config( .extend_from_slice(&pubkeys); } - if let Some(path) = cli_args.value_of("validator-monitor-file") { + if let Some(path) = cli_args.get_one::("validator-monitor-file") { let string = fs::read(path) .map_err(|e| format!("Unable to read --validator-monitor-file: {}", e)) .and_then(|bytes| { @@ -747,11 +759,11 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.is_present("disable-lock-timeouts") { + if cli_args.get_flag("disable-lock-timeouts") { client_config.chain.enable_lock_timeouts = false; } - if cli_args.is_present("disable-proposer-reorgs") { + if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; } else { @@ -789,7 +801,7 @@ pub fn get_config( } // Note: This overrides any previous flags that enable this option. - if cli_args.is_present("disable-deposit-contract-sync") { + if cli_args.get_flag("disable-deposit-contract-sync") { client_config.sync_eth1_chain = false; } @@ -801,7 +813,7 @@ pub fn get_config( / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR }); - client_config.chain.always_prepare_payload = cli_args.is_present("always-prepare-payload"); + client_config.chain.always_prepare_payload = cli_args.get_flag("always-prepare-payload"); if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? 
@@ -809,10 +821,9 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - client_config.chain.always_reset_payload_statuses = - cli_args.is_present("reset-payload-statuses"); + client_config.chain.always_reset_payload_statuses = cli_args.get_flag("reset-payload-statuses"); - client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); + client_config.chain.paranoid_block_proposal = cli_args.get_flag("paranoid-block-proposal"); /* * Builder fallback configs. @@ -826,32 +837,32 @@ pub fn get_config( .builder_fallback_epochs_since_finalization = clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; client_config.chain.builder_fallback_disable_checks = - cli_args.is_present("builder-fallback-disable-checks"); + cli_args.get_flag("builder-fallback-disable-checks"); // Graphical user interface config. - if cli_args.is_present("gui") { + if cli_args.get_flag("gui") { client_config.http_api.enabled = true; client_config.validator_monitor.auto_register = true; } // Optimistic finalized sync. client_config.chain.optimistic_finalized_sync = - !cli_args.is_present("disable-optimistic-finalized-sync"); + !cli_args.get_flag("disable-optimistic-finalized-sync"); - if cli_args.is_present("genesis-backfill") { + if cli_args.get_flag("genesis-backfill") { client_config.chain.genesis_backfill = true; } // Backfill sync rate-limiting client_config.beacon_processor.enable_backfill_rate_limiting = - !cli_args.is_present("disable-backfill-rate-limiting"); + !cli_args.get_flag("disable-backfill-rate-limiting"); if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? { client_config.network.invalid_block_storage = Some(path); } - if cli_args.is_present("progressive-balances") { + if cli_args.get_one::("progressive-balances").is_some() { warn!( log, "Progressive balances mode is deprecated"; @@ -890,10 +901,9 @@ pub fn parse_listening_addresses( log: &Logger, ) -> Result { let listen_addresses_str = cli_args - .values_of("listen-address") + .get_many::("listen-address") .expect("--listen_addresses has a default value"); - - let use_zero_ports = cli_args.is_present("zero-ports"); + let use_zero_ports = parse_flag(cli_args, "zero-ports"); // parse the possible ips let mut maybe_ipv4 = None; @@ -927,28 +937,28 @@ pub fn parse_listening_addresses( // parse the possible tcp ports let port = cli_args - .value_of("port") + .get_one::("port") .expect("--port has a default value") .parse::() .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; let port6 = cli_args - .value_of("port6") - .map(str::parse::) + .get_one::("port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? .unwrap_or(9090); // parse the possible discovery ports. let maybe_disc_port = cli_args - .value_of("discovery-port") - .map(str::parse::) + .get_one::("discovery-port") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port as an integer: {parse_error}") })?; let maybe_disc6_port = cli_args - .value_of("discovery-port6") - .map(str::parse::) + .get_one::("discovery-port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --discovery-port6 as an integer: {parse_error}") @@ -956,8 +966,8 @@ pub fn parse_listening_addresses( // parse the possible quic port. 
let maybe_quic_port = cli_args - .value_of("quic-port") - .map(str::parse::) + .get_one::("quic-port") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic-port as an integer: {parse_error}") @@ -965,8 +975,8 @@ pub fn parse_listening_addresses( // parse the possible quic port. let maybe_quic6_port = cli_args - .value_of("quic-port6") - .map(str::parse::) + .get_one::("quic-port6") + .map(|s| str::parse::(s)) .transpose() .map_err(|parse_error| { format!("Failed to parse --quic6-port as an integer: {parse_error}") @@ -980,10 +990,10 @@ pub fn parse_listening_addresses( } (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports - - if cli_args.is_present("port6") { - warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored.") + if cli_args.value_source("port6") == Some(ValueSource::CommandLine) { + warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } + // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) @@ -1117,41 +1127,41 @@ pub fn set_network_config( log: &Logger, ) -> Result<(), String> { // If a network dir has been specified, override the `datadir` definition. - if let Some(dir) = cli_args.value_of("network-dir") { + if let Some(dir) = cli_args.get_one::("network-dir") { config.network_dir = PathBuf::from(dir); } else { config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; - if cli_args.is_present("subscribe-all-subnets") { + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } - if cli_args.is_present("import-all-attestations") { + if parse_flag(cli_args, "import-all-attestations") { config.import_all_attestations = true; } - if cli_args.is_present("shutdown-after-sync") { + if parse_flag(cli_args, "shutdown-after-sync") { config.shutdown_after_sync = true; } config.set_listening_addr(parse_listening_addresses(cli_args, log)?); // A custom target-peers command will overwrite the --proposer-only default. 
- if let Some(target_peers_str) = cli_args.value_of("target-peers") { + if let Some(target_peers_str) = cli_args.get_one::("target-peers") { config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; } - if let Some(value) = cli_args.value_of("network-load") { + if let Some(value) = cli_args.get_one::("network-load") { let network_load = value .parse::() .map_err(|_| format!("Invalid integer: {}", value))?; config.network_load = network_load; } - if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { + if let Some(boot_enr_str) = cli_args.get_one::("boot-nodes") { let mut enrs: Vec = vec![]; let mut multiaddrs: Vec = vec![]; for addr in boot_enr_str.split(',') { @@ -1176,7 +1186,7 @@ pub fn set_network_config( config.boot_nodes_multiaddr = multiaddrs; } - if let Some(libp2p_addresses_str) = cli_args.value_of("libp2p-addresses") { + if let Some(libp2p_addresses_str) = cli_args.get_one::("libp2p-addresses") { config.libp2p_nodes = libp2p_addresses_str .split(',') .map(|multiaddr| { @@ -1187,11 +1197,11 @@ pub fn set_network_config( .collect::, _>>()?; } - if cli_args.is_present("disable-peer-scoring") { + if parse_flag(cli_args, "disable-peer-scoring") { config.disable_peer_scoring = true; } - if let Some(trusted_peers_str) = cli_args.value_of("trusted-peers") { + if let Some(trusted_peers_str) = cli_args.get_one::("trusted-peers") { config.trusted_peers = trusted_peers_str .split(',') .map(|peer_id| { @@ -1205,7 +1215,7 @@ pub fn set_network_config( } } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str .parse::() @@ -1213,7 +1223,7 @@ pub fn set_network_config( ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::("enr-quic-port") { config.enr_quic4_port = Some( enr_quic_port_str .parse::() @@ -1221,7 +1231,7 @@ pub fn set_network_config( ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str .parse::() @@ -1229,7 +1239,7 @@ pub fn set_network_config( ); } - if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { + if let Some(enr_udp_port_str) = cli_args.get_one::("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str .parse::() @@ -1237,7 +1247,7 @@ pub fn set_network_config( ); } - if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { + if let Some(enr_quic_port_str) = cli_args.get_one::("enr-quic6-port") { config.enr_quic6_port = Some( enr_quic_port_str .parse::() @@ -1245,7 +1255,7 @@ pub fn set_network_config( ); } - if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { + if let Some(enr_tcp_port_str) = cli_args.get_one::("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str .parse::() @@ -1253,7 +1263,7 @@ pub fn set_network_config( ); } - if cli_args.is_present("enr-match") { + if parse_flag(cli_args, "enr-match") { // Match the IP and UDP port in the ENR. 
if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { @@ -1291,7 +1301,7 @@ pub fn set_network_config( } } - if let Some(enr_addresses) = cli_args.values_of("enr-address") { + if let Some(enr_addresses) = cli_args.get_many::("enr-address") { let mut enr_ip4 = None; let mut enr_ip6 = None; let mut resolved_enr_ip4 = None; @@ -1369,79 +1379,77 @@ pub fn set_network_config( } } - if cli_args.is_present("disable-enr-auto-update") { + if parse_flag(cli_args, "disable-enr-auto-update") { config.discv5_config.enr_update = false; } - if cli_args.is_present("disable-packet-filter") { + if parse_flag(cli_args, "disable-packet-filter") { warn!(log, "Discv5 packet filter is disabled"); config.discv5_config.enable_packet_filter = false; } - if cli_args.is_present("disable-discovery") { + if parse_flag(cli_args, "disable-discovery") { config.disable_discovery = true; warn!(log, "Discovery is disabled. New peers will not be found"); } - if cli_args.is_present("disable-quic") { + if parse_flag(cli_args, "disable-quic") { config.disable_quic_support = true; } - if cli_args.is_present("disable-upnp") { + if parse_flag(cli_args, "disable-upnp") { config.upnp_enabled = false; } - if cli_args.is_present("private") { + if parse_flag(cli_args, "private") { config.private = true; } - if cli_args.is_present("metrics") { + if parse_flag(cli_args, "metrics") { config.metrics_enabled = true; } - if cli_args.is_present("enable-private-discovery") { + if parse_flag(cli_args, "enable-private-discovery") { config.discv5_config.table_filter = |_| true; } // Light client server config. - config.enable_light_client_server = cli_args.is_present("light-client-server"); + config.enable_light_client_server = parse_flag(cli_args, "light-client-server"); - // The self limiter is disabled by default. - // This flag can be used both with or without a value. Try to parse it first with a value, if - // no value is defined but the flag is present, use the default params. - config.outbound_rate_limiter_config = clap_utils::parse_optional(cli_args, "self-limiter")?; - if cli_args.is_present("self-limiter") && config.outbound_rate_limiter_config.is_none() { - config.outbound_rate_limiter_config = Some(Default::default()); - } + // The self limiter is enabled by default. If the `self-limiter-protocols` flag is not provided, + // the default params will be used. + config.outbound_rate_limiter_config = if parse_flag(cli_args, "disable-self-limiter") { + None + } else if let Some(protocols) = cli_args.get_one::("self-limiter-protocols") { + Some(protocols.parse()?) + } else { + Some(Default::default()) + }; // Proposer-only mode overrides a number of previous configuration parameters. // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set // of peers. - if cli_args.is_present("proposer-only") { + if parse_flag(cli_args, "proposer-only") { config.subscribe_all_subnets = false; - if cli_args.value_of("target-peers").is_none() { + if cli_args.get_one::("target-peers").is_none() { // If a custom value is not set, change the default to 15 config.target_peers = 15; } config.proposer_only = true; warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); } - // The inbound rate limiter is enabled by default unless `disabled` is passed to the - // `inbound-rate-limiter` flag. Any other value should be parsed as a configuration string. 
- config.inbound_rate_limiter_config = match cli_args.value_of("inbound-rate-limiter") { - None => { - // Enabled by default, with default values + // The inbound rate limiter is enabled by default unless `disabled` via the + // `disable-inbound-rate-limiter` flag. + config.inbound_rate_limiter_config = if parse_flag(cli_args, "disable-inbound-rate-limiter") { + None + } else { + // Use the default unless values are provided via the `inbound-rate-limiter-protocols` + if let Some(protocols) = cli_args.get_one::("inbound-rate-limiter-protocols") { + Some(protocols.parse()?) + } else { Some(Default::default()) } - Some("disabled") => { - // Explicitly disabled - None - } - Some(config_str) => { - // Enabled with a custom configuration - Some(config_str.parse()?) - } }; Ok(()) } @@ -1454,7 +1462,7 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // directory and the testnet name onto it. cli_args - .value_of("datadir") + .get_one::("datadir") .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR)) .or_else(|| { dirs::home_dir().map(|home| { @@ -1470,11 +1478,9 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { /// /// Return `(sprp, set_explicitly)` where `set_explicitly` is `true` if the user provided the value. pub fn get_slots_per_restore_point( - cli_args: &ArgMatches, + slots_per_restore_point: Option, ) -> Result<(u64, bool), String> { - if let Some(slots_per_restore_point) = - clap_utils::parse_optional(cli_args, "slots-per-restore-point")? - { + if let Some(slots_per_restore_point) = slots_per_restore_point { Ok((slots_per_restore_point, true)) } else { let default = std::cmp::min( diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index ee782c650e2..40b667a7447 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate clap; - mod cli; mod config; @@ -20,7 +17,7 @@ use slasher::{DatabaseBackendOverride, Slasher}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use types::EthSpec; +use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. pub type ProductionClient = @@ -44,7 +41,7 @@ impl ProductionBeaconNode { /// configurations hosted remotely. 
pub async fn new_from_cli( context: RuntimeContext, - matches: ArgMatches<'static>, + matches: ArgMatches, ) -> Result { let client_config = get_config::(&matches, &context)?; Self::new(context, client_config).await @@ -81,9 +78,19 @@ impl ProductionBeaconNode { TimeoutRwLock::disable_timeouts() } + if let Err(misaligned_forks) = validator_fork_epochs(&spec) { + warn!( + log, + "Fork boundaries are not well aligned / multiples of 256"; + "info" => "This may cause issues as fork boundaries do not align with the \ + start of sync committee period.", + "misaligned_forks" => ?misaligned_forks, + ); + } + let builder = ClientBuilder::new(context.eth_spec_instance.clone()) .runtime_context(context) - .chain_spec(spec) + .chain_spec(spec.clone()) .beacon_processor(client_config.beacon_processor.clone()) .http_api_config(client_config.http_api.clone()) .disk_store( @@ -116,8 +123,12 @@ impl ProductionBeaconNode { _ => {} } let slasher = Arc::new( - Slasher::open(slasher_config, log.new(slog::o!("service" => "slasher"))) - .map_err(|e| format!("Slasher open error: {:?}", e))?, + Slasher::open( + slasher_config, + Arc::new(spec), + log.new(slog::o!("service" => "slasher")), + ) + .map_err(|e| format!("Slasher open error: {:?}", e))?, ); builder.slasher(slasher) } else { @@ -182,6 +193,28 @@ impl ProductionBeaconNode { } } +fn validator_fork_epochs(spec: &ChainSpec) -> Result<(), Vec<(ForkName, Epoch)>> { + // @dapplion: "We try to schedule forks such that the fork epoch is a multiple of 256, to keep + // historical vectors in the same fork. Indirectly that makes light client periods align with + // fork boundaries." + let sync_committee_period = spec.epochs_per_sync_committee_period; // 256 + let is_fork_boundary_misaligned = |epoch: Epoch| epoch % sync_committee_period != 0; + + let forks_with_misaligned_epochs = ForkName::list_all_fork_epochs(spec) + .iter() + .filter_map(|(fork, fork_epoch_opt)| { + fork_epoch_opt + .and_then(|epoch| is_fork_boundary_misaligned(epoch).then_some((*fork, epoch))) + }) + .collect::>(); + + if forks_with_misaligned_epochs.is_empty() { + Ok(()) + } else { + Err(forks_with_misaligned_epochs) + } +} + impl Deref for ProductionBeaconNode { type Target = ProductionClient; @@ -205,3 +238,23 @@ impl lighthouse_network::discv5::Executor for Discv5Executor { self.0.spawn(future, "discv5") } } + +#[cfg(test)] +mod test { + use super::*; + use types::MainnetEthSpec; + + #[test] + fn test_validator_fork_epoch_alignments() { + let mut spec = MainnetEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(256)); + spec.deneb_fork_epoch = Some(Epoch::new(257)); + spec.electra_fork_epoch = None; + let result = validator_fork_epochs(&spec); + assert_eq!( + result, + Err(vec![(ForkName::Deneb, spec.deneb_fork_epoch.unwrap())]) + ); + } +} diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b7822670078..7bf1ef76bef 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -25,7 +25,3 @@ lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } strum = { workspace = true } -safe_arith = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } diff --git a/beacon_node/store/src/consensus_context.rs b/beacon_node/store/src/consensus_context.rs index 08fad17b14b..281106d9aaa 100644 --- a/beacon_node/store/src/consensus_context.rs +++ b/beacon_node/store/src/consensus_context.rs @@ -1,7 +1,7 
@@ use ssz_derive::{Decode, Encode}; use state_processing::ConsensusContext; use std::collections::HashMap; -use types::{AttestationData, BitList, EthSpec, Hash256, IndexedAttestation, Slot}; +use types::{EthSpec, Hash256, IndexedAttestation, Slot}; /// The consensus context is stored on disk as part of the data availability overflow cache. /// @@ -21,8 +21,7 @@ pub struct OnDiskConsensusContext { /// /// They are not part of the on-disk format. #[ssz(skip_serializing, skip_deserializing)] - indexed_attestations: - HashMap<(AttestationData, BitList), IndexedAttestation>, + indexed_attestations: HashMap>, } impl OnDiskConsensusContext { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 484a1139bf9..9c247c983a9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2485,6 +2485,57 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + + /// Prune states from the hot database which are prior to the split. + /// + /// This routine is important for cleaning up advanced states which are stored in the database + /// with a temporary flag. + pub fn prune_old_hot_states(&self) -> Result<(), Error> { + let split = self.get_split_info(); + debug!( + self.log, + "Database state pruning started"; + "split_slot" => split.slot, + ); + let mut state_delete_batch = vec![]; + for res in self + .hot_db + .iter_column::(DBColumn::BeaconStateSummary) + { + let (state_root, summary_bytes) = res?; + let summary = HotStateSummary::from_ssz_bytes(&summary_bytes)?; + + if summary.slot <= split.slot { + let old = summary.slot < split.slot; + let non_canonical = summary.slot == split.slot + && state_root != split.state_root + && !split.state_root.is_zero(); + if old || non_canonical { + let reason = if old { + "old dangling state" + } else { + "non-canonical" + }; + debug!( + self.log, + "Deleting state"; + "state_root" => ?state_root, + "slot" => summary.slot, + "reason" => reason, + ); + state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); + } + } + } + let num_deleted_states = state_delete_batch.len(); + self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?; + debug!( + self.log, + "Database state pruning complete"; + "num_deleted_states" => num_deleted_states, + ); + Ok(()) + } } /// Advance the split point of the store, moving new finalized states to the freezer. diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 66032d89c52..0e0965670b5 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,9 +7,6 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. -#[macro_use] -extern crate lazy_static; - mod chunk_writer; pub mod chunked_iter; pub mod chunked_vector; @@ -292,7 +289,7 @@ impl DBColumn { /// This function returns the number of bytes used by keys in a given column. pub fn key_size(self) -> usize { match self { - Self::OverflowLRUCache => 33, // See `OverflowKey` encode impl. 
+ Self::OverflowLRUCache => 33, // DEPRECATED Self::BeaconMeta | Self::BeaconBlock | Self::BeaconState diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 1675051bd80..a22dc4aab4c 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(19); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(21); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 2d901fdd932..f8dbbfec988 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,6 +1,7 @@ pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; +use lazy_static::lazy_static; use std::path::Path; lazy_static! { @@ -54,14 +55,6 @@ lazy_static! { "store_beacon_state_hot_get_total", "Total number of hot beacon states requested from the store (cache or DB)" ); - pub static ref BEACON_STATE_CACHE_HIT_COUNT: Result = try_create_int_counter( - "store_beacon_state_cache_hit_total", - "Number of hits to the store's state cache" - ); - pub static ref BEACON_STATE_CACHE_CLONE_TIME: Result = try_create_histogram( - "store_beacon_state_cache_clone_time", - "Time to load a beacon block from the block cache" - ); pub static ref BEACON_STATE_READ_TIMES: Result = try_create_histogram( "store_beacon_state_read_seconds", "Total time required to read a BeaconState from the database" @@ -105,30 +98,6 @@ lazy_static! { "store_beacon_blobs_cache_hit_total", "Number of hits to the store's blob cache" ); - pub static ref BEACON_BLOCK_READ_TIMES: Result = try_create_histogram( - "store_beacon_block_read_overhead_seconds", - "Overhead on reading a beacon block from the DB (e.g., decoding)" - ); - pub static ref BEACON_BLOCK_READ_COUNT: Result = try_create_int_counter( - "store_beacon_block_read_total", - "Total number of beacon block reads from the DB" - ); - pub static ref BEACON_BLOCK_READ_BYTES: Result = try_create_int_counter( - "store_beacon_block_read_bytes_total", - "Total number of beacon block bytes read from the DB" - ); - pub static ref BEACON_BLOCK_WRITE_TIMES: Result = try_create_histogram( - "store_beacon_block_write_overhead_seconds", - "Overhead on writing a beacon block to the DB (e.g., encoding)" - ); - pub static ref BEACON_BLOCK_WRITE_COUNT: Result = try_create_int_counter( - "store_beacon_block_write_total", - "Total number of beacon block writes the DB" - ); - pub static ref BEACON_BLOCK_WRITE_BYTES: Result = try_create_int_counter( - "store_beacon_block_write_bytes_total", - "Total number of beacon block bytes written to the DB" - ); } /// Updates the global metrics registry with store-related information. 
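For readers less familiar with clap v4, the recurring pattern in the `beacon_node` hunks above is the move from `Arg::with_name` / `takes_value` / `is_present` / `value_of` to `Arg::new` / `ArgAction` / `get_flag` / `get_one`. The standalone sketch below is illustrative only (the `example-bn` command and its flags are hypothetical, not Lighthouse code); it shows the v4 accessors, plus the `value_source` check used above to detect whether a defaulted argument was overridden on the command line.

```rust
// Illustrative sketch of the clap v4 patterns used in this diff; not Lighthouse code.
use clap::{parser::ValueSource, Arg, ArgAction, Command};

fn main() {
    let matches = Command::new("example-bn")
        .arg(
            // Boolean flag: `ArgAction::SetTrue` replaces `.takes_value(false)`.
            Arg::new("purge-db")
                .long("purge-db")
                .action(ArgAction::SetTrue),
        )
        .arg(
            // Valued flag: `ArgAction::Set` replaces `.takes_value(true)`.
            Arg::new("port6")
                .long("port6")
                .value_name("PORT")
                .action(ArgAction::Set)
                .default_value("9090"),
        )
        .get_matches();

    // `get_flag` replaces `is_present` for boolean flags.
    if matches.get_flag("purge-db") {
        println!("purging the database");
    }

    // `get_one::<String>` replaces `value_of`; values are still parsed by hand here.
    if let Some(port) = matches.get_one::<String>("port6") {
        let port: u16 = port.parse().expect("port6 must be a valid u16");
        println!("port6: {port}");
    }

    // `value_source` distinguishes a user-supplied value from a default, which
    // `is_present` could not do reliably for arguments with a `default_value`.
    if matches.value_source("port6") == Some(ValueSource::CommandLine) {
        println!("--port6 was set explicitly on the command line");
    }
}
```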
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index e56d0580ac2..8f40b4b9241 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -121,7 +121,7 @@ where // Electra #[superstruct(only(Electra))] - pub deposit_receipts_start_index: u64, + pub deposit_requests_start_index: u64, #[superstruct(only(Electra))] pub deposit_balance_to_consume: u64, #[superstruct(only(Electra))] @@ -133,7 +133,6 @@ where #[superstruct(only(Electra))] pub earliest_consolidation_epoch: Epoch, - // TODO(electra) should these be optional? #[superstruct(only(Electra))] pub pending_balance_deposits: List, #[superstruct(only(Electra))] @@ -285,7 +284,7 @@ impl PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index, - deposit_receipts_start_index, + deposit_requests_start_index, deposit_balance_to_consume, exit_balance_to_consume, earliest_exit_epoch, @@ -558,7 +557,7 @@ impl TryInto> for PartialBeaconState { latest_execution_payload_header, next_withdrawal_index, next_withdrawal_validator_index, - deposit_receipts_start_index, + deposit_requests_start_index, deposit_balance_to_consume, exit_balance_to_consume, earliest_exit_epoch, diff --git a/book/.markdownlint.yml b/book/.markdownlint.yml new file mode 100644 index 00000000000..5d6bda29f1e --- /dev/null +++ b/book/.markdownlint.yml @@ -0,0 +1,28 @@ +# MD010: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md010---hard-tabs +MD010: + # Set code blocks to false so that code blocks will be ignored, default is true + code_blocks: false + +#MD013 line length: https://github.com/DavidAnson/markdownlint/blob/main/doc/md013.md +# Set to false as this will also interfere with help_x.md files, and it is not necessary to comply with the line length of 80 +MD013: false + +# MD028: set to false to allow blank line between blockquote: https://github.com/DavidAnson/markdownlint/blob/main/doc/md028.md +# This is because the blockquotes are shown separately (a desired outcome) when having a blank line in between +MD028: false + +# MD024: set siblings_only to true so that same headings with different parent headings are allowed +# https://github.com/DavidAnson/markdownlint/blob/main/doc/md024.md +MD024: + siblings_only: true + +# MD033 in-line html: https://github.com/DavidAnson/markdownlint/blob/main/doc/md033.md +# In-line html is fine in the markdown files, so this is set to false +MD033: false + +# MD036 set to false to preserve the emphasis on deprecation notice on key-management.md (a heading is not necessary) +MD036: false + +# MD040 code blocks should have a language specified: https://github.com/DavidAnson/markdownlint/blob/main/doc/md040.md +# Set to false as the help_x.md files are code blocks without a language specified, which is fine and does not need to change +MD040: false \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 1a35d9d139c..7fb0b2f4e70 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,66 +2,66 @@ * [Introduction](./intro.md) * [Installation](./installation.md) - * [Pre-Built Binaries](./installation-binaries.md) - * [Docker](./docker.md) - * [Build from Source](./installation-source.md) - * [Raspberry Pi 4](./pi.md) - * [Cross-Compiling](./cross-compiling.md) - * [Homebrew](./homebrew.md) - * [Update Priorities](./installation-priorities.md) + * [Pre-Built Binaries](./installation-binaries.md) + * 
[Docker](./docker.md) + * [Build from Source](./installation-source.md) + * [Raspberry Pi 4](./pi.md) + * [Cross-Compiling](./cross-compiling.md) + * [Homebrew](./homebrew.md) + * [Update Priorities](./installation-priorities.md) * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) * [Validator Management](./validator-management.md) - * [The `validator-manager` Command](./validator-manager.md) - * [Creating validators](./validator-manager-create.md) - * [Moving validators](./validator-manager-move.md) - * [Slashing Protection](./slashing-protection.md) - * [Voluntary Exits](./voluntary-exit.md) - * [Partial Withdrawals](./partial-withdrawal.md) - * [Validator Monitoring](./validator-monitoring.md) - * [Doppelganger Protection](./validator-doppelganger.md) - * [Suggested Fee Recipient](./suggested-fee-recipient.md) - * [Validator Graffiti](./graffiti.md) + * [The `validator-manager` Command](./validator-manager.md) + * [Creating validators](./validator-manager-create.md) + * [Moving validators](./validator-manager-move.md) + * [Slashing Protection](./slashing-protection.md) + * [Voluntary Exits](./voluntary-exit.md) + * [Partial Withdrawals](./partial-withdrawal.md) + * [Validator Monitoring](./validator-monitoring.md) + * [Doppelganger Protection](./validator-doppelganger.md) + * [Suggested Fee Recipient](./suggested-fee-recipient.md) + * [Validator Graffiti](./graffiti.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [Lighthouse API](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * [Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) + * [Beacon Node API](./api-bn.md) + * [Lighthouse API](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) - * [Configuration](./ui-configuration.md) - * [Usage](./ui-usage.md) - * [FAQs](./ui-faqs.md) + * [Installation](./ui-installation.md) + * [Authentication](./ui-authentication.md) + * [Configuration](./ui-configuration.md) + * [Usage](./ui-usage.md) + * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) - * [Checkpoint Sync](./checkpoint-sync.md) - * [Custom Data Directories](./advanced-datadir.md) - * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) - * [Remote Signing with Web3Signer](./validator-web3signer.md) - * [Database Configuration](./advanced_database.md) - * [Database Migrations](./database-migrations.md) - * [Key Management (Deprecated)](./key-management.md) - * [Key Recovery](./key-recovery.md) - * [Advanced Networking](./advanced_networking.md) - * [Running a Slasher](./slasher.md) - * [Redundancy](./redundancy.md) - * [Release Candidates](./advanced-release-candidates.md) - * [MEV](./builders.md) - * [Merge Migration](./merge-migration.md) - * [Late Block Re-orgs](./late-block-re-orgs.md) - * [Blobs](./advanced-blobs.md) + * [Checkpoint Sync](./checkpoint-sync.md) + * [Custom Data Directories](./advanced-datadir.md) + * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) + * [Remote Signing with 
Web3Signer](./validator-web3signer.md) + * [Database Configuration](./advanced_database.md) + * [Database Migrations](./database-migrations.md) + * [Key Management (Deprecated)](./key-management.md) + * [Key Recovery](./key-recovery.md) + * [Advanced Networking](./advanced_networking.md) + * [Running a Slasher](./slasher.md) + * [Redundancy](./redundancy.md) + * [Release Candidates](./advanced-release-candidates.md) + * [MEV](./builders.md) + * [Merge Migration](./merge-migration.md) + * [Late Block Re-orgs](./late-block-re-orgs.md) + * [Blobs](./advanced-blobs.md) * [Built-In Documentation](./help_general.md) - * [Beacon Node](./help_bn.md) - * [Validator Client](./help_vc.md) - * [Validator Manager](./help_vm.md) - * [Create](./help_vm_create.md) - * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Beacon Node](./help_bn.md) + * [Validator Client](./help_vc.md) + * [Validator Manager](./help_vm.md) + * [Create](./help_vm_create.md) + * [Import](./help_vm_import.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * [Development Environment](./setup.md) * [FAQs](./faq.md) -* [Protocol Developers](./developers.md) \ No newline at end of file +* [Protocol Developers](./developers.md) diff --git a/book/src/advanced-blobs.md b/book/src/advanced-blobs.md index eee404a9be0..785bd5797dd 100644 --- a/book/src/advanced-blobs.md +++ b/book/src/advanced-blobs.md @@ -1,8 +1,8 @@ # Blobs -In the Deneb network upgrade, one of the changes is the implementation of EIP-4844, also known as [Proto-danksharding](https://blog.ethereum.org/2024/02/27/dencun-mainnet-announcement). Alongside with this, a new term named `blob` (binary large object) is introduced. Blobs are "side-cars" carrying transaction data in a block. They are mainly used by Ethereum layer 2 operators. As far as stakers are concerned, the main difference with the introduction of blobs is the increased storage requirement. +In the Deneb network upgrade, one of the changes is the implementation of EIP-4844, also known as [Proto-danksharding](https://blog.ethereum.org/2024/02/27/dencun-mainnet-announcement). Alongside this, a new term named `blob` (binary large object) is introduced. Blobs are "side-cars" carrying transaction data in a block. They are mainly used by Ethereum layer 2 operators. As far as stakers are concerned, the main difference with the introduction of blobs is the increased storage requirement. -### FAQ +## FAQ 1. What is the storage requirement for blobs? @@ -10,33 +10,32 @@ In the Deneb network upgrade, one of the changes is the implementation of EIP-48 One blob is 128 KB in size. Each block can carry a maximum of 6 blobs. Blobs will be kept for 4096 epochs and pruned afterwards. This means that the maximum increase in storage requirement will be: - ``` + ```text 2**17 bytes / blob * 6 blobs / block * 32 blocks / epoch * 4096 epochs = 96 GB ``` However, the blob base fee targets 3 blobs per block and it works similarly to how EIP-1559 operates in the Ethereum gas fee. Therefore, practically it is very likely to average to 3 blobs per block, which translates to a storage requirement of 48 GB. - 1. Do I have to add any flags for blobs? - No, you can use the default values for blob-related flags, which means you do not need add or remove any flags. + No, you can use the default values for blob-related flags, which means you do not need to add or remove any flags. 1. What if I want to keep all blobs? 
Use the flag `--prune-blobs false` in the beacon node. The storage requirement will be: - ``` + ```text 2**17 bytes * 3 blobs / block * 7200 blocks / day * 30 days = 79GB / month or 948GB / year ``` - + To keep blobs for a custom period, you may use the flag `--blob-prune-margin-epochs ` which keeps blobs for 4096+EPOCHS specified in the flag. 1. How to see the info of the blobs database? - We can call the API: + We can call the API: ```bash curl "http://localhost:5052/lighthouse/database/info" | jq ``` - Refer to [Lighthouse API](./api-lighthouse.md#lighthousedatabaseinfo) for an example response. \ No newline at end of file + Refer to [Lighthouse API](./api-lighthouse.md#lighthousedatabaseinfo) for an example response. diff --git a/book/src/advanced-datadir.md b/book/src/advanced-datadir.md index 074857346e9..7ad993a1072 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced-datadir.md @@ -1,4 +1,4 @@ -## Custom Data Directories +# Custom Data Directories Users can override the default Lighthouse data directories (e.g., `~/.lighthouse/mainnet`) using the `--datadir` flag. The custom data directory mirrors the structure of any network specific default directory (e.g. `~/.lighthouse/mainnet`). @@ -11,10 +11,11 @@ lighthouse --network mainnet --datadir /var/lib/my-custom-dir account validator lighthouse --network mainnet --datadir /var/lib/my-custom-dir bn --staking lighthouse --network mainnet --datadir /var/lib/my-custom-dir vc ``` + The first step creates a `validators` directory under `/var/lib/my-custom-dir` which contains the imported keys and [`validator_definitions.yml`](./validator-management.md). After that, we simply run the beacon chain and validator client with the custom dir path. -### Relative Paths +## Relative Paths [#2682]: https://github.com/sigp/lighthouse/pull/2682 [#2846]: https://github.com/sigp/lighthouse/pull/2846 @@ -40,7 +41,7 @@ be applied. On start-up, if a split directory scenario is detected (i.e. `~/here Lighthouse will continue to operate with split directories. In such a scenario, the following harmless log will show: -``` +```text WARN Legacy datadir location location: "/home/user/datadir/beacon", msg: this occurs when using relative paths for a datadir location ``` diff --git a/book/src/advanced-proposer-only.md b/book/src/advanced-proposer-only.md index c3347e044b7..1ea36109883 100644 --- a/book/src/advanced-proposer-only.md +++ b/book/src/advanced-proposer-only.md @@ -2,7 +2,7 @@ Lighthouse allows for more exotic setups that can minimize attack vectors by adding redundant beacon nodes and dividing the roles of attesting and block -production between them. +production between them. The purpose of this is to minimize attack vectors where malicious users obtain the network identities (IP addresses) of beacon @@ -24,7 +24,7 @@ harder to identify as a potential node to attack and will also consume less resources. Specifically, this flag reduces the default peer count (to a safe minimal -number as maintaining peers on attestation subnets do not need to be considered), +number as maintaining peers on attestation subnets do not need to be considered), prevents the node from subscribing to any attestation-subnets or sync-committees which is a primary way for attackers to de-anonymize validators. @@ -34,7 +34,6 @@ validators. > normal beacon node, the validator may fail to handle its duties correctly and > result in a loss of income. 
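A minimal sketch of such a dedicated block-production node is shown below. The `--proposer-only` flag name is assumed from the context of this page rather than stated in the excerpt above, and the execution endpoint and JWT path are placeholders; verify against `lighthouse bn --help` before relying on it.

```bash
# Hedged sketch: a beacon node intended only for block production.
# --proposer-only is assumed to be the flag discussed above; the execution
# endpoint and JWT secret path are placeholders for your own setup.
lighthouse bn \
  --network mainnet \
  --proposer-only \
  --execution-endpoint http://localhost:8551 \
  --execution-jwt /secrets/jwt.hex
```

Such a node is not meant to serve normal attestation duties; it exists only to be queried at proposal time, as described in the following section.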
- ## The Validator Client The validator client can be given a list of HTTP API endpoints representing @@ -53,7 +52,6 @@ these nodes for added security). > producing a more profitable block. Any block builders should therefore be > attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`. - ## Setup Overview The intended set-up to take advantage of this mechanism is to run one (or more) diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md index a539aa489cd..9f00da9ae98 100644 --- a/book/src/advanced-release-candidates.md +++ b/book/src/advanced-release-candidates.md @@ -20,7 +20,7 @@ you're looking for stable Lighthouse**. From time to time, Lighthouse may use the terms "release candidate" and "pre release" interchangeably. A pre release is identical to a release candidate. -### Examples +## Examples [`v1.4.0-rc.0`] has `rc` in the version string and is therefore a release candidate. This release is *not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). @@ -36,9 +36,8 @@ Users may wish to try a release candidate for the following reasons: - To help detect bugs and regressions before they reach production. - To provide feedback on annoyances before they make it into a release and become harder to change or revert. -There can also be a scenario that a bug has been found and requires an urgent fix. An example of incidence is [v4.0.2-rc.0](https://github.com/sigp/lighthouse/releases/tag/v4.0.2-rc.0) which contains a hot-fix to address high CPU usage experienced after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023. In this scenario, we will announce the release candidate on [Github](https://github.com/sigp/lighthouse/releases) and also on [Discord](https://discord.gg/cyAszAh) to recommend users to update to the release candidate version. +There can also be a scenario that a bug has been found and requires an urgent fix. An example of incidence is [v4.0.2-rc.0](https://github.com/sigp/lighthouse/releases/tag/v4.0.2-rc.0) which contains a hot-fix to address high CPU usage experienced after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023. In this scenario, we will announce the release candidate on [Github](https://github.com/sigp/lighthouse/releases) and also on [Discord](https://discord.gg/cyAszAh) to recommend users to update to the release candidate version. ## When *not* to use a release candidate Other than the above scenarios, it is generally not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). To test new release candidate features, try one of the testnets (e.g., Holesky). - diff --git a/book/src/advanced.md b/book/src/advanced.md index 21e732afa18..1a882835a47 100644 --- a/book/src/advanced.md +++ b/book/src/advanced.md @@ -15,7 +15,7 @@ tips about how things work under the hood. * [Key Management](./key-management.md): explore how to generate wallet with Lighthouse. * [Key Recovery](./key-recovery.md): explore how to recover wallet and validator with Lighthouse. * [Advanced Networking](./advanced_networking.md): open your ports to have a diverse and healthy set of peers. -* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. +* [Running a Slasher](./slasher.md): contribute to the health of the network by running a slasher. * [Redundancy](./redundancy.md): want to have more than one beacon node as backup? This is for you. 
* [Release Candidates](./advanced-release-candidates.md): latest release of Lighthouse to get feedback from users. * [Maximal Extractable Value](./builders.md): use external builders for a potential higher rewards during block proposals diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index f65fb104154..345fff69815 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -29,7 +29,7 @@ some example values. | Enthusiast (prev. default) | 2048 | hundreds of GB | 10.2 s | | Validator only (default) | 8192 | tens of GB | 41 s | -*Last update: Dec 2023. +*Last update: Dec 2023. As we can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP @@ -40,9 +40,9 @@ The default value is 8192 for databases synced from scratch using Lighthouse v2. The values shown in the table are approximate, calculated using a simple heuristic: each `BeaconState` consumes around 145MB of disk space, and each block replayed takes around 5ms. The -**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. +**Yearly Disk Usage** column shows the approximate size of the freezer DB _alone_ (hot DB not included), calculated proportionally using the total freezer database disk usage. The **Load Historical State** time is the worst-case load time for a state in the last slot -before a restore point. +before a restore point. To run a full archival node with fast access to beacon states and a SPRP of 32, the disk usage will be more than 10 TB per year, which is impractical for many users. As such, users may consider running the [tree-states](https://github.com/sigp/lighthouse/releases/tag/v5.0.111-exp) release, which only uses less than 200 GB for a full archival node. The caveat is that it is currently experimental and in alpha release (as of Dec 2023), thus not recommended for running mainnet validators. Nevertheless, it is suitable to be used for analysis purposes, and if you encounter any issues in tree-states, we do appreciate any feedback. We plan to have a stable release of tree-states in 1H 2024. diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md index 3141f336a12..323ba8f58a7 100644 --- a/book/src/advanced_metrics.md +++ b/book/src/advanced_metrics.md @@ -30,7 +30,6 @@ curl localhost:5054/metrics ## Validator Client Metrics - By default, these metrics are disabled but can be enabled with the `--metrics` flag. Use the `--metrics-address`, `--metrics-port` and `--metrics-allow-origin` flags to customize the metrics server. @@ -78,7 +77,7 @@ You can adjust the frequency at which Lighthouse sends metrics to the remote ser `--monitoring-endpoint-period` flag. It takes an integer value in seconds, defaulting to 60 seconds. -``` +```bash lighthouse bn --monitoring-endpoint-period 60 --monitoring-endpoint "https://url" ``` diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 5fabf57d568..732b4f51e65 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -5,8 +5,7 @@ be adjusted to handle a variety of network situations. This section outlines some of these configuration parameters and their consequences at the networking level and their general intended use. - -### Target Peers +## Target Peers The beacon node has a `--target-peers` CLI parameter. 
This allows you to instruct the beacon node how many peers it should try to find and maintain. @@ -38,7 +37,7 @@ large peer count will not speed up sync. For these reasons, we recommend users do not modify the `--target-peers` count drastically and use the (recommended) default. -### NAT Traversal (Port Forwarding) +## NAT Traversal (Port Forwarding) Lighthouse, by default, uses port 9000 for both TCP and UDP. Since v4.5.0, Lighthouse will also attempt to make QUIC connections via UDP port 9001 by default. Lighthouse will still function if it is behind a NAT without any port mappings. Although @@ -62,36 +61,39 @@ TCP and UDP ports (9000 TCP/UDP, and 9001 UDP by default). > explicitly specify them using the `--enr-tcp-port` and `--enr-udp-port` as > explained in the following section. -### How to Open Ports +## How to Open Ports The steps to do port forwarding depends on the router, but the general steps are given below: + 1. Determine the default gateway IP: -- On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. -- On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. -- On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. - The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. + - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. + - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. + + The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. + +1. Login to the router management page. The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). -2. Login to the router management page. The login credentials are usually available in the manual or the router, or it can be found on a sticker underneath the router. You can also try the login credentials for some common router brands listed [here](https://www.noip.com/support/knowledgebase/general-port-forwarding-guide/). +1. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". -3. Navigate to the port forward settings in your router. The exact step depends on the router, but typically it will fall under the "Advanced" section, under the name "port forwarding" or "virtual server". +1. Configure a port forwarding rule as below: -4. 
Configure a port forwarding rule as below: -- Protocol: select `TCP/UDP` or `BOTH` -- External port: `9000` -- Internal port: `9000` -- IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse. + - Protocol: select `TCP/UDP` or `BOTH` + - External port: `9000` + - Internal port: `9000` + - IP address: Usually there is a dropdown list for you to select the device. Choose the device that is running Lighthouse. -Since V4.5.0 port 9001/UDP is also used for QUIC support. + Since V4.5.0 port 9001/UDP is also used for QUIC support. -- Protocol: select `UDP` -- External port: `9001` -- Internal port: `9001` -- IP address: Choose the device that is running Lighthouse. + - Protocol: select `UDP` + - External port: `9001` + - Internal port: `9001` + - IP address: Choose the device that is running Lighthouse. -5. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). +1. To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). -### ENR Configuration +## ENR Configuration Lighthouse has a number of CLI parameters for constructing and modifying the local Ethereum Node Record (ENR). Examples are `--enr-address`, @@ -113,8 +115,7 @@ harder for peers to find you or potentially making it harder for other peers to find each other. We recommend not touching these settings unless for a more advanced use case. - -### IPv6 support +## IPv6 support As noted in the previous sections, two fundamental parts to ensure good connectivity are: The parameters that configure the sockets over which @@ -122,7 +123,7 @@ Lighthouse listens for connections, and the parameters used to tell other peers how to connect to your node. This distinction is relevant and applies to most nodes that do not run directly on a public network. -#### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack +### Configuring Lighthouse to listen over IPv4/IPv6/Dual stack To listen over only IPv6 use the same parameters as done when listening over IPv4 only: @@ -136,6 +137,7 @@ TCP and UDP. This can be configured with `--quic-port`. To listen over both IPv4 and IPv6: + - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note @@ -149,7 +151,7 @@ To listen over both IPv4 and IPv6: UDP over IPv6. This will default to the value given to `--port6` + 1. This flag has no effect when listening over IPv6 only. 
-##### Configuration Examples +#### Configuration Examples > When using `--listen-address :: --listen-address 0.0.0.0 --port 9909`, listening will be set up as follows: > @@ -175,7 +177,8 @@ To listen over both IPv4 and IPv6: > It listens on the default value of `--port6` (`9090`) for TCP, and port `9999` for UDP. > QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. -#### Configuring Lighthouse to advertise IPv6 reachable addresses +### Configuring Lighthouse to advertise IPv6 reachable addresses + Lighthouse supports IPv6 to connect to other nodes both over IPv6 exclusively, and dual stack using one socket for IPv4 and another socket for IPv6. In both scenarios, the previous sections still apply. In summary: @@ -205,7 +208,7 @@ In the general case, a user will not require to set these explicitly. Update these options only if you can guarantee your node is reachable with these values. -#### Known caveats +### Known caveats IPv6 link local addresses are likely to have poor connectivity if used in topologies with more than one interface. Use global addresses for the general diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 3e57edd8dbd..e7c900e84d9 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -10,15 +10,15 @@ A Lighthouse beacon node can be configured to expose an HTTP server by supplying The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are - provided). + provided). - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. It is _not_ recommended to listen on `0.0.0.0`, please see [Security](#security) below. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` - header. The default is to not supply a header. + header. The default is to not supply a header. - `--http-enable-tls`: serve the HTTP server over TLS. Must be used with `--http-tls-cert` - and `http-tls-key`. This feature is currently experimental, please see - [Serving the HTTP API over TLS](#serving-the-http-api-over-tls) below. + and `http-tls-key`. This feature is currently experimental, please see + [Serving the HTTP API over TLS](#serving-the-http-api-over-tls) below. - `--http-tls-cert`: specify the path to the certificate file for Lighthouse to use. - `--http-tls-key`: specify the path to the private key file for Lighthouse to use. @@ -38,18 +38,18 @@ the listening address from `localhost` should only be done with extreme care. To safely provide access to the API from a different machine you should use one of the following standard techniques: -* Use an [SSH tunnel][ssh_tunnel], i.e. access `localhost` remotely. This is recommended, and +- Use an [SSH tunnel][ssh_tunnel], i.e. access `localhost` remotely. This is recommended, and doesn't require setting `--http-address`. -* Use a firewall to limit access to certain remote IPs, e.g. allow access only from one other +- Use a firewall to limit access to certain remote IPs, e.g. allow access only from one other machine on the local network. -* Shield Lighthouse behind an HTTP server with rate-limiting such as NGINX. This is only +- Shield Lighthouse behind an HTTP server with rate-limiting such as NGINX. This is only recommended for advanced users, e.g. beacon node hosting providers. 
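As a concrete sketch of the SSH tunnel option above (the host name is a placeholder for your own server), the following forwards the default API port so that requests made locally reach the beacon node's `localhost`-bound server:

```bash
# Forward local port 5052 to port 5052 on the beacon node host.
# "user@beacon-host" is a placeholder; replace it with your own SSH login.
ssh -N -L 5052:localhost:5052 user@beacon-host
```

While the tunnel is open, a request such as `curl "http://localhost:5052/eth/v1/node/version"` made on the local machine is answered by the remote beacon node, without exposing the HTTP API to the network.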
Additional risks to be aware of include: -* The `node/identity` and `node/peers` endpoints expose information about your node's peer-to-peer +- The `node/identity` and `node/peers` endpoints expose information about your node's peer-to-peer identity. -* The `--http-allow-origin` flag changes the server's CORS policy, allowing cross-site requests +- The `--http-allow-origin` flag changes the server's CORS policy, allowing cross-site requests from browsers. You should only supply it if you understand the risks, e.g. malicious websites accessing your beacon node if you use the same machine for staking and web browsing. @@ -57,7 +57,6 @@ Additional risks to be aware of include: Start a beacon node and an execution node according to [Run a node](./run_a_node.md). Note that since [The Merge](https://ethereum.org/en/roadmap/merge/), an execution client is required to be running along with a beacon node. Hence, the query on Beacon Node APIs requires users to run both. While there are some Beacon Node APIs that you can query with only the beacon node, such as the [node version](https://ethereum.github.io/beacon-APIs/#/Node/getNodeVersion), in general an execution client is required to get the updated information about the beacon chain, such as [state root](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot), [headers](https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeaders) and many others, which are dynamically progressing with time. - ## HTTP Request/Response Examples This section contains some simple examples of using the HTTP API via `curl`. @@ -124,9 +123,11 @@ curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H " } } ``` + You can replace `1` in the above command with the validator index that you would like to query. Other API query can be done similarly by changing the link according to the Beacon API. ### Events API + The [events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream) provides information such as the payload attributes that are of interest to block builders and relays. To query the payload attributes, it is necessary to run Lighthouse beacon node with the flag `--always-prepare-payload`. It is also recommended to add the flag `--prepare-payload-lookahead 8000` which configures the payload attributes to be sent at 4s into each slot (or 8s from the start of the next slot). 
An example of the command is: ```bash @@ -141,8 +142,8 @@ An example of response is: data:{"version":"capella","data":{"proposal_slot":"11047","proposer_index":"336057","parent_block_root":"0x26f8999d270dd4677c2a1c815361707157a531f6c599f78fa942c98b545e1799","parent_block_number":"9259","parent_block_hash":"0x7fb788cd7afa814e578afa00a3edd250cdd4c8e35c22badd327d981b5bda33d2","payload_attributes":{"timestamp":"1696034964","prev_randao":"0xeee34d7a3f6b99ade6c6a881046c9c0e96baab2ed9469102d46eb8d6e4fde14c","suggested_fee_recipient":"0x0000000000000000000000000000000000000001","withdrawals":[{"index":"40705","validator_index":"360712","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1202941"},{"index":"40706","validator_index":"360713","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1201138"},{"index":"40707","validator_index":"360714","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1215255"},{"index":"40708","validator_index":"360715","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1161977"},{"index":"40709","validator_index":"360716","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1257278"},{"index":"40710","validator_index":"360717","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1247740"},{"index":"40711","validator_index":"360718","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1204337"},{"index":"40712","validator_index":"360719","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1183575"},{"index":"40713","validator_index":"360720","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1157785"},{"index":"40714","validator_index":"360721","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1143371"},{"index":"40715","validator_index":"360722","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1234787"},{"index":"40716","validator_index":"360723","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1286673"},{"index":"40717","validator_index":"360724","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1419241"},{"index":"40718","validator_index":"360725","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1231015"},{"index":"40719","validator_index":"360726","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1304321"},{"index":"40720","validator_index":"360727","address":"0x73b2e0e54510239e22cc936f0b4a6de1acf0abde","amount":"1236543"}]}}} ``` - ## Serving the HTTP API over TLS +> > **Warning**: This feature is currently experimental. The HTTP server can be served over TLS by using the `--http-enable-tls`, @@ -160,10 +161,13 @@ Below is a simple example serving the HTTP API over TLS using a self-signed certificate on Linux: ### Enabling TLS on a beacon node + Generate a self-signed certificate using `openssl`: + ```bash openssl req -x509 -nodes -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj "/CN=localhost" ``` + Note that currently Lighthouse only accepts keys that are not password protected. This means we need to run with the `-nodes` flag (short for 'no DES'). @@ -180,21 +184,27 @@ lighthouse bn \ --http-tls-cert cert.pem \ --http-tls-key key.pem ``` + Note that the user running Lighthouse must have permission to read the certificate and key. The API is now being served at `https://localhost:5052`. 
To test connectivity, you can run the following: + ```bash curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq ``` + ### Connecting a validator client + In order to connect a validator client to a beacon node over TLS, the validator client needs to be aware of the certificate. There are two ways to do this: + #### Option 1: Add the certificate to the operating system trust store + The process for this will vary depending on your operating system. Below are the instructions for Ubuntu and Arch Linux: @@ -211,13 +221,16 @@ sudo trust extract-compat ``` Now the validator client can be connected to the beacon node by running: + ```bash lighthouse vc --beacon-nodes https://localhost:5052 ``` #### Option 2: Specify the certificate via CLI + You can also specify any custom certificates via the validator client CLI like so: + ```bash lighthouse vc --beacon-nodes https://localhost:5052 --beacon-nodes-tls-certs cert.pem ``` diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index ce71450987d..b63505c4901 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -16,12 +16,12 @@ Although we don't recommend that users rely on these endpoints, we document them briefly so they can be utilized by developers and researchers. +## `/lighthouse/health` - -### `/lighthouse/health` *Note: This endpoint is presently only available on Linux.* Returns information regarding the health of the host machine. + ```bash curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq ``` @@ -64,7 +64,8 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j ``` -### `/lighthouse/ui/health` +## `/lighthouse/ui/health` + Returns information regarding the health of the host machine. ```bash @@ -101,8 +102,10 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio } ``` -### `/lighthouse/ui/validator_count` +## `/lighthouse/ui/validator_count` + Returns an overview of validators. + ```bash curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq ``` @@ -123,9 +126,10 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap } ``` +## `/lighthouse/ui/validator_metrics` -### `/lighthouse/ui/validator_metrics` Re-exposes certain metrics from the validator monitor to the HTTP API. This API requires that the beacon node to have the flag `--validator-monitor-auto`. This API will only return metrics for the validators currently being monitored and present in the POST data, or the validators running in the validator client. + ```bash curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq ``` @@ -150,7 +154,9 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic } } ``` + Running this API without the flag `--validator-monitor-auto` in the beacon node will return null: + ```json { "data": { @@ -159,8 +165,10 @@ Running this API without the flag `--validator-monitor-auto` in the beacon node } ``` -### `/lighthouse/syncing` +## `/lighthouse/syncing` + Returns the sync status of the beacon node. + ```bash curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq ``` @@ -168,6 +176,7 @@ curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/ There are two possible outcomes, depending on whether the beacon node is syncing or synced. 1. 
Syncing: + ```json { "data": { @@ -178,20 +187,21 @@ There are two possible outcomes, depending on whether the beacon node is syncing } } ``` + 1. Synced: + ```json { "data": "Synced" } ``` -### `/lighthouse/peers` +## `/lighthouse/peers` ```bash curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq ``` - ```json [ { @@ -255,14 +265,14 @@ curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/js ] ``` -### `/lighthouse/peers/connected` +## `/lighthouse/peers/connected` + Returns information about connected peers. + ```bash curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq ``` - - ```json [ { @@ -327,7 +337,7 @@ curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: appl ] ``` -### `/lighthouse/proto_array` +## `/lighthouse/proto_array` ```bash curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq @@ -335,45 +345,45 @@ curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: applicat *Example omitted for brevity.* -### `/lighthouse/validator_inclusion/{epoch}/{validator_id}` +## `/lighthouse/validator_inclusion/{epoch}/{validator_id}` See [Validator Inclusion APIs](./validator-inclusion.md). -### `/lighthouse/validator_inclusion/{epoch}/global` +## `/lighthouse/validator_inclusion/{epoch}/global` See [Validator Inclusion APIs](./validator-inclusion.md). -### `/lighthouse/eth1/syncing` +## `/lighthouse/eth1/syncing` Returns information regarding execution layer, as it is required for use in consensus layer -#### Fields +### Fields - `head_block_number`, `head_block_timestamp`: the block number and timestamp from the very head of the execution chain. Useful for understanding the immediate health of the execution node that the beacon node is connected to. - `latest_cached_block_number` & `latest_cached_block_timestamp`: the block number and timestamp of the latest block we have in our block cache. - - For correct execution client voting this timestamp should be later than the + - For correct execution client voting this timestamp should be later than the `voting_target_timestamp`. - `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. - `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the execution node is from the head of the execution chain. - - `100.0` indicates a fully synced execution node. - - `0.0` indicates an execution node that has not verified any blocks past the - genesis block. + - `100.0` indicates a fully synced execution node. + - `0.0` indicates an execution node that has not verified any blocks past the + genesis block. - `lighthouse_is_cached_and_ready`: Is set to `true` if the caches in the - beacon node are ready for block production. - - This value might be set to - `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon - node is still building its internal cache. - - This value might be set to `true` whilst - `eth1_node_sync_status_percentage < 100.0` since the cache only cares - about blocks a certain distance behind the head. + beacon node are ready for block production. + - This value might be set to + `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon + node is still building its internal cache. + - This value might be set to `true` whilst + `eth1_node_sync_status_percentage < 100.0` since the cache only cares + about blocks a certain distance behind the head. 
-#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: application/json" | jq @@ -393,11 +403,11 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: applica } ``` -### `/lighthouse/eth1/block_cache` +## `/lighthouse/eth1/block_cache` Returns a list of all the execution layer blocks in the execution client voting cache. -#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: application/json" | jq @@ -424,11 +434,11 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: app } ``` -### `/lighthouse/eth1/deposit_cache` +## `/lighthouse/eth1/deposit_cache` Returns a list of all cached logs from the deposit contract. -#### Example +### Example ```bash curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: application/json" | jq @@ -463,7 +473,7 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: a } ``` -### `/lighthouse/liveness` +## `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list of objects, each including the validator index, epoch, and `is_live` status of a requested validator. @@ -488,9 +498,7 @@ curl -X POST "http://localhost:5052/lighthouse/liveness" -d '{"indices":["0","1" } ``` - - -### `/lighthouse/database/info` +## `/lighthouse/database/info` Information about the database's split point and anchor info. @@ -498,7 +506,6 @@ Information about the database's split point and anchor info. curl "http://localhost:5052/lighthouse/database/info" | jq ``` - ```json { "schema_version": 18, @@ -541,9 +548,10 @@ reconstruction has yet to be completed. For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). +## `/lighthouse/merge_readiness` -### `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: + ```bash curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq ``` @@ -574,16 +582,15 @@ As all testnets and Mainnet have been merged, both values will be the same after } ``` - -### `/lighthouse/analysis/attestation_performance/{index}` +## `/lighthouse/analysis/attestation_performance/{index}` Fetch information about the attestation performance of a validator index or all validators for a range of consecutive epochs. Two query parameters are required: -* `start_epoch` (inclusive): the first epoch to compute attestation performance for. -* `end_epoch` (inclusive): the final epoch to compute attestation performance for. +- `start_epoch` (inclusive): the first epoch to compute attestation performance for. +- `end_epoch` (inclusive): the final epoch to compute attestation performance for. Example: @@ -649,18 +656,18 @@ curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/g Caveats: -* For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, +- For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. 
+ This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. -### `/lighthouse/analysis/block_rewards` +## `/lighthouse/analysis/block_rewards` Fetch information about the block rewards paid to proposers for a range of consecutive blocks. Two query parameters are required: -* `start_slot` (inclusive): the slot of the first block to compute rewards for. -* `end_slot` (inclusive): the slot of the last block to compute rewards for. +- `start_slot` (inclusive): the slot of the first block to compute rewards for. +- `end_slot` (inclusive): the slot of the last block to compute rewards for. Example: @@ -668,7 +675,6 @@ Example: curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=1" | jq ``` - The first few lines of the response would look like: ```json @@ -680,7 +686,7 @@ The first few lines of the response would look like: "slot": "1", "parent_slot": "0", "proposer_index": 93, - "graffiti": "EF #vm-eth2-raw-iron-prater-101" + "graffiti": "EF #vm-eth2-raw-iron-101" }, "attestation_rewards": { "total": 637260, @@ -698,25 +704,25 @@ The first few lines of the response would look like: Caveats: -* Presently only attestation and sync committee rewards are computed. -* The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] +- Presently only attestation and sync committee rewards are computed. +- The output format is verbose and subject to change. Please see [`BlockReward`][block_reward_src] in the source. -* For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_slot` needs to be loaded from the database, and +- For maximum efficiency the `start_slot` should satisfy `start_slot % slots_per_restore_point == 1`. + This is because the state *prior* to the `start_slot` needs to be loaded from the database, and loading a state on a boundary is most efficient. [block_reward_src]: https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs -### `/lighthouse/analysis/block_packing` +## `/lighthouse/analysis/block_packing` Fetch information about the block packing efficiency of blocks for a range of consecutive epochs. Two query parameters are required: -* `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. -* `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. +- `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. +- `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. ```bash curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq @@ -745,13 +751,12 @@ An excerpt of the response looks like: Caveats: -* `start_epoch` must not be `0`. -* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. - This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and +- `start_epoch` must not be `0`. +- For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. + This is because the state *prior* to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. 
- -### `/lighthouse/logs` +## `/lighthouse/logs` This is a Server Side Event subscription endpoint. This allows a user to read the Lighthouse logs directly from the HTTP API endpoint. This currently @@ -764,6 +769,7 @@ curl -N "http://localhost:5052/lighthouse/logs" ``` Should provide an output that emits log events as they occur: + ```json { "data": { @@ -779,7 +785,8 @@ Should provide an output that emits log events as they occur: } ``` -### `/lighthouse/nat` +## `/lighthouse/nat` + Checks if the ports are open. ```bash @@ -787,6 +794,7 @@ curl -X GET "http://localhost:5052/lighthouse/nat" | jq ``` An open port will return: + ```json { "data": true diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index 33f6f6ff7ae..f2f9caf46b5 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -11,7 +11,7 @@ HTTP header: Where `` is a string that can be obtained from the validator client host. Here is an example `Authorization` header: -``` +```text Authorization: Bearer api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 ``` @@ -22,17 +22,16 @@ this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an example using the `cat` command to print the token to the terminal, but any text editor will suffice: -``` -$ cat api-token.txt +```bash +cat api-token.txt api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123 ``` - When starting the validator client it will output a log message containing the path to the file containing the api token. -``` -Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/prater/validators/api-token.txt", listen_address: 127.0.0.1:5062 +```text +Sep 28 19:17:52.615 INFO HTTP API started api_token_file: "$HOME/holesky/validators/api-token.txt", listen_address: 127.0.0.1:5062 ``` The _path_ to the API token may also be fetched from the HTTP API itself (this endpoint is the only @@ -46,7 +45,7 @@ Response: ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index cf52454c2db..a36aa737083 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -2,27 +2,27 @@ ## Endpoints -HTTP Path | Description | +| HTTP Path | Description | | --- | -- | -[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. -[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. -[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. -[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. -[`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. -[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. -[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. -[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator. -[`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. -[`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. 
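Putting the pieces above together, a minimal sketch of an authenticated validator client request might look like the following (the token path shown is the default for a `holesky` data directory and the validator client API is assumed to be on its default port, 5062; adjust both for your own setup):

```bash
# Read the api-token.txt written by the validator client and attach it as a Bearer token.
# Adjust the path if you use a custom --datadir or a different network.
TOKEN=$(cat ~/.lighthouse/holesky/validators/api-token.txt)
curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer ${TOKEN}" | jq
```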
-[`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. -[`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. -[`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs - -The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). +| [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version. | +| [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine. | +| [`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Focused for UI applications. | +| [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator. | +| [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token. | +| [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. | +| [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators. | +| [`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator. | +| [`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator. | +| [`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. | +| [`POST /lighthouse/validators/keystore`](#post-lighthousevalidatorskeystore) | Import a keystore. | +| [`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. | +| [`POST /lighthouse/validators/web3signer`](#post-lighthousevalidatorsweb3signer) | Add web3signer validators. | +| [`GET /lighthouse/logs`](#get-lighthouselogs) | Get logs | + +The query to Lighthouse API endpoints requires authorization, see [Authorization Header](./api-vc-auth-header.md). In addition to the above endpoints Lighthouse also supports all of the [standard keymanager APIs](https://ethereum.github.io/keymanager-APIs/). - ## `GET /lighthouse/version` Returns the software version and `git` commit hash for the Lighthouse binary. @@ -37,6 +37,7 @@ Returns the software version and `git` commit hash for the Lighthouse binary. | Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -44,7 +45,6 @@ curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer Example Response Body: - ```json { "data": { @@ -52,9 +52,11 @@ Example Response Body: } } ``` + > Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/API-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176`. 
In this case, you obtain the token from the file `API token.txt` and the command becomes: + ```bash curl -X GET "http://localhost:5062/lighthouse/version" -H "Authorization: Bearer api-token-0x02dc2a13115cc8c83baf170f597f22b1eb2930542941ab902df3daadebcb8f8176" | jq ``` @@ -75,6 +77,7 @@ Returns information regarding the health of the host machine. *Note: this endpoint is presently only available on Linux.* Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -133,6 +136,7 @@ Returns information regarding the health of the host machine. | Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/ui/health" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq @@ -178,10 +182,12 @@ Returns the graffiti that will be used for the next block proposal of each valid | Typical Responses | 200 | Command: + ```bash DATADIR=/var/lib/lighthouse curl -X GET "http://localhost:5062/lighthouse/ui/graffiti" -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" | jq ``` + Example Response Body ```json @@ -219,7 +225,7 @@ Example Response Body ```json { "data": { - "CONFIG_NAME": "prater", + "CONFIG_NAME": "holesky", "PRESET_BASE": "mainnet", "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", @@ -323,7 +329,7 @@ Example Response Body ## `GET /lighthouse/auth` Fetch the filesystem path of the [authorization token](./api-vc-auth-header.md). -Unlike the other endpoints this may be called _without_ providing an authorization token. +Unlike the other endpoints this may be called *without* providing an authorization token. This API is intended to be called from the same machine as the validator client, so that the token file may be read by a local user with access rights. @@ -347,7 +353,7 @@ Example Response Body ```json { - "token_path": "/home/karlm/.lighthouse/prater/validators/api-token.txt" + "token_path": "/home/karlm/.lighthouse/holesky/validators/api-token.txt" } ``` @@ -440,7 +446,6 @@ and `graffiti`. The following example updates a validator from `enabled: true` | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 400 | - Example Request Body ```json @@ -458,6 +463,7 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 -H "Content-Type: application/json" \ -d "{\"enabled\":false}" | jq ``` + ### Example Response Body ```json @@ -466,12 +472,11 @@ null A `null` response indicates that the request is successful. 
At the same time, `lighthouse vc` will log: -``` +```text INFO Disabled validator voting_pubkey: 0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde INFO Modified key_cache saved successfully ``` - ## `POST /lighthouse/validators/` Create any number of new validators, all of which will share a common mnemonic @@ -510,7 +515,8 @@ Validators are generated from the mnemonic according to ] ``` -Command: +Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators \ @@ -560,7 +566,7 @@ curl -X POST http://localhost:5062/lighthouse/validators \ `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0x8ffbc881fb60841a4546b4b385ec5e9b5090fd1c4395e568d98b74b94b41a912c6101113da39d43c101369eeb9b48e50, signing_method: local_keystore INFO Modified key_cache saved successfully INFO Disabled validator voting_pubkey: 0xa9fadd620dc68e9fe0d6e1a69f6c54a0271ad65ab5a509e645e45c6e60ff8f4fc538f301781193a08b55821444801502 @@ -625,6 +631,7 @@ Import a keystore into the validator client. We can use [JSON to String Converter](https://jsontostring.com/) so that the above data can be properly presented as a command. The command is as below: Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators/keystore \ @@ -636,6 +643,7 @@ curl -X POST http://localhost:5062/lighthouse/validators/keystore \ As this is an example for demonstration, the above command will return `InvalidPassword`. However, with a keystore file and correct password, running the above command will import the keystore to the validator client. An example of a success message is shown below: ### Example Response Body + ```json { "data": { @@ -717,7 +725,7 @@ curl -X POST http://localhost:5062/lighthouse/validators/mnemonic \ `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: local_keystore INFO Modified key_cache saved successfully ``` @@ -759,8 +767,8 @@ Create any number of new validators, all of which will refer to a Some of the fields above may be omitted or nullified to obtain default values (e.g., `graffiti`, `request_timeout_ms`). - Command: + ```bash DATADIR=/var/lib/lighthouse curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ @@ -769,21 +777,18 @@ curl -X POST http://localhost:5062/lighthouse/validators/web3signer \ -d "[{\"enable\":true,\"description\":\"validator_one\",\"graffiti\":\"Mr F was here\",\"suggested_fee_recipient\":\"0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d\",\"voting_public_key\":\"0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380\",\"builder_proposals\":true,\"url\":\"http://path-to-web3signer.com\",\"root_certificate_path\":\"/path/to/certificate.pem\",\"client_identity_path\":\"/path/to/identity.p12\",\"client_identity_password\":\"pass\",\"request_timeout_ms\":12000}]" ``` - ### Example Response Body - ```json null ``` A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will log: -``` +```text INFO Enabled validator voting_pubkey: 0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380, signing_method: remote_signer ``` - ## `GET /lighthouse/logs` Provides a subscription to receive logs as Server Side Events. 
Currently the diff --git a/book/src/api-vc-sig-header.md b/book/src/api-vc-sig-header.md index a1b9b104f9d..468f714cfa9 100644 --- a/book/src/api-vc-sig-header.md +++ b/book/src/api-vc-sig-header.md @@ -9,7 +9,7 @@ The validator client HTTP server adds the following header to all responses: Example `Signature` header: -``` +```text Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873 ``` @@ -83,7 +83,7 @@ The previous Javascript example was written using the output from the following curl -v localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123" ``` -``` +```text * Trying ::1:5062... * connect to ::1 port 5062 failed: Connection refused * Trying 127.0.0.1:5062... diff --git a/book/src/api-vc.md b/book/src/api-vc.md index a3400016eca..630a0320066 100644 --- a/book/src/api-vc.md +++ b/book/src/api-vc.md @@ -19,11 +19,11 @@ A Lighthouse validator client can be configured to expose a HTTP server by suppl The following CLI flags control the HTTP server: - `--http`: enable the HTTP server (required even if the following flags are - provided). + provided). - `--http-address`: specify the listen address of the server. It is almost always unsafe to use a non-default HTTP listen address. Use this with caution. See the **Security** section below for more information. - `--http-port`: specify the listen port of the server. - `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` - header. The default is to not supply a header. + header. The default is to not supply a header. ## Security diff --git a/book/src/builders.md b/book/src/builders.md index 930d330d994..5b8e9ddb8b7 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -18,30 +18,34 @@ a missed proposal and the opportunity cost of lost block rewards. The beacon node and validator client each require a new flag for lighthouse to be fully compatible with builder API servers. -``` +```bash lighthouse bn --builder https://mainnet-builder.test ``` + The `--builder` flag will cause the beacon node to simultaneously query the provided URL and the local execution engine during block production for a block payload with stubbed-out transactions. If either fails, the successful result will be used; If both succeed, the more profitable result will be used. The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for blinded blocks, you should use the following flag: -``` +```bash lighthouse vc --builder-proposals ``` + With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages. -``` +```bash lighthouse vc --prefer-builder-proposals ``` + With the `--prefer-builder-proposals` flag, the validator client will always prefer blinded blocks, regardless of the payload value, for all validators it manages. -``` +```bash lighthouse vc --builder-boost-factor ``` + With the `--builder-boost-factor` flag, a percentage multiplier is applied to the builder's payload value when choosing between a -builder payload header and payload from the paired execution node. For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload. 
+builder payload header and payload from the paired execution node. For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload. In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) @@ -88,7 +92,6 @@ You can also update the configured gas limit with these requests. #### `PATCH /lighthouse/validators/:voting_pubkey` - #### HTTP Specification | Property | Specification | @@ -100,12 +103,14 @@ You can also update the configured gas limit with these requests. #### Example Path -``` +```text localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde ``` #### Example Request Body + Each field is optional. + ```json { "builder_proposals": true, @@ -113,7 +118,7 @@ Each field is optional. } ``` -Command: +Command: ```bash DATADIR=/var/lib/lighthouse @@ -125,6 +130,7 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 "gas_limit": 30000001 }' | jq ``` + If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"` #### Example Response Body @@ -135,7 +141,7 @@ null A `null` response indicates that the request is successful. At the same time, `lighthouse vc` will show a log which looks like: -``` +```text INFO Published validator registrations to the builder network, count: 3, service: preparation ``` @@ -147,7 +153,7 @@ Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. You can also directly configure these fields in the `validator_definitions.yml` file. -``` +```text --- - enabled: true voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" @@ -178,16 +184,16 @@ checks to try and avoid scenarios like this. By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them. -- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query +* `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query any connected builders, and will use the local execution engine for payload construction. -- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT +* `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT query any connected builders, and will use the local execution engine for payload construction. -- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within +* `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is set to propose. -- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. 
This means the builder +* `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder API will always be used for payload construction, regardless of recent chain conditions. ## Checking your builder config @@ -196,20 +202,20 @@ You can check that your builder is configured correctly by looking for these log On start-up, the beacon node will log if a builder is configured: -``` +```text INFO Using external block builder ``` At regular intervals the validator client will log that it successfully registered its validators with the builder network: -``` +```text INFO Published validator registrations to the builder network ``` When you successfully propose a block using a builder, you will see this log on the beacon node: -``` +```text INFO Successfully published a block to the builder network ``` @@ -218,34 +224,35 @@ for `INFO` and `WARN` messages indicating why the builder was not used. Examples of messages indicating fallback to a locally produced block are: -``` +```text INFO Builder did not return a payload ``` -``` +```text WARN Builder error when requesting payload ``` -``` +```text WARN Builder returned invalid payload ``` -``` +```text INFO Builder payload ignored ``` -``` +```text INFO Chain is unhealthy, using local payload ``` In case of fallback you should see a log indicating that the locally produced payload was used in place of one from the builder: -``` +```text INFO Reconstructing a full block using a local payload ``` ## Information for block builders and relays + Block builders and relays can query beacon node events from the [Events API](https://ethereum.github.io/beacon-APIs/#/Events/eventstream). An example of querying the payload attributes in the Events API is outlined in [Beacon node API - Events API](./api-bn.md#events-api) [mev-rs]: https://github.com/ralexstokes/mev-rs diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 37677c00add..2bf028acfec 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -15,20 +15,20 @@ To begin checkpoint sync you will need HTTP API access to another synced beacon checkpoint sync by providing the other beacon node's URL to `--checkpoint-sync-url`, alongside any other flags: -``` +```bash lighthouse bn --checkpoint-sync-url "http://remote-bn:5052" ... ``` Lighthouse will print a message to indicate that checkpoint sync is being used: -``` +```text INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon ``` After a short time (usually less than a minute), it will log the details of the checkpoint loaded from the remote beacon node: -``` +```text INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon ``` @@ -43,7 +43,8 @@ as soon as forwards sync completes. ### Use a community checkpoint sync endpoint The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` + +```bash lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` @@ -52,7 +53,7 @@ lighthouse bn --checkpoint-sync-url https://example.com/ ... 
If the beacon node fails to start due to a timeout from the checkpoint sync server, you can try running it again with a longer timeout by adding the flag `--checkpoint-sync-url-timeout`. -``` +```bash lighthouse bn --checkpoint-sync-url-timeout 300 --checkpoint-sync-url https://example.com/ ... ``` @@ -66,7 +67,7 @@ from the checkpoint back to genesis. The beacon node will log messages similar to the following each minute while it completes backfill sync: -``` +```text INFO Downloading historical blocks est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier ``` @@ -80,21 +81,16 @@ Once backfill is complete, a `INFO Historical block download complete` log will 1. What if I have an existing database? How can I use checkpoint sync? -The existing beacon database needs to be deleted before Lighthouse will attempt checkpoint sync. -You can do this by providing the `--purge-db` flag, or by manually deleting `/beacon`. + The existing beacon database needs to be deleted before Lighthouse will attempt checkpoint sync. + You can do this by providing the `--purge-db` flag, or by manually deleting `/beacon`. -2. Why is checkpoint sync faster? +1. Why is checkpoint sync faster? -Checkpoint sync prioritises syncing to the head of the chain quickly so that the node can perform -its duties. Additionally, it only has to perform lightweight verification of historic blocks: -it checks the hash chain integrity & proposer signature rather than computing the full state -transition. + Checkpoint sync prioritises syncing to the head of the chain quickly so that the node can perform its duties. Additionally, it only has to perform lightweight verification of historic blocks: it checks the hash chain integrity & proposer signature rather than computing the full state transition. -3. Is checkpoint sync less secure? +1. Is checkpoint sync less secure? -No, in fact it is more secure! Checkpoint sync guards against long-range attacks that -genesis sync does not. This is due to a property of Proof of Stake consensus known as [Weak -Subjectivity][weak-subj]. + No, in fact it is more secure! Checkpoint sync guards against long-range attacks that genesis sync does not. This is due to a property of Proof of Stake consensus known as [Weak Subjectivity][weak-subj]. ## Reconstructing States @@ -122,7 +118,7 @@ states: Reconstruction runs from the state lower limit to the upper limit, narrowing the window of unavailable states as it goes. 
It will log messages like the following to show its progress: -``` +```text INFO State reconstruction in progress remaining: 747519, slot: 466944, service: freezer_db ``` @@ -150,8 +146,19 @@ For more information on historic state storage see the To manually specify a checkpoint use the following two flags: -* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob -* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob +* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` file +* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` file +* `--checkpoint-blobs`: accepts an SSZ-encoded `Blobs` file + +The command is as following: + +```bash +curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/debug/beacon/states/$SLOT" > state.ssz +curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/beacon/blocks/$SLOT" > block.ssz +curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/blob_sidecars/$SLOT" > blobs.ssz +``` + +where `$SLOT` is the slot number. It can be specified as `head` or `finalized` as well. _Both_ the state and block must be provided and the state **must** match the block. The state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, diff --git a/book/src/cli.md b/book/src/cli.md index 6540d3fc3a0..f9e7df07488 100644 --- a/book/src/cli.md +++ b/book/src/cli.md @@ -4,10 +4,10 @@ The `lighthouse` binary provides all necessary Ethereum consensus client functio has two primary sub-commands: - `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. + the p2p network, processes messages and tracks the head of the beacon + chain. - `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. + key and signs messages using a `beacon_node` as a source-of-truth. There are also some ancillary binaries like `lcli` and `account_manager`, but these are primarily for testing. @@ -34,11 +34,11 @@ Each binary supports the `--help` flag, this is the best source of documentation. ```bash -$ lighthouse beacon_node --help +lighthouse beacon_node --help ``` ```bash -$ lighthouse validator_client --help +lighthouse validator_client --help ``` ## Creating a new database/testnet diff --git a/book/src/contributing.md b/book/src/contributing.md index 5b0ab48e866..312acccbc04 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -8,7 +8,6 @@ [stable]: https://github.com/sigp/lighthouse/tree/stable [unstable]: https://github.com/sigp/lighthouse/tree/unstable - Lighthouse welcomes contributions. If you are interested in contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse is a great project to work on. @@ -56,8 +55,8 @@ Please use [clippy](https://github.com/rust-lang/rust-clippy) and inconsistent code formatting: ```bash -$ cargo clippy --all -$ cargo fmt --all --check +cargo clippy --all +cargo fmt --all --check ``` ### Panics @@ -88,8 +87,9 @@ pub fn my_function(&mut self, _something &[u8]) -> Result { **General Comments** -* Prefer line (``//``) comments to block comments (``/* ... */``) -* Comments can appear on the line prior to the item or after a trailing space. +- Prefer line (``//``) comments to block comments (``/* ... 
*/``) +- Comments can appear on the line prior to the item or after a trailing space. + ```rust // Comment for this struct struct Lighthouse {} @@ -98,8 +98,8 @@ fn make_blockchain() {} // A comment on the same line after a space **Doc Comments** -* The ``///`` is used to generate comments for Docs. -* The comments should come before attributes. +- The ``///`` is used to generate comments for Docs. +- The comments should come before attributes. ```rust /// Stores the core configuration for this Lighthouse instance. @@ -123,9 +123,9 @@ introduction and tutorial for the language). Rust has a steep learning curve, but there are many resources to help. We suggest: -* [Rust Book](https://doc.rust-lang.org/stable/book/) -* [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) -* [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) -* [Rustlings](https://github.com/rustlings/rustlings) -* [Rust Exercism](https://exercism.io/tracks/rust) -* [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) +- [Rust Book](https://doc.rust-lang.org/stable/book/) +- [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) +- [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) +- [Rustlings](https://github.com/rustlings/rustlings) +- [Rust Exercism](https://exercism.io/tracks/rust) +- [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 7cf7f4feb14..dfddcbc2945 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -4,7 +4,6 @@ Lighthouse supports cross-compiling, allowing users to run a binary on one platform (e.g., `aarch64`) that was compiled on another platform (e.g., `x86_64`). - ## Instructions Cross-compiling requires [`Docker`](https://docs.docker.com/engine/install/), diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index 1e8e134436e..fc16641da08 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -16,20 +16,19 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| -| v5.1.0 | Mar 2024 | v19 | yes before Deneb | -| v5.0.0 | Feb 2024 | v19 | yes before Deneb | -| v4.6.0 | Dec 2023 | v19 | yes before Deneb | -| v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb | -| v4.5.0 | Sep 2023 | v17 | yes | -| v4.4.0 | Aug 2023 | v17 | yes | -| v4.3.0 | Jul 2023 | v17 | yes | -| v4.2.0 | May 2023 | v17 | yes | -| v4.1.0 | Apr 2023 | v16 | no | -| v4.0.1 | Mar 2023 | v16 | no | +| v5.3.0 | Aug 2024 TBD | v22 TBD | no (TBD) | +| v5.2.0 | Jun 2024 | v19 | no | +| v5.1.0 | Mar 2024 | v19 | no | +| v5.0.0 | Feb 2024 | v19 | no | +| v4.6.0 | Dec 2023 | v19 | no | > **Note**: All point releases (e.g. v4.4.1) are schema-compatible with the prior minor release > (e.g. v4.4.0). +> **Note**: Even if no schema downgrade is available, it is still possible to move between versions +> that use the same schema. E.g. you can downgrade from v5.2.0 to v5.0.0 because both use schema +> v19. + > **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We usually do this after a major version has been out for a while and everyone has upgraded. 
Deprecated schema versions for previous releases are archived under @@ -53,13 +52,13 @@ To apply a downgrade you need to use the `lighthouse db migrate` command with th 5. After stopping the beacon node, run the migrate command with the `--to` parameter set to the schema version you would like to downgrade to. -``` +```bash sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET" ``` For example if you want to downgrade to Lighthouse v4.0.1 from v4.2.0 and you followed Somer Esat's guide, you would run: -``` +```bash sudo -u lighthousebeacon lighthouse db migrate --to 16 --datadir /var/lib/lighthouse --network mainnet ``` @@ -113,7 +112,7 @@ The `schema_version` key indicates that this database is using schema version 16 Alternatively, you can check the schema version with the `lighthouse db` command. -``` +```bash sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet ``` @@ -132,25 +131,27 @@ Several conditions need to be met in order to run `lighthouse db`: The general form for a `lighthouse db` command is: -``` +```bash sudo -u "$LH_USER" lighthouse db version --datadir "$LH_DATADIR" --network "$NET" ``` If you followed Somer Esat's guide for mainnet: -``` +```bash sudo systemctl stop lighthousebeacon ``` -``` + +```bash sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet ``` If you followed the CoinCashew guide for mainnet: -``` +```bash sudo systemctl stop beacon-chain ``` -``` + +```bash lighthouse db version --network mainnet ``` @@ -178,7 +179,7 @@ Here are the steps to prune historic states: If pruning is available, Lighthouse will log: - ``` + ```text INFO Ready to prune states WARN Pruning states is irreversible WARN Re-run this command with --confirm to commit to state deletion @@ -193,10 +194,10 @@ Here are the steps to prune historic states: The `--confirm` flag ensures that you are aware the action is irreversible, and historic states will be permanently removed. Lighthouse will log: - ``` + ```text INFO Historic states pruned successfully ``` - + 4. After successfully pruning the historic states, you can restart the Lighthouse beacon node: ```bash @@ -207,12 +208,15 @@ Here are the steps to prune historic states: | Lighthouse version | Release date | Schema version | Downgrade available? 
| |--------------------|--------------|----------------|-------------------------------------| -| v4.6.0 | Dec 2023 | v19 | yes before Deneb | -| v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb | -| v4.5.0 | Sep 2023 | v17 | yes | -| v4.4.0 | Aug 2023 | v17 | yes | -| v4.3.0 | Jul 2023 | v17 | yes | -| v4.2.0 | May 2023 | v17 | yes | +| v5.2.0 | Jun 2024 | v19 | yes before Deneb using <= v5.2.1 | +| v5.1.0 | Mar 2024 | v19 | yes before Deneb using <= v5.2.1 | +| v5.0.0 | Feb 2024 | v19 | yes before Deneb using <= v5.2.1 | +| v4.6.0 | Dec 2023 | v19 | yes before Deneb using <= v5.2.1 | +| v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb using <= v5.2.1 | +| v4.5.0 | Sep 2023 | v17 | yes using <= v5.2.1 | +| v4.4.0 | Aug 2023 | v17 | yes using <= v5.2.1 | +| v4.3.0 | Jul 2023 | v17 | yes using <= v5.2.1 | +| v4.2.0 | May 2023 | v17 | yes using <= v5.2.1 | | v4.1.0 | Apr 2023 | v16 | yes before Capella using <= v4.5.0 | | v4.0.1 | Mar 2023 | v16 | yes before Capella using <= v4.5.0 | | v3.5.0 | Feb 2023 | v15 | yes before Capella using <= v4.5.0 | diff --git a/book/src/developers.md b/book/src/developers.md index ab12bed5b94..244c935ac2f 100644 --- a/book/src/developers.md +++ b/book/src/developers.md @@ -5,7 +5,6 @@ _Documentation for protocol developers._ This section lists Lighthouse-specific decisions that are not strictly spec'd and may be useful for other protocol developers wishing to interact with lighthouse. - ## Custom ENR Fields Lighthouse currently uses the following ENR fields: @@ -18,7 +17,6 @@ Lighthouse currently uses the following ENR fields: | `attnets` | An SSZ bitfield which indicates which of the 64 subnets the node is subscribed to for an extended period of time | | `syncnets` | An SSZ bitfield which indicates which of the sync committee subnets the node is subscribed to | - ### Lighthouse Custom Fields Lighthouse is currently using the following custom ENR fields. @@ -27,7 +25,6 @@ Lighthouse is currently using the following custom ENR fields. | `quic` | The UDP port on which the QUIC transport is listening on IPv4 | | `quic6` | The UDP port on which the QUIC transport is listening on IPv6 | - ## Custom RPC Messages The specification leaves room for implementation-specific errors. Lighthouse uses the following @@ -43,7 +40,6 @@ custom RPC error messages. | 251 | Banned | The peer has been banned and disconnected | | 252 | Banned IP | The IP the node is connected to us with has been banned | - ### Error Codes | Code | Message | Description | diff --git a/book/src/docker.md b/book/src/docker.md index 2c410877e57..16e685491ef 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -30,7 +30,7 @@ If you can see the latest [Lighthouse release](https://github.com/sigp/lighthous ### Example Version Output -``` +```text Lighthouse vx.x.xx-xxxxxxxxx BLS Library: xxxx-xxxxxxx ``` @@ -49,13 +49,13 @@ compatibility (see [Portability](./installation-binaries.md#portability)). To install a specific tag (in this case `latest-modern`), add the tag name to your `docker` commands: -``` +```bash docker pull sigp/lighthouse:latest-modern ``` Image tags follow this format: -``` +```text ${version}${arch}${stability}${modernity}${features} ``` @@ -85,7 +85,6 @@ The `features` is: * `-dev` for a development build with `minimal` preset enabled (`spec-minimal` feature). * empty for a standard build with no custom feature enabled. 
- Examples: * `latest-unstable-modern`: most recent `unstable` build for all modern CPUs (x86_64 or ARM) diff --git a/book/src/faq.md b/book/src/faq.md index 9cc695c442f..2de7841343c 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -1,6 +1,7 @@ # Frequently Asked Questions ## [Beacon Node](#beacon-node-1) + - [I see a warning about "Syncing deposit contract block cache" or an error about "updating deposit contract cache", what should I do?](#bn-deposit-contract) - [I see beacon logs showing `WARN: Execution engine called failed`, what should I do?](#bn-ee) - [I see beacon logs showing `Error during execution engine upcheck`, what should I do?](#bn-upcheck) @@ -14,8 +15,10 @@ - [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice) - [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full) - [My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?](#bn-deposit-cache) +- [My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean?](#bn-blob) ## [Validator](#validator-1) + - [Why does it take so long for a validator to be activated?](#vc-activation) - [Can I use redundancy in my staking setup?](#vc-redundancy) - [I am missing attestations. Why?](#vc-missed-attestations) @@ -27,6 +30,7 @@ - [How can I delete my validator once it is imported?](#vc-delete) ## [Network, Monitoring and Maintenance](#network-monitoring-and-maintenance-1) + - [I have a low peer count and it is not increasing](#net-peer) - [How do I update lighthouse?](#net-update) - [Do I need to set up any port mappings (port forwarding)?](#net-port-forwarding) @@ -38,13 +42,14 @@ - [How to know how many of my peers are connected through QUIC?](#net-quic) ## [Miscellaneous](#miscellaneous-1) + - [What should I do if I lose my slashing protection database?](#misc-slashing) - [I can't compile lighthouse](#misc-compile) - [How do I check the version of Lighthouse that is running?](#misc-version) - [Does Lighthouse have pruning function like the execution client to save disk space?](#misc-prune) - [Can I use a HDD for the freezer database and only have the hot db on SSD?](#misc-freezer) - [Can Lighthouse log in local timestamp instead of UTC?](#misc-timestamp) -- [My hard disk is full and my validator is down. What should I do? ](#misc-full) +- [My hard disk is full and my validator is down. What should I do?](#misc-full) ## Beacon Node @@ -52,13 +57,13 @@ The error can be a warning: -``` +```text Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` or an error: -``` +```text ERRO Error updating deposit contract cache error: Failed to get remote head and new block ranges: EndpointError(FarBehind), retry_millis: 60000, service: deposit_contract_rpc ``` @@ -80,11 +85,13 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann `error: HttpClient(url: http://127.0.0.1:8551/, kind: timeout, detail: operation timed out), service: exec` which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: + 1. The execution engine is not synced. 
Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. If the reason for the error message is caused by no. 1 above, you may want to look further. If the execution engine is out of sync suddenly, it is usually caused by ungraceful shutdown. The common causes for ungraceful shutdown are: + - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. - The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. - Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. @@ -95,7 +102,7 @@ An example of the full error is: `ERRO Error during execution engine upcheck error: HttpClient(url: http://127.0.0.1:8551/, kind: request, detail: error trying to connect: tcp connect error: Connection refused (os error 111)), service: exec` -Connection refused means the beacon node cannot reach the execution client. This could be due to the execution client is offline or the configuration is wrong. If the execution client is offline, run the execution engine and the error will disappear. +Connection refused means the beacon node cannot reach the execution client. This could be due to the execution client is offline or the configuration is wrong. If the execution client is offline, run the execution engine and the error will disappear. If it is a configuration issue, ensure that the execution engine can be reached. The standard endpoint to connect to the execution client is `--execution-endpoint http://localhost:8551`. If the execution client is on a different host, the endpoint to connect to it will change, e.g., `--execution-endpoint http://IP_address:8551` where `IP_address` is the IP of the execution client node (you may also need additional flags to be set). If it is using another port, the endpoint link needs to be changed accordingly. Once the execution client/beacon node is configured correctly, the error will disappear. @@ -109,13 +116,12 @@ INFO Downloading historical blocks est_time: --, distance: 4524545 slo If the same log appears every minute and you do not see progress in downloading historical blocks, you can try one of the followings: - - Check the number of peers you are connected to. If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. - - Restart the beacon node. - +- Check the number of peers you are connected to. 
If you have low peers (less than 50), try to do port forwarding on the ports 9000 TCP/UDP and 9001 UDP to increase peer count. +- Restart the beacon node. ### I proposed a block but the beacon node shows `could not publish message` with error `duplicate` as below, should I be worried? -``` +```text INFO Block from HTTP API already known` WARN Could not publish message error: Duplicate, service: libp2p ``` @@ -128,7 +134,7 @@ In short, it is nothing to worry about. The log looks like: -``` +```text WARN Head is optimistic execution_block_hash: 0x47e7555f1d4215d1ad409b1ac188b008fcb286ed8f38d3a5e8078a0af6cbd6e1, info: chain not fully verified, block and attestation production disabled until execution engine syncs, service: slot_notifier ``` @@ -138,7 +144,7 @@ It means the beacon node will follow the chain, but it will not be able to attes An example of the log is shown below: -``` +```text CRIT Beacon block processing error error: ValidatorPubkeyCacheLockTimeout, service: beacon WARN BlockProcessingFailure outcome: ValidatorPubkeyCacheLockTimeout, msg: unexpected condition in processing block. ``` @@ -149,7 +155,7 @@ A `Timeout` error suggests that the computer may be overloaded at the moment, fo An example of the full log is shown below: -``` +```text WARN BlockProcessingFailure outcome: MissingBeaconBlock(0xbdba211f8d72029554e405d8e4906690dca807d1d7b1bc8c9b88d7970f1648bc), msg: unexpected condition in processing block. ``` @@ -165,41 +171,41 @@ This warning usually comes with an http error code. Some examples are given belo 1. The log shows: -``` -WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs -``` + ```text + WARN Error processing HTTP API request method: GET, path: /eth/v1/validator/attestation_data, status: 500 Internal Server Error, elapsed: 305.65µs + ``` -The error is `500 Internal Server Error`. This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. + The error is `500 Internal Server Error`. This suggests that the execution client is not synced. Once the execution client is synced, the error will disappear. -2. The log shows: +1. The log shows: -``` -WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs -``` + ```text + WARN Error processing HTTP API request method: POST, path: /eth/v1/validator/duties/attester/199565, status: 503 Service Unavailable, elapsed: 96.787µs + ``` -The error is `503 Service Unavailable`. This means that the beacon node is still syncing. When this happens, the validator client will log: + The error is `503 Service Unavailable`. This means that the beacon node is still syncing. 
When this happens, the validator client will log: -``` -ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing -``` + ```text + ERRO Failed to download attester duties err: FailedToDownloadAttesters("Some endpoints failed, num_failed: 2 http://localhost:5052/ => Unavailable(NotSynced), http://localhost:5052/ => RequestFailed(ServerMessage(ErrorMessage { code: 503, message: \"SERVICE_UNAVAILABLE: beacon node is syncing + ``` -This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. + This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. ### My beacon node logs `WARN Error signalling fork choice waiter`, what should I do? An example of the full log is shown below: -``` +```text WARN Error signalling fork choice waiter slot: 6763073, error: ForkChoiceSignalOutOfOrder { current: Slot(6763074), latest: Slot(6763073) }, service: state_advance ``` This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced. - ### My beacon node logs `ERRO Aggregate attestation queue full`, what should I do? An example of the full log is shown below: -``` + +```text ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542 ``` @@ -207,7 +213,17 @@ This suggests that the computer resources are being overwhelmed. It could be due ### My beacon node logs `WARN Failed to finalize deposit cache`, what should I do? -This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself. +This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will fix by itself. + +### My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean? + +An example of the full log is shown below: + +```text +Jun 07 23:05:12.170 WARN Could not verify blob sidecar for gossip. Ignoring the blob sidecar, commitment: 0xaa97…6f54, index: 1, root: 0x93b8…c47c, slot: 9248017, error: PastFinalizedSlot { blob_slot: Slot(9248017), finalized_slot: Slot(9248032) }, module: network::network_beacon_processor::gossip_methods:720 +``` + +The `PastFinalizedSlot` indicates that the time at which the node received the blob has past the finalization period. This could be due to a peer sending an earlier blob. The log will be gone when Lighthouse eventually drops the peer. ## Validator @@ -312,7 +328,9 @@ However, there are some components which can be configured with redundancy. See [Redundancy](./redundancy.md) guide for more information. ### I am missing attestations. Why? + The first thing is to ensure both consensus and execution clients are synced with the network. 
If they are synced, there may still be some issues with the node setup itself that is causing the missed attestations. Check the setup to ensure that: + - the clock is synced - the computer has sufficient resources and is not overloaded - the internet is working well @@ -320,14 +338,24 @@ The first thing is to ensure both consensus and execution clients are synced wit You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). -Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`): +Another cause for missing attestations is the block arriving late, or there are delays during block processing. -``` -DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon +An example of the log: (debug logs can be found under `$datadir/beacon/logs`): + +```text +Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441 ``` -The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block. +The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation wil fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. 
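To locate these entries on your own node, one option (a sketch, assuming the `$datadir/beacon/logs` location mentioned above, the `/var/lib/lighthouse` datadir used elsewhere in this document, and the default debug log file name) is to search the beacon node's debug log directly:

```bash
# Print the most recent "Delayed head block" entries from the beacon node
# debug log; adjust the path if your datadir or log file name differs.
grep "Delayed head block" /var/lib/lighthouse/beacon/logs/beacon.log | tail -n 5
```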
+Another example of log: + +``` +DEBG Delayed head block, set_as_head_time_ms: 22, imported_time_ms: 312, attestable_delay_ms: 7052, available_delay_ms: 6874, execution_time_ms: 4694, blob_delay_ms: 2159, observed_delay_ms: 2179, total_delay_ms: 7209, slot: 1885922, proposer_index: 606896, block_root: 0x9966df24d24e722d7133068186f0caa098428696e9f441ac416d0aca70cc0a23, service: beacon, module: beacon_chain::canonical_head:1441 +/159.69.68.247/tcp/9000, service: libp2p, module: lighthouse_network::service:1811 +``` + +In this example, we see that the `execution_time_ms` is 4694ms. The `execution_time_ms` is how long the node took to process the block. The `execution_time_ms` of larger than 1 second suggests that there is slowness in processing the block. If the `execution_time_ms` is high, it could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? @@ -337,18 +365,17 @@ You could also check for the sync aggregate participation percentage on block ex Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward. - ### Can I submit a voluntary exit message without running a beacon node? Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). It is also noted that you can submit your BLS-to-execution-change message to update your withdrawal credentials from type `0x00` to `0x01` using the same link. -If you would like to still use Lighthouse to submit the message, you will need to run a beacon node and an execution client. For the beacon node, you can use checkpoint sync to quickly sync the chain under a minute. On the other hand, the execution client can be syncing and *needs not be synced*. This implies that it is possible to broadcast a voluntary exit message within a short time by quickly spinning up a node. +If you would like to still use Lighthouse to submit the message, you will need to run a beacon node and an execution client. For the beacon node, you can use checkpoint sync to quickly sync the chain under a minute. On the other hand, the execution client can be syncing and _needs not be synced_. This implies that it is possible to broadcast a voluntary exit message within a short time by quickly spinning up a node. ### Does increasing the number of validators increase the CPU and other computer resources used? -A computer with hardware specifications stated in the [Recommended System Requirements](./installation.md#recommended-system-requirements) can run hundreds validators with only marginal increase in CPU usage. 
+A computer with hardware specifications stated in the [Recommended System Requirements](./installation.md#recommended-system-requirements) can run hundreds validators with only marginal increase in CPU usage. ### I want to add new validators. Do I have to reimport the existing keys? @@ -360,8 +387,7 @@ Generally yes. If you do not want to stop `lighthouse vc`, you can use the [key manager API](./api-vc-endpoints.md) to import keys. - -### How can I delete my validator once it is imported? +### How can I delete my validator once it is imported? Lighthouse supports the [KeyManager API](https://ethereum.github.io/keymanager-APIs/#/Local%20Key%20Manager/deleteKeys) to delete validators and remove them from the `validator_definitions.yml` file. To do so, start the validator client with the flag `--http` and call the API. @@ -371,7 +397,7 @@ If you are looking to delete the validators in one node and import it to another ### I have a low peer count and it is not increasing -If you cannot find *ANY* peers at all, it is likely that you have incorrect +If you cannot find _ANY_ peers at all, it is likely that you have incorrect network configuration settings. Ensure that the network you wish to connect to is correct (the beacon node outputs the network it is connecting to in the initial boot-up log lines). On top of this, ensure that you are not using the @@ -385,26 +411,25 @@ expect, there are a few things to check on: 1. Ensure that port forward was correctly set up as described [here](./advanced_networking.md#nat-traversal-port-forwarding). -To check that the ports are forwarded, run the command: + To check that the ports are forwarded, run the command: - ```bash - curl http://localhost:5052/lighthouse/nat - ``` + ```bash + curl http://localhost:5052/lighthouse/nat + ``` -It should return `{"data":true}`. If it returns `{"data":false}`, you may want to double check if the port forward was correctly set up. + It should return `{"data":true}`. If it returns `{"data":false}`, you may want to double check if the port forward was correctly set up. -If the ports are open, you should have incoming peers. To check that you have incoming peers, run the command: + If the ports are open, you should have incoming peers. To check that you have incoming peers, run the command: - ```bash - curl localhost:5052/lighthouse/peers | jq '.[] | select(.peer_info.connection_direction=="Incoming")' - ``` + ```bash + curl localhost:5052/lighthouse/peers | jq '.[] | select(.peer_info.connection_direction=="Incoming")' + ``` -If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. + If you have incoming peers, it should return a lot of data containing information of peers. If the response is empty, it means that you have no incoming peers and there the ports are not open. You may want to double check if the port forward was correctly set up. -2. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 80. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. - -3. Ensure that you have a quality router for the internet connection. 
For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. +1. Check that you do not lower the number of peers using the flag `--target-peers`. The default is 100. A lower value set will lower the maximum number of peers your node can connect to, which may potentially interrupt the validator performance. We recommend users to leave the `--target peers` untouched to keep a diverse set of peers. +1. Ensure that you have a quality router for the internet connection. For example, if you connect the router to many devices including the node, it may be possible that the router cannot handle all routing tasks, hence struggling to keep up the number of peers. Therefore, using a quality router for the node is important to keep a healthy number of peers. ### How do I update lighthouse? @@ -415,7 +440,7 @@ If you are updating by rebuilding from source, see [here.](./installation-source If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: ```bash -$ docker pull sigp/lighthouse:v1.0.0 +docker pull sigp/lighthouse:v1.0.0 ``` If you are building a docker image, the process will be similar to the one described [here.](./docker.md#building-the-docker-image) @@ -461,7 +486,7 @@ Monitoring](./validator-monitoring.md) for more information. Lighthouse has also The setting on the beacon node is the same for both cases below. In the beacon node, specify `lighthouse bn --http-address local_IP` so that the beacon node is listening on the local network rather than `localhost`. You can find the `local_IP` by running the command `hostname -I | awk '{print $1}'` on the server running the beacon node. -1. If the beacon node and validator clients are on different servers *in the same network*, the setting in the validator client is as follows: +1. If the beacon node and validator clients are on different servers _in the same network_, the setting in the validator client is as follows: Use the flag `--beacon-nodes` to point to the beacon node. For example, `lighthouse vc --beacon-nodes http://local_IP:5052` where `local_IP` is the local IP address of the beacon node and `5052` is the default `http-port` of the beacon node. @@ -475,34 +500,33 @@ The setting on the beacon node is the same for both cases below. In the beacon n You can refer to [Redundancy](./redundancy.md) for more information. -2. If the beacon node and validator clients are on different servers *and different networks*, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. - +2. If the beacon node and validator clients are on different servers _and different networks_, it is necessary to perform port forwarding of the SSH port (e.g., the default port 22) on the router, and also allow firewall on the SSH port. The connection can be established via port forwarding on the router. - - In the validator client, use the flag `--beacon-nodes` to point to the beacon node. However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. 
You can get the public IP address of the beacon node by running the command ` dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node. + In the validator client, use the flag `--beacon-nodes` to point to the beacon node. However, since the beacon node and the validator client are on different networks, the IP address to use is the public IP address of the beacon node, i.e., `lighthouse vc --beacon-nodes http://public_IP:5052`. You can get the public IP address of the beacon node by running the command `dig +short myip.opendns.com @resolver1.opendns.com` on the server running the beacon node. Additionally, port forwarding of port 5052 on the router connected to the beacon node is required for the vc to connect to the bn. To do port forwarding, refer to [how to open ports](./advanced_networking.md#how-to-open-ports). - If you have firewall setup, e.g., `ufw`, you will need to allow connections to port 5052 (assuming that the default port is used). Since the beacon node HTTP/HTTPS API is public-facing (i.e., the 5052 port is now exposed to the internet due to port forwarding), we strongly recommend users to apply IP-address filtering to the BN/VC connection from malicious actors. This can be done using the command: - ``` + ```bash sudo ufw allow from vc_IP_address proto tcp to any port 5052 ``` - where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet. + where `vc_IP_address` is the public IP address of the validator client. The command will only allow connections to the beacon node from the validator client IP address to prevent malicious attacks on the beacon node over the internet. It is also worth noting that the `--beacon-nodes` flag can also be used for redundancy of beacon nodes. For example, let's say you have a beacon node and a validator client running on the same host, and a second beacon node on another server as a backup. In this case, you can use `lighthouse vc --beacon-nodes http://localhost:5052, http://IP-address:5052` on the validator client. ### Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address? + No. Lighthouse will auto-detect the change and update your Ethereum Node Record (ENR). You just need to make sure you are not manually setting the ENR with `--enr-address` (which, for common use cases, this flag is not used). ### How to change the TCP/UDP port 9000 that Lighthouse listens on? + Use the flag `--port ` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., `--port 9100`. Since V4.5.0, Lighthouse supports QUIC and by default will use the value of `--port` + 1 to listen via UDP (default `9001`). This can be configured by using the flag `--quic-port`. Refer to [Advanced Networking](./advanced_networking.md#nat-traversal-port-forwarding) for more information. -### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators return. +### Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. 
I am worried that this will impact my validators return Previously, having more validators means subscribing to more subnets. Since the change, a node will now only subscribe to 2 subnets in total. This will bring about significant reductions in bandwidth for nodes with multiple validators. @@ -512,20 +536,23 @@ If you would still like to subscribe to all subnets, you can use the flag `subsc ### How to know how many of my peers are connected via QUIC? -With `--metrics` enabled in the beacon node, you can find the number of peers connected via QUIC using: +With `--metrics` enabled in the beacon node, the [Grafana Network dashboard](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/Network.json) displays the connected by transport, which will show the number of peers connected via QUIC. + +Alternatively, you can find the number of peers connected via QUIC manually using: ```bash - curl -s "http://localhost:5054/metrics" | grep libp2p_quic_peers + curl -s "http://localhost:5054/metrics" | grep 'transport="quic"' ``` A response example is: +```text +libp2p_peers_multi{direction="inbound",transport="quic"} 27 +libp2p_peers_multi{direction="none",transport="quic"} 0 +libp2p_peers_multi{direction="outbound",transport="quic"} 9 ``` -# HELP libp2p_quic_peers Count of libp2p peers currently connected via QUIC -# TYPE libp2p_quic_peers gauge -libp2p_quic_peers 4 -``` -which shows that there are 4 peers connected via QUIC. + +which shows that there are a total of 36 peers connected via QUIC. ## Miscellaneous @@ -552,19 +579,22 @@ Specs: mainnet (true), minimal (false), gnosis (true) If you download the binary file, navigate to the location of the directory, for example, the binary file is in `/usr/local/bin`, run `/usr/local/bin/lighthouse --version`, the example of output is the same as above. Alternatively, if you have Lighthouse running, on the same computer, you can run: + ```bash curl "http://127.0.0.1:5052/eth/v1/node/version" ``` Example of output: + ```bash {"data":{"version":"Lighthouse/v4.1.0-693886b/x86_64-linux"}} ``` + which says that the version is v4.1.0. ### Does Lighthouse have pruning function like the execution client to save disk space? -Yes, Lighthouse supports [state pruning](./database-migrations.md#how-to-prune-historic-states) which can help to save disk space. +Yes, Lighthouse supports [state pruning](./database-migrations.md#how-to-prune-historic-states) which can help to save disk space. ### Can I use a HDD for the freezer database and only have the hot db on SSD? @@ -574,20 +604,6 @@ Yes, you can do so by using the flag `--freezer-dir /path/to/freezer_db` in the The reason why Lighthouse logs in UTC is due to the dependency on an upstream library that is [yet to be resolved](https://github.com/sigp/lighthouse/issues/3130). Alternatively, using the flag `disable-log-timestamp` in combination with systemd will suppress the UTC timestamps and print the logs in local timestamps. -### My hard disk is full and my validator is down. What should I do? +### My hard disk is full and my validator is down. What should I do? A quick way to get the validator back online is by removing the Lighthouse beacon node database and resync Lighthouse using checkpoint sync. A guide to do this can be found in the [Lighthouse Discord server](https://discord.com/channels/605577013327167508/605577013331361793/1019755522985050142). With some free space left, you will then be able to prune the execution client database to free up more space. 
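As an illustrative sequence only (not from the guide linked above; the systemd unit name is an assumption based on the Somer Esat examples used elsewhere in this document), the recovery could look like:

```bash
# Remove the old beacon database on next start and resync via checkpoint sync by
# adding these flags to the beacon node command in your service file:
#   --purge-db --checkpoint-sync-url https://example.com/
sudo systemctl stop lighthousebeacon
# ... edit the service file to add the flags above ...
sudo systemctl daemon-reload
sudo systemctl start lighthousebeacon
```

Once the node has finished checkpoint sync and the validator is back online, the execution client database can be pruned to reclaim further disk space.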
- - - - - - - - - - - - - - diff --git a/book/src/graffiti.md b/book/src/graffiti.md index 302f8f96795..ba9c7d05d70 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -2,14 +2,16 @@ Lighthouse provides four options for setting validator graffiti. -### 1. Using the "--graffiti-file" flag on the validator client +## 1. Using the "--graffiti-file" flag on the validator client + Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` The file should contain key value pairs corresponding to validator public keys and their associated graffiti. The file can also contain a `default` key for the default case. -``` + +```text default: default_graffiti public_key1: graffiti1 public_key2: graffiti2 @@ -18,7 +20,7 @@ public_key2: graffiti2 Below is an example of a graffiti file: -``` +```text default: Lighthouse 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007: mr f was here 0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477: mr v was here @@ -26,13 +28,15 @@ default: Lighthouse Lighthouse will first search for the graffiti corresponding to the public key of the proposing validator, if there are no matches for the public key, then it uses the graffiti corresponding to the default key if present. -### 2. Setting the graffiti in the `validator_definitions.yml` +## 2. Setting the graffiti in the `validator_definitions.yml` + Users can set validator specific graffitis in `validator_definitions.yml` with the `graffiti` key. This option is recommended for static setups where the graffitis won't change on every new block proposal. -You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). +You can also update the graffitis in the `validator_definitions.yml` file using the [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey). See example in [Set Graffiti via HTTP](#set-graffiti-via-http). Below is an example of the validator_definitions.yml with validator specific graffitis: -``` + +```text --- - enabled: true voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" @@ -48,32 +52,35 @@ Below is an example of the validator_definitions.yml with validator specific gra graffiti: "somethingprofound" ``` -### 3. Using the "--graffiti" flag on the validator client +## 3. Using the "--graffiti" flag on the validator client + Users can specify a common graffiti for all their validators using the `--graffiti` flag on the validator client. Usage: `lighthouse vc --graffiti example` -### 4. Using the "--graffiti" flag on the beacon node +## 4. Using the "--graffiti" flag on the beacon node + Users can also specify a common graffiti using the `--graffiti` flag on the beacon node as a common graffiti for all validators. Usage: `lighthouse bn --graffiti fortytwo` > Note: The order of preference for loading the graffiti is as follows: +> > 1. Read from `--graffiti-file` if provided. -> 2. 
If `--graffiti-file` is not provided or errors, read graffiti from `validator_definitions.yml`. -> 3. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. -> 4. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. -> 4. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. +> 1. If `--graffiti-file` is not provided or errors, read graffiti from `validator_definitions.yml`. +> 1. If graffiti is not specified in `validator_definitions.yml`, load the graffiti passed in the `--graffiti` flag on the validator client. +> 1. If the `--graffiti` flag on the validator client is not passed, load the graffiti passed in the `--graffiti` flag on the beacon node. +> 1. If the `--graffiti` flag is not passed, load the default Lighthouse graffiti. -### Set Graffiti via HTTP +## Set Graffiti via HTTP Use the [Lighthouse API](api-vc-endpoints.md) to set graffiti on a per-validator basis. This method updates the graffiti -both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal +both in memory and in the `validator_definitions.yml` file. The new graffiti will be used in the next block proposal without requiring a validator client restart. Refer to [Lighthouse API](api-vc-endpoints.html#patch-lighthousevalidatorsvoting_pubkey) for API specification. -#### Example Command +### Example Command ```bash DATADIR=/var/lib/lighthouse @@ -85,4 +92,4 @@ curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf4 }' | jq ``` -A `null` response indicates that the request is successful. \ No newline at end of file +A `null` response indicates that the request is successful. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index e437925a0e8..5288b6a1de6 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -1,489 +1,606 @@ # Beacon Node ``` -Sigma Prime -The primary component which connects to the Ethereum 2.0 P2P network and downloads, verifies and stores blocks. Provides -a HTTP API for querying the beacon chain and publishing messages to the network. - -USAGE: - lighthouse beacon_node [FLAGS] [OPTIONS] - -FLAGS: - --allow-insecure-genesis-sync Enable syncing from genesis, which is generally insecure and incompatible - with data availability checks. Checkpoint syncing is the preferred method - for syncing a node. Only use this flag when testing. DO NOT use on - mainnet! - --always-prefer-builder-payload This flag is deprecated and has no effect. - --always-prepare-payload Send payload attributes with every fork choice update. This is intended - for use by block builders, relays and developers. You should set a fee - recipient on this BN and also consider adjusting the --prepare-payload- - lookahead flag. - --builder-fallback-disable-checks This flag disables all checks related to chain health. This means the - builder API will always be used for payload construction, regardless of - recent chain conditions. - --compact-db If present, apply compaction to the database on start-up. Use with - caution. It is generally not recommended unless auto-compaction is - disabled. - --disable-backfill-rate-limiting Disable the backfill sync rate-limiting. This allow users to just sync - the entire chain as fast as possible, however it can result in resource - contention which degrades staking performance. 
Stakers should generally - choose to avoid this flag since backfill sync is not required for - staking. - --disable-deposit-contract-sync Explicitly disables syncing of deposit logs from the execution node. This - overrides any previous option that depends on it. Useful if you intend to - run a non-validating beacon node. - --disable-duplicate-warn-logs This flag is deprecated and has no effect. - -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP - address and port as seen by other peers on the network. This disables - this feature, fixing the ENR's IP/PORT to those specified on boot. - --disable-lock-timeouts Disable the timeouts applied to some internal locks by default. This can - lead to less spurious failures on slow hardware but is considered - experimental as it may obscure performance issues. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --disable-optimistic-finalized-sync Force Lighthouse to verify every execution block hash with the execution - client during finalized sync. By default block hashes will be checked in - Lighthouse and only passed to the EL if initial verification fails. - --disable-packet-filter Disables the discovery packet filter. Useful for testing in smaller - networks - --disable-proposer-reorgs Do not attempt to reorg late blocks from other validators when proposing. - --disable-quic Disables the quic transport. The node will rely solely on the TCP - transport for libp2p connections. - --disable-upnp Disables UPnP support. Setting this will prevent Lighthouse from - attempting to automatically establish external port mappings. - --dummy-eth1 If present, uses an eth1 backend that generates static dummy - data.Identical to the method used at the 2019 Canada interop. - --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this - flag to enable connection attempts to local addresses. - -e, --enr-match Sets the local ENR IP address and port to match those set for lighthouse. - Specifically, the IP address will be the value of --listen-address and - the UDP port will be --discovery-port. - --eth1 If present the node will connect to an eth1 node. This is required for - block production, you must use this flag if you wish to serve a - validator. - --eth1-purge-cache Purges the eth1 block and deposit caches - --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint - syncing. - --gui Enable the graphical user interface and all its requirements. This - enables --http and --validator-monitor-auto and enables SSE logging. - -h, --help Prints help information - --http Enable the RESTful HTTP API server. Disabled by default. - --http-enable-tls Serves the RESTful HTTP API server over TLS. This feature is currently - experimental. - --import-all-attestations Import and aggregate all attestations, regardless of validator - subscriptions. This will only import attestations from already-subscribed - subnets, use with --subscribe-all-subnets to ensure all attestations are - received for import. - --light-client-server Act as a full node supporting light clients on the p2p network - [experimental] - --log-color Force outputting colors when emitting logs to the terminal. 
- --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they - can be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be - used with caution. For Windows users, the log file permissions will be - inherited from the parent folder. - --metrics Enable the Prometheus metrics HTTP server. Disabled by default. - --private Prevents sending various client identification information. - --proposer-only Sets this beacon node at be a block proposer only node. This will run the - beacon node in a minimal configuration that is sufficient for block - publishing only. This flag should be used for a beacon node being - referenced by validator client using the --proposer-node flag. This - configuration is for enabling more secure setups. - --purge-db If present, the chain database will be deleted. Use with caution. - --reconstruct-historic-states After a checkpoint sync, reconstruct historic states in the database. - This requires syncing all the way back to genesis. - --reset-payload-statuses When present, Lighthouse will forget the payload statuses of any already- - imported blocks. This can assist in the recovery from a consensus - failure caused by the execution layer. - --shutdown-after-sync Shutdown beacon node as soon as sync is completed. Backfill sync will not - be performed before shutdown. - --slasher Run a slasher alongside the beacon node. It is currently only recommended - for expert users because of the immaturity of the slasher UX and the - extra resources required. - --staking Standard option for a staking beacon node. This will enable the HTTP - server on localhost:5052 and import deposit logs from the execution node. - This is equivalent to `--http` on merge-ready networks, or `--http - --eth1` pre-merge - --subscribe-all-subnets Subscribe to all subnets regardless of validator count. This will also - advertise the beacon node as being long-lived subscribed to all subnets. - --validator-monitor-auto Enables the automatic detection and monitoring of validators connected to - the HTTP API and using the subnet subscription endpoint. This generally - has the effect of providing additional logging and metrics for locally - controlled validators. - -V, --version Prints version information - -z, --zero-ports Sets all listening TCP/UDP ports to 0, allowing the OS to choose some - arbitrary free ports. - -OPTIONS: - --auto-compact-db - Enable or disable automatic compaction of the database on finalization. [default: true] - - --blob-prune-margin-epochs - The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - - blob_prune_margin_epochs. [default: 0] - --blobs-dir - Data directory for the blobs database. - - --block-cache-size - Specifies how many blocks the database should cache in memory [default: 5] - - --boot-nodes - One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported. - - --builder - The URL of a service compatible with the MEV-boost API. - - --builder-fallback-epochs-since-finalization - If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT - query any connected builders, and will use the local execution engine for payload construction. 
Setting this - value to anything less than 2 will cause the node to NEVER query connected builders. Setting it to 2 will - cause this condition to be hit if there are skips slots at the start of an epoch, right before this node is - set to propose. [default: 3] - --builder-fallback-skips - If this node is proposing a block and has seen this number of skip slots on the canonical chain in a row, it - will NOT query any connected builders, and will use the local execution engine for payload construction. - [default: 3] - --builder-fallback-skips-per-epoch - If this node is proposing a block and has seen this number of skip slots on the canonical chain in the past - `SLOTS_PER_EPOCH`, it will NOT query any connected builders, and will use the local execution engine for - payload construction. [default: 8] - --builder-profit-threshold - This flag is deprecated and has no effect. - - --builder-user-agent - The HTTP user agent to send alongside requests to the builder URL. The default is Lighthouse's version - string. - --checkpoint-blobs - Set the checkpoint blobs to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-block - Set a checkpoint block to start syncing from. Must be aligned and match --checkpoint-state. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-state - Set a checkpoint state to start syncing from. Must be aligned and match --checkpoint-block. Using - --checkpoint-sync-url instead is recommended. - --checkpoint-sync-url - Set the remote beacon node HTTP endpoint to use for checkpoint sync. - - --checkpoint-sync-url-timeout - Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint. [default: 180] - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --discovery-port - The UDP port that discovery will listen on. Defaults to `port` - - --discovery-port6 - The UDP port that discovery will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to - `port6` - --enr-address
... - The IP address/ DNS address to broadcast to other peers on how to reach this node. If a DNS address is - provided, the enr-address is set to the IP address it resolves to and does not auto-update based on PONG - responses in discovery. Set this only if you are sure other nodes can connect to your local node on this - address. This will update the `ip4` or `ip6` ENR fields accordingly. To update both, set this flag twice - with the different values. - --enr-quic-port - The quic UDP4 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv4. - --enr-quic6-port - The quic UDP6 port that will be set on the local ENR. Set this only if you are sure other nodes can connect - to your local node on this port over IPv6. - --enr-tcp-port - The TCP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. The --port flag is used if this is not set. - --enr-tcp6-port - The TCP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. The --port6 flag is used if this is not set. - --enr-udp-port - The UDP4 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv4. - --enr-udp6-port - The UDP6 port of the local ENR. Set this only if you are sure other nodes can connect to your local node on - this port over IPv6. - --epochs-per-blob-prune - The epoch interval with which to prune blobs from Lighthouse's database when they are older than the data - availability boundary relative to the current epoch. [default: 1] - --epochs-per-migration - The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less - frequent runs can be useful for minimizing disk writes [default: 1] - --eth1-blocks-per-log-query - Specifies the number of blocks that a deposit log query should span. This will reduce the size of responses - from the Eth1 endpoint. [default: 1000] - --eth1-cache-follow-distance - Specifies the distance between the Eth1 chain head and the last block which should be imported into the - cache. Setting this value lower can help compensate for irregular Proof-of-Work block times, but setting it - too low can make the node vulnerable to re-orgs. - --execution-endpoint - Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to - populate the deposit cache. - --execution-jwt - File path which contains the hex-encoded JWT secret for the execution endpoint provided in the --execution- - endpoint flag. - --execution-jwt-id - Used by the beacon node to communicate a unique identifier to execution nodes during JWT authentication. It - corresponds to the 'id' field in the JWT claims object.Set to empty by default - --execution-jwt-secret-key - Hex-encoded JWT secret for the execution endpoint provided in the --execution-endpoint flag. - - --execution-jwt-version - Used by the beacon node to communicate a client version to execution nodes during JWT authentication. It - corresponds to the 'clv' field in the JWT claims object.Set to empty by default - --execution-timeout-multiplier - Unsigned integer to multiply the default execution timeouts by. [default: 1] - - --fork-choice-before-proposal-timeout - Set the maximum number of milliseconds to wait for fork choice before proposing a block. 
You can prevent - waiting at all by setting the timeout to 0, however you risk proposing atop the wrong parent block. - [default: 250] - --freezer-dir - Data directory for the freezer database. - - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --graffiti - Specify your custom graffiti to be included in blocks. Defaults to the current version and commit, truncated - to fit in 32 bytes. - --historic-state-cache-size - Specifies how many states from the freezer database should cache in memory [default: 1] - - --http-address
- Set the listen address for the RESTful HTTP API server. - - --http-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5052). - --http-duplicate-block-status - Status code to send when a block that is already known is POSTed to the HTTP API. - - --http-enable-beacon-processor - The beacon processor is a scheduler which provides quality-of-service and DoS protection. When set to - "true", HTTP API requests will be queued and scheduled alongside other tasks. When set to "false", HTTP API - responses will be executed immediately. - --http-port - Set the listen TCP port for the RESTful HTTP API server. - - --http-sse-capacity-multiplier - Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. Increasing this value can - prevent messages from being dropped. - --http-tls-cert - The path of the certificate to be used when serving the HTTP API server over TLS. - - --http-tls-key - The path of the private key to be used when serving the HTTP API server over TLS. Must not be password- - protected. - --invalid-gossip-verified-blocks-path - If a block succeeds gossip validation whilst failing full validation, store the block SSZ as a file at this - path. This feature is only recommended for developers. This directory is not pruned, users should be careful - to avoid filling up their disks. - --libp2p-addresses - One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR. - - --listen-address
... - The address lighthouse will listen for UDP and TCP connections. To listen over IpV4 and IpV6 set this flag - twice with the different values. - Examples: - - --listen-address '0.0.0.0' will listen over IPv4. - - --listen-address '::' will listen over IPv6. - - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the - given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be accepted. - [default: 0.0.0.0] - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --max-skip-slots - Refuse to skip more than this many slots when processing an attestation. This prevents nodes on minority - forks from wasting our time and disk space, but could also cause unnecessary consensus failures, so is - disabled by default. - --metrics-address
- Set the listen address for the Prometheus metrics HTTP server. - - --metrics-allow-origin - Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not - recommended in production). If no value is supplied, the CORS allowed origin is set to the listen address of - this server (e.g., http://localhost:5054). - --metrics-port - Set the listen TCP port for the Prometheus metrics HTTP server. - - --monitoring-endpoint
- Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor - your setup on certain services (e.g. beaconcha.in). This flag sets the endpoint where the beacon node - metrics will be sent. Note: This will send information to a remote sever which may identify and associate - your validators, IP address and other personal information. Always use a HTTPS connection and never provide - an untrusted URL. - --monitoring-endpoint-period - Defines how many seconds to wait between each message sent to the monitoring-endpoint. Default: 60s - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --network-dir - Data directory for network keys. Defaults to network/ inside the beacon node dir. - - --port - The TCP/UDP ports to listen on. There are two UDP ports. The discovery UDP port will be set to this value - and the Quic UDP port will be set to this value + 1. The discovery port can be modified by the --discovery- - port flag and the quic port can be modified by the --quic-port flag. If listening over both IPv4 - and IPv6 the --port flag will apply to the IPv4 address and --port6 to the IPv6 address. [default: 9000] - --port6 - The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and IPv6. Defaults to 9090 when - required. The Quic UDP port will be set to this value + 1. [default: 9090] - --prepare-payload-lookahead - The time before the start of a proposal slot at which payload attributes should be sent. Low values are - useful for execution nodes which don't improve their payload after the first call, and high values are - useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be disabled. [possible values: fast, disabled, - checked, strict] - --proposer-reorg-cutoff - Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent - failed reorgs by ensuring the block has ample time to propagate and be processed by the network. The default - is 1/12th of a slot (1 second on mainnet) - --proposer-reorg-disallowed-offsets - Comma-separated list of integer offsets which can be used to avoid proposing reorging blocks at certain - slots. An offset of N means that reorging proposals will not be attempted at any slot such that `slot % - SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be avoided. Any offsets supplied with this - flag will impose additional restrictions. - --proposer-reorg-epochs-since-finalization - Maximum number of epochs since finalization at which proposer reorgs are allowed. Default: 2 - - --proposer-reorg-parent-threshold - Percentage of parent vote weight above which to attempt a proposer reorg. Default: 160% - - --proposer-reorg-threshold - Percentage of head vote weight below which to attempt a proposer reorg. Default: 20% - - --prune-blobs - Prune blobs from Lighthouse's database when they are older than the data data availability boundary relative - to the current epoch. [default: true] - --prune-payloads - Prune execution payloads from Lighthouse's database. This saves space but imposes load on the execution - client, as payloads need to be reconstructed and sent to syncing peers. [default: true] - --quic-port - The UDP port that quic will listen on. 
Defaults to `port` + 1 - - --quic-port6 - The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + - 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --shuffling-cache-size - Some HTTP API requests can be optimised by caching the shufflings at each epoch. This flag allows the user - to set the shuffling cache size in epochs. Shufflings are dependent on validator count and setting this - value to a large number can consume a large amount of memory. - --slasher-att-cache-size - Set the maximum number of attestation roots for the slasher to cache - - --slasher-backend - Set the database backend to be used by the slasher. [possible values: lmdb, disabled] - - --slasher-broadcast - Broadcast slashings found by the slasher to the rest of the network [Enabled by default]. [default: true] - - --slasher-chunk-size - Number of epochs per validator per chunk stored on disk. - - --slasher-dir - Set the slasher's database directory. - - --slasher-history-length - Configure how many epochs of history the slasher keeps. Immutable after initialization. - - --slasher-max-db-size - Maximum size of the MDBX database used by the slasher. - - --slasher-slot-offset - Set the delay from the start of the slot at which the slasher should ingest attestations. Only effective if - the slasher-update-period is a multiple of the slot duration. - --slasher-update-period - Configure how often the slasher runs batch processing. - - --slasher-validator-chunk-size - Number of validators per chunk stored on disk. - - --slots-per-restore-point - Specifies how often a freezer DB restore point should be stored. Cannot be changed after initialization. - [default: 8192 (mainnet) or 64 (minimal)] - --state-cache-size - Specifies the size of the snapshot cache [default: 3] - - --suggested-fee-recipient - Emergency fallback fee recipient for use in case the validator client does not have one configured. You - should set this flag on the validator client instead of (or in addition to) setting it here. - --target-peers - The target number of peers. - - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). 
This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --trusted-peers - One or more comma-delimited trusted peer ids which always have the highest score according to the peer - scoring system. - --trusted-setup-file-override - Path to a json file containing the trusted setup params. NOTE: This will override the trusted setup that is - generated from the mainnet kzg ceremony. Use with caution - --validator-monitor-file - As per --validator-monitor-pubkeys, but the comma-separated list is contained within a file at the given - path. - --validator-monitor-individual-tracking-threshold - Once the validator monitor reaches this number of local validators it will stop collecting per-validator - Prometheus metrics and issuing per-validator logs. Instead, it will provide aggregate metrics and logs. This - avoids infeasibly high cardinality in the Prometheus database and high log volume when using many - validators. Defaults to 64. - --validator-monitor-pubkeys - A comma-separated list of 0x-prefixed validator public keys. These validators will receive special - monitoring and additional logging. - --wss-checkpoint - Specify a weak subjectivity checkpoint in `block_root:epoch` format to verify the node's sync against. The - block root should be 0x-prefixed. Note that this flag is for verification only, to perform a checkpoint sync - from a recent state use --checkpoint-sync-url. +The primary component which connects to the Ethereum 2.0 P2P network and +downloads, verifies and stores blocks. Provides a HTTP API for querying the +beacon chain and publishing messages to the network. + +Usage: lighthouse beacon_node [OPTIONS] + +Options: + --auto-compact-db + Enable or disable automatic compaction of the database on + finalization. [default: true] + --blob-prune-margin-epochs + The margin for blob pruning in epochs. The oldest blobs are pruned up + until data_availability_boundary - blob_prune_margin_epochs. [default: + 0] + --blobs-dir + Data directory for the blobs database. + --block-cache-size + Specifies how many blocks the database should cache in memory + [default: 5] + --boot-nodes + One or more comma-delimited base64-encoded ENR's to bootstrap the p2p + network. Multiaddr is also supported. + --builder + The URL of a service compatible with the MEV-boost API. + --builder-fallback-epochs-since-finalization + If this node is proposing a block and the chain has not finalized + within this number of epochs, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. Setting this value to anything less than 2 will cause + the node to NEVER query connected builders. Setting it to 2 will cause + this condition to be hit if there are skips slots at the start of an + epoch, right before this node is set to propose. [default: 3] + --builder-fallback-skips + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in a row, it will NOT query any connected + builders, and will use the local execution engine for payload + construction. 
[default: 3] + --builder-fallback-skips-per-epoch + If this node is proposing a block and has seen this number of skip + slots on the canonical chain in the past `SLOTS_PER_EPOCH`, it will + NOT query any connected builders, and will use the local execution + engine for payload construction. [default: 8] + --builder-header-timeout + Defines a timeout value (in milliseconds) to use when fetching a block + header from the builder API. [default: 1000] + --builder-profit-threshold + This flag is deprecated and has no effect. + --builder-user-agent + The HTTP user agent to send alongside requests to the builder URL. The + default is Lighthouse's version string. + --checkpoint-blobs + Set the checkpoint blobs to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-block + Set a checkpoint block to start syncing from. Must be aligned and + match --checkpoint-state. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-state + Set a checkpoint state to start syncing from. Must be aligned and + match --checkpoint-block. Using --checkpoint-sync-url instead is + recommended. + --checkpoint-sync-url + Set the remote beacon node HTTP endpoint to use for checkpoint sync. + --checkpoint-sync-url-timeout + Set the timeout for checkpoint sync calls to remote beacon node HTTP + endpoint. [default: 180] + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --discovery-port + The UDP port that discovery will listen on. Defaults to `port` + --discovery-port6 + The UDP port that discovery will listen on over IPv6 if listening over + both IPv4 and IPv6. Defaults to `port6` + --enr-address
... + The IP address/ DNS address to broadcast to other peers on how to + reach this node. If a DNS address is provided, the enr-address is set + to the IP address it resolves to and does not auto-update based on + PONG responses in discovery. Set this only if you are sure other nodes + can connect to your local node on this address. This will update the + `ip4` or `ip6` ENR fields accordingly. To update both, set this flag + twice with the different values. + --enr-quic-port + The quic UDP4 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv4. + --enr-quic6-port + The quic UDP6 port that will be set on the local ENR. Set this only if + you are sure other nodes can connect to your local node on this port + over IPv6. + --enr-tcp-port + The TCP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. The + --port flag is used if this is not set. + --enr-tcp6-port + The TCP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. The + --port6 flag is used if this is not set. + --enr-udp-port + The UDP4 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv4. + --enr-udp6-port + The UDP6 port of the local ENR. Set this only if you are sure other + nodes can connect to your local node on this port over IPv6. + --epochs-per-blob-prune + The epoch interval with which to prune blobs from Lighthouse's + database when they are older than the data availability boundary + relative to the current epoch. [default: 1] + --epochs-per-migration + The number of epochs to wait between running the migration of data + from the hot DB to the cold DB. Less frequent runs can be useful for + minimizing disk writes [default: 1] + --eth1-blocks-per-log-query + Specifies the number of blocks that a deposit log query should span. + This will reduce the size of responses from the Eth1 endpoint. + [default: 1000] + --eth1-cache-follow-distance + Specifies the distance between the Eth1 chain head and the last block + which should be imported into the cache. Setting this value lower can + help compensate for irregular Proof-of-Work block times, but setting + it too low can make the node vulnerable to re-orgs. + --execution-endpoint + Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC + connection. Uses the same endpoint to populate the deposit cache. + --execution-jwt + File path which contains the hex-encoded JWT secret for the execution + endpoint provided in the --execution-endpoint flag. + --execution-jwt-id + Used by the beacon node to communicate a unique identifier to + execution nodes during JWT authentication. It corresponds to the 'id' + field in the JWT claims object.Set to empty by default + --execution-jwt-secret-key + Hex-encoded JWT secret for the execution endpoint provided in the + --execution-endpoint flag. + --execution-jwt-version + Used by the beacon node to communicate a client version to execution + nodes during JWT authentication. It corresponds to the 'clv' field in + the JWT claims object.Set to empty by default + --execution-timeout-multiplier + Unsigned integer to multiply the default execution timeouts by. + [default: 1] + --fork-choice-before-proposal-timeout + Set the maximum number of milliseconds to wait for fork choice before + proposing a block. 
You can prevent waiting at all by setting the + timeout to 0, however you risk proposing atop the wrong parent block. + [default: 250] + --freezer-dir + Data directory for the freezer database. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. Defaults to the + current version and commit, truncated to fit in 32 bytes. + --historic-state-cache-size + Specifies how many states from the freezer database should cache in + memory [default: 1] + --http-address
+ Set the listen address for the RESTful HTTP API server. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5052). + --http-duplicate-block-status + Status code to send when a block that is already known is POSTed to + the HTTP API. + --http-enable-beacon-processor + The beacon processor is a scheduler which provides quality-of-service + and DoS protection. When set to "true", HTTP API requests will be + queued and scheduled alongside other tasks. When set to "false", HTTP + API responses will be executed immediately. + --http-port + Set the listen TCP port for the RESTful HTTP API server. + --http-sse-capacity-multiplier + Multiplier to apply to the length of HTTP server-sent-event (SSE) + channels. Increasing this value can prevent messages from being + dropped. + --http-tls-cert + The path of the certificate to be used when serving the HTTP API + server over TLS. + --http-tls-key + The path of the private key to be used when serving the HTTP API + server over TLS. Must not be password-protected. + --inbound-rate-limiter-protocols + Configures the inbound rate limiter (requests received by this + node).Rate limit quotas per protocol can be set in the form of + :/. To set quotas for multiple + protocols, separate them by ';'. This is enabled by default, using + default quotas. To disable rate limiting use the + disable-inbound-rate-limiter flag instead. + --invalid-gossip-verified-blocks-path + If a block succeeds gossip validation whilst failing full validation, + store the block SSZ as a file at this path. This feature is only + recommended for developers. This directory is not pruned, users should + be careful to avoid filling up their disks. + --libp2p-addresses + One or more comma-delimited multiaddrs to manually connect to a libp2p + peer without an ENR. + --listen-address [
...] + The address lighthouse will listen for UDP and TCP connections. To + listen over IpV4 and IpV6 set this flag twice with the different + values. + Examples: + - --listen-address '0.0.0.0' will listen over IPv4. + - --listen-address '::' will listen over IPv6. + - --listen-address '0.0.0.0' --listen-address '::' will listen over + both IPv4 and IPv6. The order of the given addresses is not relevant. + However, multiple IPv4, or multiple IPv6 addresses will not be + accepted. [default: 0.0.0.0] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --max-skip-slots + Refuse to skip more than this many slots when processing an + attestation. This prevents nodes on minority forks from wasting our + time and disk space, but could also cause unnecessary consensus + failures, so is disabled by default. + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5054). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote + endpoint. This can be used to monitor your setup on certain services + (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote + sever which may identify and associate your validators, IP address and + other personal information. Always use a HTTPS connection and never + provide an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the + monitoring-endpoint. Default: 60s + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --network-dir + Data directory for network keys. Defaults to network/ inside the + beacon node dir. + --port + The TCP/UDP ports to listen on. There are two UDP ports. The discovery + UDP port will be set to this value and the Quic UDP port will be set + to this value + 1. The discovery port can be modified by the + --discovery-port flag and the quic port can be modified by the + --quic-port flag. If listening over both IPv4 and IPv6 the --port flag + will apply to the IPv4 address and --port6 to the IPv6 address. + [default: 9000] + --port6 + The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 + and IPv6. Defaults to 9090 when required. The Quic UDP port will be + set to this value + 1. [default: 9090] + --prepare-payload-lookahead + The time before the start of a proposal slot at which payload + attributes should be sent. Low values are useful for execution nodes + which don't improve their payload after the first call, and high + values are useful for ensuring the EL is given ample notice. Default: + 1/3 of a slot. + --progressive-balances + Deprecated. This optimisation is now the default and cannot be + disabled. + --proposer-reorg-cutoff + Maximum delay after the start of the slot at which to propose a + reorging block. Lower values can prevent failed reorgs by ensuring the + block has ample time to propagate and be processed by the network. The + default is 1/12th of a slot (1 second on mainnet) + --proposer-reorg-disallowed-offsets + Comma-separated list of integer offsets which can be used to avoid + proposing reorging blocks at certain slots. An offset of N means that + reorging proposals will not be attempted at any slot such that `slot % + SLOTS_PER_EPOCH == N`. By default only re-orgs at offset 0 will be + avoided. Any offsets supplied with this flag will impose additional + restrictions. + --proposer-reorg-epochs-since-finalization + Maximum number of epochs since finalization at which proposer reorgs + are allowed. Default: 2 + --proposer-reorg-parent-threshold + Percentage of parent vote weight above which to attempt a proposer + reorg. Default: 160% + --proposer-reorg-threshold + Percentage of head vote weight below which to attempt a proposer + reorg. Default: 20% + --prune-blobs + Prune blobs from Lighthouse's database when they are older than the + data data availability boundary relative to the current epoch. + [default: true] + --prune-payloads + Prune execution payloads from Lighthouse's database. This saves space + but imposes load on the execution client, as payloads need to be + reconstructed and sent to syncing peers. [default: true] + --quic-port + The UDP port that quic will listen on. Defaults to `port` + 1 + --quic-port6 + The UDP port that quic will listen on over IPv6 if listening over both + IPv4 and IPv6. 
Defaults to `port6` + 1 + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --self-limiter-protocols + Enables the outbound rate limiter (requests made by this node).Rate + limit quotas per protocol can be set in the form of + :/. To set quotas for multiple + protocols, separate them by ';'. If the self rate limiter is enabled + and a protocol is not present in the configuration, the quotas used + for the inbound rate limiter will be used. + --shuffling-cache-size + Some HTTP API requests can be optimised by caching the shufflings at + each epoch. This flag allows the user to set the shuffling cache size + in epochs. Shufflings are dependent on validator count and setting + this value to a large number can consume a large amount of memory. + --slasher-att-cache-size + Set the maximum number of attestation roots for the slasher to cache + --slasher-backend + Set the database backend to be used by the slasher. [possible values: + lmdb, disabled] + --slasher-broadcast [] + Broadcast slashings found by the slasher to the rest of the network + [Enabled by default]. [default: true] + --slasher-chunk-size + Number of epochs per validator per chunk stored on disk. + --slasher-dir + Set the slasher's database directory. + --slasher-history-length + Configure how many epochs of history the slasher keeps. Immutable + after initialization. + --slasher-max-db-size + Maximum size of the MDBX database used by the slasher. + --slasher-slot-offset + Set the delay from the start of the slot at which the slasher should + ingest attestations. Only effective if the slasher-update-period is a + multiple of the slot duration. + --slasher-update-period + Configure how often the slasher runs batch processing. + --slasher-validator-chunk-size + Number of validators per chunk stored on disk. + --slots-per-restore-point + Specifies how often a freezer DB restore point should be stored. + Cannot be changed after initialization. [default: 8192 (mainnet) or 64 + (minimal)] + --state-cache-size + Specifies the size of the state cache [default: 128] + --suggested-fee-recipient + Emergency fallback fee recipient for use in case the validator client + does not have one configured. You should set this flag on the + validator client instead of (or in addition to) setting it here. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --target-peers + The target number of peers. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. 
This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --trusted-peers + One or more comma-delimited trusted peer ids which always have the + highest score according to the peer scoring system. + --trusted-setup-file-override + Path to a json file containing the trusted setup params. NOTE: This + will override the trusted setup that is generated from the mainnet kzg + ceremony. Use with caution + --validator-monitor-file + As per --validator-monitor-pubkeys, but the comma-separated list is + contained within a file at the given path. + --validator-monitor-individual-tracking-threshold + Once the validator monitor reaches this number of local validators it + will stop collecting per-validator Prometheus metrics and issuing + per-validator logs. Instead, it will provide aggregate metrics and + logs. This avoids infeasibly high cardinality in the Prometheus + database and high log volume when using many validators. Defaults to + 64. + --validator-monitor-pubkeys + A comma-separated list of 0x-prefixed validator public keys. These + validators will receive special monitoring and additional logging. + --wss-checkpoint + Specify a weak subjectivity checkpoint in `block_root:epoch` format to + verify the node's sync against. The block root should be 0x-prefixed. + Note that this flag is for verification only, to perform a checkpoint + sync from a recent state use --checkpoint-sync-url. + -V, --version + Print version + +Flags: + --allow-insecure-genesis-sync + Enable syncing from genesis, which is generally insecure and + incompatible with data availability checks. Checkpoint syncing is the + preferred method for syncing a node. Only use this flag when testing. + DO NOT use on mainnet! + --always-prefer-builder-payload + This flag is deprecated and has no effect. + --always-prepare-payload + Send payload attributes with every fork choice update. This is + intended for use by block builders, relays and developers. You should + set a fee recipient on this BN and also consider adjusting the + --prepare-payload-lookahead flag. + --builder-fallback-disable-checks + This flag disables all checks related to chain health. This means the + builder API will always be used for payload construction, regardless + of recent chain conditions. + --compact-db + If present, apply compaction to the database on start-up. Use with + caution. It is generally not recommended unless auto-compaction is + disabled. + --disable-backfill-rate-limiting + Disable the backfill sync rate-limiting. This allow users to just sync + the entire chain as fast as possible, however it can result in + resource contention which degrades staking performance. Stakers should + generally choose to avoid this flag since backfill sync is not + required for staking. + --disable-deposit-contract-sync + Explicitly disables syncing of deposit logs from the execution node. 
+ This overrides any previous option that depends on it. Useful if you + intend to run a non-validating beacon node. + --disable-duplicate-warn-logs + This flag is deprecated and has no effect. + --disable-enr-auto-update + Discovery automatically updates the nodes local ENR with an external + IP address and port as seen by other peers on the network. This + disables this feature, fixing the ENR's IP/PORT to those specified on + boot. + --disable-inbound-rate-limiter + Disables the inbound rate limiter (requests received by this node). + --disable-lock-timeouts + Disable the timeouts applied to some internal locks by default. This + can lead to less spurious failures on slow hardware but is considered + experimental as it may obscure performance issues. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-optimistic-finalized-sync + Force Lighthouse to verify every execution block hash with the + execution client during finalized sync. By default block hashes will + be checked in Lighthouse and only passed to the EL if initial + verification fails. + --disable-packet-filter + Disables the discovery packet filter. Useful for testing in smaller + networks + --disable-proposer-reorgs + Do not attempt to reorg late blocks from other validators when + proposing. + --disable-quic + Disables the quic transport. The node will rely solely on the TCP + transport for libp2p connections. + --disable-self-limiter + Disables the outbound rate limiter (requests sent by this node). + --disable-upnp + Disables UPnP support. Setting this will prevent Lighthouse from + attempting to automatically establish external port mappings. + --dummy-eth1 + If present, uses an eth1 backend that generates static dummy + data.Identical to the method used at the 2019 Canada interop. + -e, --enr-match + Sets the local ENR IP address and port to match those set for + lighthouse. Specifically, the IP address will be the value of + --listen-address and the UDP port will be --discovery-port. + --enable-private-discovery + Lighthouse by default does not discover private IP addresses. Set this + flag to enable connection attempts to local addresses. + --eth1 + If present the node will connect to an eth1 node. This is required for + block production, you must use this flag if you wish to serve a + validator. + --eth1-purge-cache + Purges the eth1 block and deposit caches + --genesis-backfill + Attempts to download blocks all the way back to genesis when + checkpoint syncing. + --gui + Enable the graphical user interface and all its requirements. This + enables --http and --validator-monitor-auto and enables SSE logging. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. + --http-enable-tls + Serves the RESTful HTTP API server over TLS. This feature is currently + experimental. + --import-all-attestations + Import and aggregate all attestations, regardless of validator + subscriptions. This will only import attestations from + already-subscribed subnets, use with --subscribe-all-subnets to ensure + all attestations are received for import. + --light-client-server + Act as a full node supporting light clients on the p2p network + [experimental] + --log-color + Force outputting colors when emitting logs to the terminal. 
+ --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --private + Prevents sending various client identification information. + --proposer-only + Sets this beacon node at be a block proposer only node. This will run + the beacon node in a minimal configuration that is sufficient for + block publishing only. This flag should be used for a beacon node + being referenced by validator client using the --proposer-node flag. + This configuration is for enabling more secure setups. + --purge-db + If present, the chain database will be deleted. Use with caution. + --reconstruct-historic-states + After a checkpoint sync, reconstruct historic states in the database. + This requires syncing all the way back to genesis. + --reset-payload-statuses + When present, Lighthouse will forget the payload statuses of any + already-imported blocks. This can assist in the recovery from a + consensus failure caused by the execution layer. + --shutdown-after-sync + Shutdown beacon node as soon as sync is completed. Backfill sync will + not be performed before shutdown. + --slasher + Run a slasher alongside the beacon node. It is currently only + recommended for expert users because of the immaturity of the slasher + UX and the extra resources required. + --staking + Standard option for a staking beacon node. This will enable the HTTP + server on localhost:5052 and import deposit logs from the execution + node. This is equivalent to `--http` on merge-ready networks, or + `--http --eth1` pre-merge + --subscribe-all-subnets + Subscribe to all subnets regardless of validator count. This will also + advertise the beacon node as being long-lived subscribed to all + subnets. + --validator-monitor-auto + Enables the automatic detection and monitoring of validators connected + to the HTTP API and using the subnet subscription endpoint. This + generally has the effect of providing additional logging and metrics + for locally controlled validators. + -z, --zero-ports + Sets all listening TCP/UDP ports to 0, allowing the OS to choose some + arbitrary free ports. ``` + diff --git a/book/src/help_general.md b/book/src/help_general.md index 551f93e2bf1..84bc67a86e2 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,108 +1,141 @@ # Lighthouse General Commands ``` -Sigma Prime -Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a validator client and utilities for managing -validator accounts. +Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a +validator client and utilities for managing validator accounts. -USAGE: - lighthouse [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. 
- -l DEPRECATED Enables environment logging giving access to sub-protocol logs such - as discv5 and libp2p - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + account_manager + Utilities for generating and managing Ethereum 2.0 accounts. [aliases: + a, am, account] + beacon_node + The primary component which connects to the Ethereum 2.0 P2P network + and downloads, verifies and stores blocks. Provides a HTTP API for + querying the beacon chain and publishing messages to the network. + [aliases: b, bn, beacon] + boot_node + Start a special Lighthouse process that only serves as a discv5 + boot-node. This process will *not* import blocks or perform most + typical beacon node functions. Instead, it will simply run the discv5 + service and assist nodes on the network to discover each other. This + is the recommended way to provide a network boot-node since it has a + reduced attack surface compared to a full beacon node. + database_manager + Manage a beacon node database. [aliases: db] + validator_client + When connected to a beacon node, performs the duties of a staked + validator (e.g., proposing blocks and attestations). [aliases: v, vc, + validator] + validator_manager + Utilities for managing a Lighthouse validator client via the HTTP API. + [aliases: vm, validator-manager] + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. 
If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + -V, --version + Print version - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. 
Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - account_manager Utilities for generating and managing Ethereum 2.0 accounts. [aliases: a, am, account, - account_manager] - beacon_node The primary component which connects to the Ethereum 2.0 P2P network and downloads, - verifies and stores blocks. Provides a HTTP API for querying the beacon chain and - publishing messages to the network. [aliases: b, bn, beacon] - boot_node Start a special Lighthouse process that only serves as a discv5 boot-node. This process - will *not* import blocks or perform most typical beacon node functions. Instead, it will - simply run the discv5 service and assist nodes on the network to discover each other. 
This - is the recommended way to provide a network boot-node since it has a reduced attack surface - compared to a full beacon node. - database_manager Manage a beacon node database [aliases: db] - help Prints this message or the help of the given subcommand(s) - validator_client When connected to a beacon node, performs the duties of a staked validator (e.g., proposing - blocks and attestations). [aliases: v, vc, validator] - validator_manager Utilities for managing a Lighthouse validator client via the HTTP API. [aliases: vm, - validator-manager, validator_manager] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + -l + DEPRECATED Enables environment logging giving access to sub-protocol + logs such as discv5 and libp2p + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 670893c272e..bdcf7b60e20 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -1,34 +1,182 @@ # Validator Client ``` -When connected to a beacon node, performs the duties of a staked validator (e.g., proposing blocks and attestations). - -USAGE: - lighthouse validator_client [FLAGS] [OPTIONS] - -FLAGS: - --builder-proposals - If this flag is set, Lighthouse will query the Beacon Node for only block headers during proposals and will - sign over headers. Useful for outsourcing execution payload construction during proposals. - --disable-auto-discover - If present, do not attempt to discover new validators in the validators-dir. Validators will need to be - manually added to the validator_definitions.yml file. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning - If present, do not configure the system allocator. Providing this flag will generally increase memory usage, - it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes attestation, sync committee subscriptions and - proposer preparation messages to all beacon nodes provided in the `--beacon-nodes flag`. This option changes - that behaviour such that these api calls only go out to the first available and synced beacon node - --disable-slashing-protection-web3signer - Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC - but is only safe if slashing protection is enabled on the remote signer and is implemented correctly. DO NOT - ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL - GET SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING PROTECTION. 
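The global options and subcommand aliases listed in help_general.md above compose in the usual way. A sketch, with the network and log level chosen purely as examples:

```bash
# Global options precede the subcommand, per the documented usage line
# `lighthouse [OPTIONS] [COMMAND]`; `bn` is the documented beacon_node alias.
lighthouse --network holesky --debug-level debug bn --http --metrics
```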
- --distributed - Enables functionality required for running the validator in a distributed validator cluster. +When connected to a beacon node, performs the duties of a staked validator +(e.g., proposing blocks and attestations). +Usage: lighthouse validator_client [OPTIONS] + +Options: + --beacon-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. + Default is http://localhost:5052. + --beacon-nodes-tls-certs + Comma-separated paths to custom TLS certificates to use when + connecting to a beacon node (and/or proposer node). These certificates + must be in PEM format and are used in addition to the OS trust store. + Commas must only be used as a delimiter, and must not be part of the + certificate path. + --broadcast + Comma-separated list of beacon API topics to broadcast to all beacon + nodes. Possible values are: none, attestations, blocks, subscriptions, + sync-committee. Default (when flag is omitted) is to broadcast + subscriptions only. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-registration-timestamp-override + This flag takes a unix timestamp value that will be used to override + the timestamp used in the builder api registration + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --gas-limit + The gas limit to be used in all builder proposals for all validators + managed by this validator client. Note this will not necessarily be + used if the gas limit set here moves too far from the previous block's + gas limit. [default: 30,000,000] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --graffiti + Specify your custom graffiti to be included in blocks. + --graffiti-file + Specify a graffiti file to load validator graffitis from. + --http-address
+ Set the address for the HTTP server. The HTTP server is not encrypted + and therefore it is unsafe to publish on a public network. When this + flag is used, it additionally requires the explicit use of the + `--unencrypted-http-transport` flag to ensure the user is aware of the + risks involved. For access via the Internet, users should apply + transport-layer security like an HTTPS reverse-proxy or SSH tunnelling. + --http-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5062). + --http-port + Set the listen TCP port for the RESTful HTTP API server. + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --metrics-address
+ Set the listen address for the Prometheus metrics HTTP server. + --metrics-allow-origin + Set the value of the Access-Control-Allow-Origin response HTTP header. + Use * to allow any origin (not recommended in production). If no value + is supplied, the CORS allowed origin is set to the listen address of + this server (e.g., http://localhost:5064). + --metrics-port + Set the listen TCP port for the Prometheus metrics HTTP server. + --monitoring-endpoint
+ Enables the monitoring service for sending system metrics to a remote + endpoint. This can be used to monitor your setup on certain services + (e.g. beaconcha.in). This flag sets the endpoint where the beacon node + metrics will be sent. Note: This will send information to a remote + server which may identify and associate your validators, IP address and + other personal information. Always use an HTTPS connection and never + provide an untrusted URL. + --monitoring-endpoint-period + Defines how many seconds to wait between each message sent to the + monitoring-endpoint. Default: 60s + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --proposer-nodes + Comma-separated addresses to one or more beacon node HTTP APIs. These + specify nodes that are used to send beacon block proposals. A failure + will revert back to the standard beacon nodes specified in + --beacon-nodes. + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --secrets-dir + The directory which contains the password to unlock the validator + voting keypairs. Each password should be contained in a file where the + name is the 0x-prefixed hex representation of the validator's voting + public key. Defaults to ~/.lighthouse/{network}/secrets. + --suggested-fee-recipient + Once the merge has happened, this address will receive transaction + fees from blocks proposed by this validator client. If a fee recipient + is configured in the validator definitions it takes priority over this + value. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validator-registration-batch-size + Defines the number of validators per validator/register_validator + request sent to the BN.
This value can be reduced to avoid timeouts + from builders. [default: 500] + --validators-dir + The directory which contains the validator keystores, deposit data for + each validator along with the common slashing protection database and + the validator_definitions.yml + --web3-signer-keep-alive-timeout + Keep-alive timeout for each web3signer connection. Set to 'null' to + never timeout [default: 20000] + --web3-signer-max-idle-connections + Maximum number of idle connections to maintain per web3signer host. + Default is unlimited. + +<<<<<<< HEAD --enable-doppelganger-protection If this flag is set, Lighthouse will delay startup for three epochs and monitor for messages on the network by any of the validators managed by this client. This will result in three (possibly four) epochs worth of @@ -219,5 +367,108 @@ OPTIONS: --web3-signer-max-idle-connections Maximum number of idle connections to maintain per web3signer host. Default is unlimited. +======= +Flags: + --builder-proposals + If this flag is set, Lighthouse will query the Beacon Node for only + block headers during proposals and will sign over headers. Useful for + outsourcing execution payload construction during proposals. + --disable-auto-discover + If present, do not attempt to discover new validators in the + validators-dir. Validators will need to be manually added to the + validator_definitions.yml file. + --disable-latency-measurement-service + Disables the service that periodically attempts to measure latency to + BNs. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --disable-run-on-all + DEPRECATED. Use --broadcast. By default, Lighthouse publishes + attestation, sync committee subscriptions and proposer preparation + messages to all beacon nodes provided in the `--beacon-nodes flag`. + This option changes that behaviour such that these api calls only go + out to the first available and synced beacon node + --disable-slashing-protection-web3signer + Disable Lighthouse's slashing protection for all web3signer keys. This + can reduce the I/O burden on the VC but is only safe if slashing + protection is enabled on the remote signer and is implemented + correctly. DO NOT ENABLE THIS FLAG UNLESS YOU ARE CERTAIN THAT + SLASHING PROTECTION IS ENABLED ON THE REMOTE SIGNER. YOU WILL GET + SLASHED IF YOU USE THIS FLAG WITHOUT ENABLING WEB3SIGNER'S SLASHING + PROTECTION. + --distributed + Enables functionality required for running the validator in a + distributed validator cluster. + --enable-doppelganger-protection + If this flag is set, Lighthouse will delay startup for three epochs + and monitor for messages on the network by any of the validators + managed by this client. This will result in three (possibly four) + epochs worth of missed attestations. If an attestation is detected + during this period, it means it is very likely that you are running a + second validator client with the same keys. This validator client will + immediately shutdown if this is detected in order to avoid potentially + committing a slashable offense. Use this flag in order to ENABLE this + functionality, without this flag Lighthouse will begin attesting + immediately. + --enable-high-validator-count-metrics + Enable per validator metrics for > 64 validators. 
Note: This flag is + automatically enabled for <= 64 validators. Enabling this metric for + higher validator counts will lead to higher volume of prometheus + metrics being collected. + -h, --help + Prints help information + --http + Enable the RESTful HTTP API server. Disabled by default. + --http-allow-keystore-export + If present, allow access to the DELETE /lighthouse/keystores HTTP API + method, which allows exporting keystores and passwords to HTTP API + consumers who have access to the API token. This method is useful for + exporting validators, however it should be used with caution since it + exposes private key data to authorized users. + --http-store-passwords-in-secrets-dir + If present, any validators created via the HTTP will have keystore + passwords stored in the secrets-dir rather than the validator + definitions file. + --init-slashing-protection + If present, do not require the slashing protection database to exist + before running. You SHOULD NOT use this flag unless you're certain + that a new slashing protection database is required. Usually, your + database will have been initialized when you imported your validator + keys. If you misplace your database and then run with this flag you + risk being slashed. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --metrics + Enable the Prometheus metrics HTTP server. Disabled by default. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. + --produce-block-v3 + Enable block production via the block v3 endpoint for this validator + client. This should only be enabled when paired with a beacon node + that has this endpoint implemented. This flag will be enabled by + default in future. + --unencrypted-http-transport + This is a safety flag to ensure that the user is aware that the http + transport is unencrypted and using a custom HTTP address is unsafe. + --use-long-timeouts + If present, the validator client will use longer timeouts for requests + made to the beacon node. This flag is generally not recommended, + longer timeouts can cause missed duties when fallbacks are used. +>>>>>>> 8a32df756ddaa8831182c016311f25a3c26cf36f ``` + diff --git a/book/src/help_vm.md b/book/src/help_vm.md index db01164a92b..99a45c1a76d 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -3,96 +3,126 @@ ``` Utilities for managing a Lighthouse validator client via the HTTP API. -USAGE: - lighthouse validator_manager [FLAGS] [OPTIONS] [SUBCOMMAND] +Usage: lighthouse validator_manager [OPTIONS] [COMMAND] -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. 
- --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Commands: + create + Creates new validators from BIP-39 mnemonic. A JSON file will be + created which contains all the validator keystores and other validator + data. This file can then be imported to a validator client using the + "import-validators" command. Another, optional JSON file is created + which contains a list of validator deposits in the same format as the + "ethereum/staking-deposit-cli" tool. + import + Uploads validators to a validator client using the HTTP API. The + validators are defined in a JSON file which can be generated using the + "create-validators" command. + move + Uploads validators to a validator client using the HTTP API. The + validators are defined in a JSON file which can be generated using the + "create-validators" command. This command only supports validators + signing via a keystore on the local file system (i.e., not Web3Signer + validators). + help + Print this message or the help of the given subcommand(s) -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. 
Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. 
[possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - -SUBCOMMANDS: - create Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the - validator keystores and other validator data. This file can then be imported to a validator client - using the "import-validators" command. Another, optional JSON file is created which contains a list of - validator deposits in the same format as the "ethereum/staking-deposit-cli" tool. - help Prints this message or the help of the given subcommand(s) - import Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. - move Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file - which can be generated using the "create-validators" command. This command only supports validators - signing via a keystore on the local file system (i.e., not Web3Signer validators). +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. 
Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 2fa54265abd..1803bb534c6 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -1,138 +1,169 @@ # Validator Manager Create ``` -Creates new validators from BIP-39 mnemonic. A JSON file will be created which contains all the validator keystores and -other validator data. This file can then be imported to a validator client using the "import-validators" command. -Another, optional JSON file is created which contains a list of validator deposits in the same format as the -"ethereum/staking-deposit-cli" tool. +Creates new validators from BIP-39 mnemonic. A JSON file will be created which +contains all the validator keystores and other validator data. This file can +then be imported to a validator client using the "import-validators" command. +Another, optional JSON file is created which contains a list of validator +deposits in the same format as the "ethereum/staking-deposit-cli" tool. -USAGE: - lighthouse validator_manager create [FLAGS] [OPTIONS] --output-path +Usage: lighthouse validator_manager create [OPTIONS] --output-path -FLAGS: - --disable-deposits When provided don't generate the deposits JSON file that is commonly used - for submitting validator deposits via a web UI. Using this flag will save - several seconds per validator if the user has an alternate strategy for - submitting deposits. - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag - will generally increase memory usage, it should only be provided when - debugging specific memory allocation issues. - --force-bls-withdrawal-credentials If present, allows BLS withdrawal credentials rather than an execution - address. This is not recommended. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed - to store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can - be read by any user on the machine. Note that logs can often contain - sensitive information about your validator and so this flag should be used - with caution. For Windows users, the log file permissions will be - inherited from the parent folder. - --specify-voting-keystore-password If present, the user will be prompted to enter the voting keystore - password that will be used to encrypt the voting keystores. If this flag - is not provided, a random password will be used. It is not necessary to - keep backups of voting keystore passwords if the mnemonic is safely backed - up. 
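The validator_manager subcommands described above (create, import, move) are reached through the aliases listed in help_general.md, so the following two commands should be equivalent:

```bash
# `vm` is the documented short alias for `validator_manager`.
lighthouse validator_manager create --help
lighthouse vm create --help
```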
- --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --beacon-node + A HTTP(S) address of a beacon node using the beacon-API. If this value + is provided, an error will be raised if any validator key here is + already known as a validator by that beacon node. This helps prevent + the same validator being created twice and therefore slashable + conditions. + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] + --count + The number of validators to create, regardless of how many already + exist + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --deposit-gwei + The GWEI value of the deposit amount. Defaults to the minimum amount + required for an active validator (MAX_EFFECTIVE_BALANCE) + --eth1-withdrawal-address + If this field is set, the given eth1 address will be used to create + the withdrawal credentials. Otherwise, it will generate withdrawal + credentials with the mnemonic-derived withdrawal public key in + EIP-2334 format. + --first-index + The first of consecutive key indexes you wish to create. [default: 0] + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --mnemonic-path + If present, the mnemonic will be read in from this file. 
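A minimal sketch of the create flow using options documented above; the paths, count and network are illustrative, and depending on your deposit settings an --eth1-withdrawal-address (also documented above) may be needed as well:

```bash
# Generate two validator keystores from a mnemonic file and write the
# resulting validators (and deposits) JSON files to ./validators.
lighthouse validator_manager create \
    --network holesky \
    --first-index 0 \
    --count 2 \
    --mnemonic-path ./mnemonic.txt \
    --output-path ./validators
```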
+ --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --output-path + The path to a directory where the validator and (optionally) deposits + files will be created. The directory will be created if it does not + exist. + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. [possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. -OPTIONS: - --beacon-node - A HTTP(S) address of a beacon node using the beacon-API. If this value is provided, an error will be raised - if any validator key here is already known as a validator by that beacon node. This helps prevent the same - validator being created twice and therefore slashable conditions. - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. 
Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --deposit-gwei - The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --eth1-withdrawal-address - If this field is set, the given eth1 address will be used to create the withdrawal credentials. Otherwise, - it will generate withdrawal credentials with the mnemonic-derived withdrawal public key in EIP-2334 format. - --first-index - The first of consecutive key indexes you wish to create. [default: 0] - - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --mnemonic-path - If present, the mnemonic will be read in from this file. - - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --output-path - The path to a directory where the validator and (optionally) deposits files will be created. The directory - will be created if it does not exist. - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. 
- --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. +Flags: + --disable-deposits + When provided don't generate the deposits JSON file that is commonly + used for submitting validator deposits via a web UI. Using this flag + will save several seconds per validator if the user has an alternate + strategy for submitting deposits. + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + --force-bls-withdrawal-credentials + If present, allows BLS withdrawal credentials rather than an execution + address. This is not recommended. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. + --specify-voting-keystore-password + If present, the user will be prompted to enter the voting keystore + password that will be used to encrypt the voting keystores. If this + flag is not provided, a random password will be used. It is not + necessary to keep backups of voting keystore passwords if the mnemonic + is safely backed up. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. 
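A variant of the create sketch above using the flags in this block, skipping the deposits JSON and choosing the keystore password interactively (again illustrative only):

```bash
# --disable-deposits skips the deposits file; --specify-voting-keystore-password
# prompts for a keystore password instead of generating a random one.
lighthouse vm create \
    --output-path ./validators \
    --count 1 \
    --disable-deposits \
    --specify-voting-keystore-password
```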
``` + diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index e6ff351dac2..e18aad79589 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -1,102 +1,126 @@ # Validator Manager Import ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. -USAGE: - lighthouse validator_manager import [FLAGS] [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] --validators-file -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --ignore-duplicates If present, ignore any validators which already exist on the VC. Without this - flag, the process will terminate without making any changes. This flag should - be used with caution, whilst it does not directly cause slashable conditions, - it might be an indicator that something is amiss. Users should also be careful - to avoid submitting duplicate deposits for validators that already exist on the - VC. - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - -V, --version Prints version information +Options: + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. 
[default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators-file + The path to a JSON file containing a list of validators to be imported + to the validator client. This file is usually named "validators.json". + --vc-token + The file containing a token required by the validator client. + --vc-url + A HTTP(S) address of a validator client using the keymanager-API. If + this value is not supplied then a 'dry run' will be conducted where no + changes are made to the validator client. [default: + http://localhost:5062] -OPTIONS: - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. 
[default: info] [possible values: - info, debug, trace, warn, error, crit] - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. 
Only effective - if there is no existing database. - --validators-file - The path to a JSON file containing a list of validators to be imported to the validator client. This file is - usually named "validators.json". - --vc-token - The file containing a token required by the validator client. - - --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If this value is not supplied then a 'dry - run' will be conducted where no changes are made to the validator client. [default: http://localhost:5062] +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --ignore-duplicates + If present, ignore any validators which already exist on the VC. + Without this flag, the process will terminate without making any + changes. This flag should be used with caution, whilst it does not + directly cause slashable conditions, it might be an indicator that + something is amiss. Users should also be careful to avoid submitting + duplicate deposits for validators that already exist on the VC. + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index fe1d4c5ae94..faef0a5783d 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -1,119 +1,147 @@ # Validator Manager Move ``` -Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be -generated using the "create-validators" command. This command only supports validators signing via a keystore on the +Uploads validators to a validator client using the HTTP API. The validators are +defined in a JSON file which can be generated using the "create-validators" +command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). -USAGE: - lighthouse validator_manager move [FLAGS] [OPTIONS] --dest-vc-token --dest-vc-url --src-vc-token --src-vc-url +Usage: lighthouse validator_manager move [OPTIONS] --src-vc-token --src-vc-url --dest-vc-token --dest-vc-url -FLAGS: - --disable-log-timestamp If present, do not include timestamps in logging output. - --disable-malloc-tuning If present, do not configure the system allocator. Providing this flag will - generally increase memory usage, it should only be provided when debugging - specific memory allocation issues. - -h, --help Prints help information - --log-color Force outputting colors when emitting logs to the terminal. - --logfile-compress If present, compress old log files. This can help reduce the space needed to - store old logs. - --logfile-no-restricted-perms If present, log files will be generated as world-readable meaning they can be - read by any user on the machine. 
Note that logs can often contain sensitive - information about your validator and so this flag should be used with caution. - For Windows users, the log file permissions will be inherited from the parent - folder. - --stdin-inputs If present, read all user inputs from stdin instead of tty. - -V, --version Prints version information +Options: + --builder-boost-factor + Defines the boost factor, a percentage multiplier to apply to the + builder's payload value when choosing between a builder payload header + and payload from the local execution node. + --builder-proposals + When provided, all created validators will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] + --count + The number of validators to move. + -d, --datadir + Used to specify a custom root data directory for lighthouse keys and + databases. Defaults to $HOME/.lighthouse/{network} where network is + the value of the `network` flag Note: Users should specify separate + custom datadirs for different networks. + --debug-level + Specifies the verbosity level used when emitting logs to the terminal. + [default: info] [possible values: info, debug, trace, warn, error, + crit] + --dest-vc-token + The file containing a token required by the destination validator + client. + --dest-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This + validator client is the "destination" and will have new validators + added as they are removed from the "source" validator client. + --gas-limit + All created validators will use this gas limit. It is recommended to + leave this as the default value by not specifying this flag. + --genesis-state-url + A URL of a beacon-API compatible server from which to download the + genesis state. Checkpoint sync server URLs can generally be used with + this flag. If not supplied, a default URL or the --checkpoint-sync-url + may be used. If the genesis state is already included in this binary + then this value will be ignored. + --genesis-state-url-timeout + The timeout in seconds for the request to --genesis-state-url. + [default: 180] + --log-format + Specifies the log format used when emitting logs to the terminal. + [possible values: JSON] + --logfile + File path where the log file will be stored. Once it grows to the + value specified in `--logfile-max-size` a new log file is generated + where future logs are stored. Once the number of log files exceeds the + value specified in `--logfile-max-number` the oldest log file will be + overwritten. + --logfile-debug-level + The verbosity level used when emitting logs to the log file. [default: + debug] [possible values: info, debug, trace, warn, error, crit] + --logfile-format + Specifies the log format used when emitting logs to the logfile. + [possible values: DEFAULT, JSON] + --logfile-max-number + The maximum number of log files that will be stored. If set to 0, + background file logging is disabled. [default: 10] + --logfile-max-size + The maximum size (in MB) each log file can grow to before rotating. If + set to 0, background file logging is disabled. [default: 200] + --network + Name of the Eth2 chain Lighthouse will sync and follow. [possible + values: mainnet, gnosis, chiado, sepolia, holesky] + --prefer-builder-proposals + If this flag is set, Lighthouse will always prefer blocks constructed + by builders, regardless of payload value. 
[possible values: true, + false] + --safe-slots-to-import-optimistically + Used to coordinate manual overrides of the + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override this parameter in the event of an + attack at the PoS transition block. Incorrect use of this flag can + cause your node to possibly accept an invalid chain or sync more + slowly. Be extremely careful with this flag. + --src-vc-token + The file containing a token required by the source validator client. + --src-vc-url + A HTTP(S) address of a validator client using the keymanager-API. This + validator client is the "source" and contains the validators that are + to be moved. + --stdin-inputs + If present, read all user inputs from stdin instead of tty. + --suggested-fee-recipient + All created validators will use this value for the suggested fee + recipient. Omit this flag to use the default value from the VC. + -t, --testnet-dir + Path to directory containing eth2_testnet specs. Defaults to a + hard-coded Lighthouse testnet. Only effective if there is no existing + database. + --terminal-block-hash-epoch-override + Used to coordinate manual overrides to the + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only + be used if the user has a clear understanding that the broad Ethereum + community has elected to override the terminal PoW block. Incorrect + use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag. + --terminal-block-hash-override + Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH + parameter. This flag should only be used if the user has a clear + understanding that the broad Ethereum community has elected to + override the terminal PoW block. Incorrect use of this flag will cause + your node to experience a consensus failure. Be extremely careful with + this flag. + --terminal-total-difficulty-override + Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY + parameter. Accepts a 256-bit decimal integer (not a hex value). This + flag should only be used if the user has a clear understanding that + the broad Ethereum community has elected to override the terminal + difficulty. Incorrect use of this flag will cause your node to + experience a consensus failure. Be extremely careful with this flag. + --validators + The validators to be moved. Either a list of 0x-prefixed validator + pubkeys or the keyword "all". -OPTIONS: - --builder-boost-factor - Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing - between a builder payload header and payload from the local execution node. - --builder-proposals - When provided, all created validators will attempt to create blocks via builder rather than the local EL. - [possible values: true, false] - --count The number of validators to move. - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - Specifies the verbosity level used when emitting logs to the terminal. [default: info] [possible values: - info, debug, trace, warn, error, crit] - --dest-vc-token - The file containing a token required by the destination validator client. 
- - --dest-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "destination" - and will have new validators added as they are removed from the "source" validator client. - --gas-limit - All created validators will use this gas limit. It is recommended to leave this as the default value by not - specifying this flag. - --genesis-state-url - A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server - URLs can generally be used with this flag. If not supplied, a default URL or the --checkpoint-sync-url may - be used. If the genesis state is already included in this binary then this value will be ignored. - --genesis-state-url-timeout - The timeout in seconds for the request to --genesis-state-url. [default: 180] - - --log-format - Specifies the log format used when emitting logs to the terminal. [possible values: JSON] - - --logfile - File path where the log file will be stored. Once it grows to the value specified in `--logfile-max-size` a - new log file is generated where future logs are stored. Once the number of log files exceeds the value - specified in `--logfile-max-number` the oldest log file will be overwritten. - --logfile-debug-level - The verbosity level used when emitting logs to the log file. [default: debug] [possible values: info, - debug, trace, warn, error, crit] - --logfile-format - Specifies the log format used when emitting logs to the logfile. [possible values: DEFAULT, JSON] - - --logfile-max-number - The maximum number of log files that will be stored. If set to 0, background file logging is disabled. - [default: 5] - --logfile-max-size - The maximum size (in MB) each log file can grow to before rotating. If set to 0, background file logging is - disabled. [default: 200] - --network - Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, prater, goerli, gnosis, - chiado, sepolia, holesky] - --prefer-builder-proposals - If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload - value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - this parameter in the event of an attack at the PoS transition block. Incorrect use of this flag can cause - your node to possibly accept an invalid chain or sync more slowly. Be extremely careful with this flag. - --src-vc-token - The file containing a token required by the source validator client. - - --src-vc-url - A HTTP(S) address of a validator client using the keymanager-API. This validator client is the "source" and - contains the validators that are to be moved. - --suggested-fee-recipient - All created validators will use this value for the suggested fee recipient. Omit this flag to use the - default value from the VC. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should - only be used if the user has a clear understanding that the broad Ethereum community has elected to override - the terminal PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. - Be extremely careful with this flag. 
- --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. This flag should only be used if - the user has a clear understanding that the broad Ethereum community has elected to override the terminal - PoW block. Incorrect use of this flag will cause your node to experience a consensus failure. Be extremely - careful with this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. Accepts a 256-bit decimal - integer (not a hex value). This flag should only be used if the user has a clear understanding that the - broad Ethereum community has elected to override the terminal difficulty. Incorrect use of this flag will - cause your node to experience a consensus failure. Be extremely careful with this flag. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --validators - The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". +Flags: + --disable-log-timestamp + If present, do not include timestamps in logging output. + --disable-malloc-tuning + If present, do not configure the system allocator. Providing this flag + will generally increase memory usage, it should only be provided when + debugging specific memory allocation issues. + -h, --help + Prints help information + --log-color + Force outputting colors when emitting logs to the terminal. + --logfile-compress + If present, compress old log files. This can help reduce the space + needed to store old logs. + --logfile-no-restricted-perms + If present, log files will be generated as world-readable meaning they + can be read by any user on the machine. Note that logs can often + contain sensitive information about your validator and so this flag + should be used with caution. For Windows users, the log file + permissions will be inherited from the parent folder. ``` + diff --git a/book/src/homebrew.md b/book/src/homebrew.md index 486de371f86..da92dcb26ce 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -5,7 +5,7 @@ Lighthouse is available on Linux and macOS via the [Homebrew package manager](ht Please note that this installation method is maintained by the Homebrew community. It is not officially supported by the Lighthouse team. -### Installation +## Installation Install the latest version of the [`lighthouse`][formula] formula with: @@ -13,7 +13,7 @@ Install the latest version of the [`lighthouse`][formula] formula with: brew install lighthouse ``` -### Usage +## Usage If Homebrew is installed to your `PATH` (default), simply run: @@ -27,7 +27,7 @@ Alternatively, you can find the `lighthouse` binary at: "$(brew --prefix)/bin/lighthouse" --help ``` -### Maintenance +## Maintenance The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 30bf03e14ee..580b5c19d45 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -30,16 +30,16 @@ a `x86_64` binary. 1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and select the latest release. 1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu.tar.gz` binary. 
For example, to obtain the binary file for v4.0.1 (the latest version at the time of writing), a user can run the following commands in a linux terminal: + ```bash cd ~ curl -LO https://github.com/sigp/lighthouse/releases/download/v4.0.1/lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz tar -xvf lighthouse-v4.0.1-x86_64-unknown-linux-gnu.tar.gz ``` + 1. Test the binary with `./lighthouse --version` (it should print the version). 1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere. For example, to copy `lighthouse` from the current directory to `usr/bin`, run `sudo cp lighthouse /usr/bin`. - - > Windows users will need to execute the commands in Step 2 from PowerShell. ## Portability @@ -49,10 +49,10 @@ sacrifice the ability to make use of modern CPU instructions. If you have a modern CPU then you should try running a non-portable build to get a 20-30% speed up. -* For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set +- For **x86_64**, any CPU supporting the [ADX](https://en.wikipedia.org/wiki/Intel_ADX) instruction set extension is compatible with the optimized build. This includes Intel Broadwell (2014) and newer, and AMD Ryzen (2017) and newer. -* For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by +- For **ARMv8**, most CPUs are compatible with the optimized build, including the Cortex-A72 used by the Raspberry Pi 4. ## Troubleshooting diff --git a/book/src/installation-source.md b/book/src/installation-source.md index c2f5861576d..be03a189de7 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -23,7 +23,7 @@ The rustup installer provides an easy way to update the Rust compiler, and works With Rust installed, follow the instructions below to install dependencies relevant to your operating system. -#### Ubuntu +### Ubuntu Install the following packages: @@ -42,7 +42,7 @@ sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-de After this, you are ready to [build Lighthouse](#build-lighthouse). -#### Fedora/RHEL/CentOS +### Fedora/RHEL/CentOS Install the following packages: @@ -52,7 +52,7 @@ yum -y install git make perl clang cmake After this, you are ready to [build Lighthouse](#build-lighthouse). -#### macOS +### macOS 1. Install the [Homebrew][] package manager. 1. Install CMake using Homebrew: @@ -61,21 +61,22 @@ After this, you are ready to [build Lighthouse](#build-lighthouse). brew install cmake ``` - [Homebrew]: https://brew.sh/ After this, you are ready to [build Lighthouse](#build-lighthouse). -#### Windows +### Windows 1. Install [Git](https://git-scm.com/download/win). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. > Tips: > - Use PowerShell to install. In Windows, search for PowerShell and run as administrator. > - You must ensure `Get-ExecutionPolicy` is not Restricted. To test this, run `Get-ExecutionPolicy` in PowerShell. If it returns `restricted`, then run `Set-ExecutionPolicy AllSigned`, and then run + ```bash Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) ``` + > - To verify that Chocolatey is ready, run `choco` and it should return the version. 1. 
Install Make, CMake and LLVM using Chocolatey: @@ -158,14 +159,14 @@ FEATURES=gnosis,slasher-lmdb make Commonly used features include: -* `gnosis`: support for the Gnosis Beacon Chain. -* `portable`: support for legacy hardware. -* `modern`: support for exclusively modern hardware. -* `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. -* `slasher-mdbx`: support for the MDBX slasher backend. -* `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. +- `gnosis`: support for the Gnosis Beacon Chain. +- `portable`: support for legacy hardware. +- `modern`: support for exclusively modern hardware. +- `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. +- `slasher-mdbx`: support for the MDBX slasher backend. +- `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. -* `spec-minimal`: support for the minimal preset (useful for testing). +- `spec-minimal`: support for the minimal preset (useful for testing). Default features (e.g. `slasher-lmdb`) may be opted out of using the `--no-default-features` argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. @@ -184,9 +185,9 @@ You can customise the compiler settings used to compile Lighthouse via Lighthouse includes several profiles which can be selected via the `PROFILE` environment variable. -* `release`: default for source builds, enables most optimisations while not taking too long to +- `release`: default for source builds, enables most optimisations while not taking too long to compile. -* `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. +- `maxperf`: default for binary releases, enables aggressive optimisations including full LTO. Although compiling with this profile improves some benchmarks by around 20% compared to `release`, it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU. diff --git a/book/src/installation.md b/book/src/installation.md index e8caf5c4577..a0df394bd2d 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -19,20 +19,17 @@ There are also community-maintained installation methods: - Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). - - ## Recommended System Requirements -Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. +Before [The Merge](https://ethereum.org/en/roadmap/merge/), Lighthouse was able to run on its own with low to mid-range consumer hardware, but would perform best when provided with ample system resources. After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, it is necessary to run Lighthouse together with an execution client ([Nethermind](https://nethermind.io/), [Besu](https://www.hyperledger.org/use/besu), [Erigon](https://github.com/ledgerwatch/erigon), [Geth](https://geth.ethereum.org/)). 
The following system requirements listed are therefore for running a Lighthouse beacon node combined with an execution client , and a validator client with a modest number of validator keys (less than 100): +- CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer +- Memory: 32 GB RAM* +- Storage: 2 TB solid state drive +- Network: 100 Mb/s download, 20 Mb/s upload broadband connection -* CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* Memory: 32 GB RAM* -* Storage: 2 TB solid state drive -* Network: 100 Mb/s download, 20 Mb/s upload broadband connection - -> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend users to have at least 32 GB RAM for long term health of the node, while also giving users the flexibility to change client should the thought arise. +> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend users to have at least 32 GB RAM for long term health of the node, while also giving users the flexibility to change client should the thought arise. Last update: April 2023 diff --git a/book/src/intro.md b/book/src/intro.md index ef16913d686..9892a8a49db 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -24,7 +24,6 @@ You may read this book from start to finish, or jump to some of these topics: - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. - Prospective contributors can read the [Contributing](./contributing.md) section to understand how we develop and test Lighthouse. diff --git a/book/src/key-management.md b/book/src/key-management.md index b2bb7737fd4..fa6e99a2aa6 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -40,29 +40,32 @@ to secure BTC, ETH and many other coins. We defined some terms in the context of validator key management: - **Mnemonic**: a string of 24 words that is designed to be easy to write down - and remember. E.g., _"radar fly lottery mirror fat icon bachelor sadness - type exhaust mule six beef arrest you spirit clog mango snap fox citizen - already bird erase"_. - - Defined in BIP-39 + and remember. E.g., _"radar fly lottery mirror fat icon bachelor sadness + type exhaust mule six beef arrest you spirit clog mango snap fox citizen + already bird erase"_. + - Defined in BIP-39 - **Wallet**: a wallet is a JSON file which stores an - encrypted version of a mnemonic. - - Defined in EIP-2386 + encrypted version of a mnemonic. + - Defined in EIP-2386 - **Keystore**: typically created by wallet, it contains a single encrypted BLS - keypair. - - Defined in EIP-2335. + keypair. + - Defined in EIP-2335. 
- **Voting Keypair**: a BLS public and private keypair which is used for - signing blocks, attestations and other messages on regular intervals in the beacon chain. + signing blocks, attestations and other messages on regular intervals in the beacon chain. - **Withdrawal Keypair**: a BLS public and private keypair which will be - required _after_ Phase 0 to manage ETH once a validator has exited. + required _after_ Phase 0 to manage ETH once a validator has exited. ## Create a validator + There are 2 steps involved to create a validator key using Lighthouse: + 1. [Create a wallet](#step-1-create-a-wallet-and-record-the-mnemonic) 1. [Create a validator](#step-2-create-a-validator) The following example demonstrates how to create a single validator key. ### Step 1: Create a wallet and record the mnemonic + A wallet allows for generating practically unlimited validators from an easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is backed up, all validator keys can be trivially re-generated. @@ -72,40 +75,43 @@ mnemonic is encrypted with a password. It is the responsibility of the user to define a strong password. The password is only required for interacting with the wallet, it is not required for recovering keys from a mnemonic. -To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and saves it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved +To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Holesky testnet named `wally` and saves it in `~/.lighthouse/holesky/wallets` with a randomly generated password saved to `./wallet.pass`: ```bash -lighthouse --network goerli account wallet create --name wally --password-file wally.pass +lighthouse --network holesky account wallet create --name wally --password-file wally.pass ``` -Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name + +Using the above command, a wallet will be created in `~/.lighthouse/holesky/wallets` with the name `wally`. It is encrypted using the password defined in the -`wally.pass` file. +`wally.pass` file. During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss. > Notes: -> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. +> +> - When navigating to the directory `~/.lighthouse/holesky/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. > - The password is not `wally.pass`, it is the _content_ of the > `wally.pass` file. > - If `wally.pass` already exists, the wallet password will be set to the content > of that file. ### Step 2: Create a validator + Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. 
With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: ```bash -lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1 +lighthouse --network holesky account validator create --wallet-name wally --wallet-password wally.pass --count 1 ``` -This command will: -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time. -- Create a new directory `~/.lighthouse/goerli/validators` containing: - - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. - - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit - contract for the Goerli testnet. Other networks can be set via the - `--network` parameter. -- Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair. +This command will: +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/holesky/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/holesky/validators` containing: + - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. + - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit + contract for the Holesky testnet. Other networks can be set via the + `--network` parameter. +- Create a new directory `~/.lighthouse/holesky/secrets` which stores a password to the validator's voting keypair. If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. @@ -116,16 +122,16 @@ If you want to create another validator in the future, repeat [Step 2](#step-2-c There are three important directories in Lighthouse validator key management: - `wallets/`: contains encrypted wallets which are used for hierarchical - key derivation. - - Defaults to `~/.lighthouse/{network}/wallets` + key derivation. + - Defaults to `~/.lighthouse/{network}/wallets` - `validators/`: contains a directory for each validator containing - encrypted keystores and other validator-specific data. - - Defaults to `~/.lighthouse/{network}/validators` + encrypted keystores and other validator-specific data. + - Defaults to `~/.lighthouse/{network}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process - needs access to the passwords to decrypt the keystores in the validators - directory. These passwords are stored here. - - Defaults to `~/.lighthouse/{network}/secrets` - + needs access to the passwords to decrypt the keystores in the validators + directory. These passwords are stored here. + - Defaults to `~/.lighthouse/{network}/secrets` + where `{network}` is the name of the network passed in the `--network` parameter.
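As an illustration, once a validator has been created with the Holesky commands above, the three directories can be inspected directly. This listing is only a sketch: the subdirectory names are hexadecimal wallet identifiers and validator public keys, so they will differ on every machine.

```bash
# List the default key-management directories for the holesky network.
# Subdirectory names (wallet IDs, validator public keys) vary per machine.
ls ~/.lighthouse/holesky/wallets \
   ~/.lighthouse/holesky/validators \
   ~/.lighthouse/holesky/secrets
```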
When the validator client boots, it searches the `validators/` for directories diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index a996e95cbc5..a0593ddd94b 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -1,6 +1,5 @@ # Key Recovery - Generally, validator keystore files are generated alongside a *mnemonic*. If the keystore and/or the keystore password are lost, this mnemonic can regenerate a new, equivalent keystore with a new password. @@ -8,9 +7,9 @@ regenerate a new, equivalent keystore with a new password. There are two ways to recover keys using the `lighthouse` CLI: - `lighthouse account validator recover`: recover one or more EIP-2335 keystores from a mnemonic. - These keys can be used directly in a validator client. + These keys can be used directly in a validator client. - `lighthouse account wallet recover`: recover an EIP-2386 wallet from a - mnemonic. + mnemonic. ## ⚠️ Warning @@ -18,10 +17,10 @@ There are two ways to recover keys using the `lighthouse` CLI: resort.** Key recovery entails significant risks: - Exposing your mnemonic to a computer at any time puts it at risk of being - compromised. Your mnemonic is **not encrypted** and is a target for theft. + compromised. Your mnemonic is **not encrypted** and is a target for theft. - It's completely possible to regenerate a validator keypairs that is already active - on some other validator client. Running the same keypairs on two different - validator clients is very likely to result in slashing. + on some other validator client. Running the same keypairs on two different + validator clients is very likely to result in slashing. ## Recover EIP-2335 validator keystores @@ -32,7 +31,6 @@ index on the same mnemonic always results in the same validator keypair being generated (see [EIP-2334](https://eips.ethereum.org/EIPS/eip-2334) for more detail). - Using the `lighthouse account validator recover` command you can generate the keystores that correspond to one or more indices in the mnemonic: @@ -41,7 +39,6 @@ keystores that correspond to one or more indices in the mnemonic: - `lighthouse account validator recover --first-index 1`: recover only index `1`. - `lighthouse account validator recover --first-index 1 --count 2`: recover indices `1, 2`. - For each of the indices recovered in the above commands, a directory will be created in the `--validator-dir` location (default `~/.lighthouse/{network}/validators`) which contains all the information necessary to run a validator using the diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 81098715f3f..106a5e89472 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -23,7 +23,7 @@ information: - [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup - and configure Siren. + and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. - [Usage](./ui-usage.md) - Details various Siren components. - [FAQs](./ui-faqs.md) - Frequently Asked Questions. 
diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 942ca09b8ec..c53be97ccf9 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -1,7 +1,6 @@ # Become an Ethereum Consensus Mainnet Validator [launchpad]: https://launchpad.ethereum.org/ -[lh-book]: https://lighthouse-book.sigmaprime.io/ [advanced-datadir]: ./advanced-datadir.md [license]: https://github.com/sigp/lighthouse/blob/stable/LICENSE [slashing]: ./slashing-protection.md @@ -18,7 +17,6 @@ Being educated is critical to a validator's success. Before submitting your main - Reading through this documentation, especially the [Slashing Protection][slashing] section. - Performing a web search and doing your own research. - > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages > occurred through the use of Lighthouse. We have an experienced internal security team and have @@ -27,7 +25,6 @@ Being educated is critical to a validator's success. Before submitting your main > due to the actions of other actors on the consensus layer or software bugs. See the > [software license][license] for more detail on liability. - ## Become a validator There are five primary steps to become a validator: @@ -39,23 +36,24 @@ There are five primary steps to become a validator: 1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator) > **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend *all* users to **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking -hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". +hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy". > **Never use real ETH to join a testnet!** Testnet such as the Holesky testnet uses Holesky ETH which is worthless. This allows experimentation without real-world costs. ### Step 1. Create validator keys The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command: + ```bash ./deposit new-mnemonic ``` + and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `holesky` if you want to run a Holesky testnet validator. A new mnemonic will be generated in the process. > **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. 
- > Lighthouse also supports creating validator keys, see [Key management](./key-management.md) for more info. ### Step 2. Start an execution client and Lighthouse beacon node @@ -64,15 +62,17 @@ Start an execution client and Lighthouse beacon node according to the [Run a Nod ### Step 3. Import validator keys to Lighthouse -In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that +In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: Mainnet: + ```bash lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` Holesky testnet: + ```bash lighthouse --network holesky account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` @@ -85,7 +85,6 @@ lighthouse --network holesky account validator import --directory $HOME/staking- > Docker users should use the command from the [Docker](#docker-users) documentation. - The user will be prompted for a password for each keystore discovered: ``` @@ -122,11 +121,10 @@ WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR Y Once you see the above message, you have successfully imported the validator keys. You can now proceed to the next step to start the validator client. - ### Step 4. Start Lighthouse validator client After the keys are imported, the user can start performing their validator duties -by starting the Lighthouse validator client `lighthouse vc`: +by starting the Lighthouse validator client `lighthouse vc`: Mainnet: @@ -135,11 +133,12 @@ lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddres ``` Holesky testnet: + ```bash lighthouse vc --network holesky --suggested-fee-recipient YourFeeRecipientAddress ``` -The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. +The `validator client` manages validators using data obtained from the beacon node via a HTTP API. You are highly recommended to enter a fee-recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control. When `lighthouse vc` starts, check that the validator public key appears as a `voting_pubkey` as shown below: @@ -156,9 +155,9 @@ by the protocol. After you have successfully run and synced the execution client, beacon node and validator client, you can now proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Holesky staking launchpad](https://holesky.launchpad.ethereum.org/en/) for testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32ETH per validator to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad. -> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. 
+> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction. -Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. +Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info. Once your validator is activated, the validator client will start to publish attestations each epoch: @@ -172,10 +171,11 @@ If you propose a block, the log will look like: Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block ``` -Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network. +Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network. ### What is next? -After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/) which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly. + +After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/) which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly. The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [Github](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notification on new releases, select `Custom`, then `Releases`. You could also join [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release. @@ -202,9 +202,10 @@ Here we use two `-v` volumes to attach: - `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. - The `validator_keys` directory in the present working directory of the host - to the `/root/validator_keys` directory of the Docker container. + to the `/root/validator_keys` directory of the Docker container. 
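To make the two volumes above concrete, a minimal sketch of the import step run through Docker could look like the following. The `sigp/lighthouse` image name and the paths are assumptions here; substitute whichever image, tag and directories match your setup.

```bash
# Illustrative only: mount the datadir and the generated keys (as described
# above) and run the same import command inside the container.
docker run -it \
  -v $HOME/.lighthouse:/root/.lighthouse \
  -v $(pwd)/validator_keys:/root/validator_keys \
  sigp/lighthouse \
  lighthouse --network mainnet account validator import --directory /root/validator_keys
```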
### Start Lighthouse beacon node and validator client + Those using Docker images can start the processes with: ```bash @@ -222,8 +223,5 @@ $ docker run \ lighthouse --network mainnet vc ``` - If you get stuck you can always reach out on our [Discord][discord] or [create an issue](https://github.com/sigp/lighthouse/issues/new). - - diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md index a5769162b03..6de05cff2a4 100644 --- a/book/src/merge-migration.md +++ b/book/src/merge-migration.md @@ -16,7 +16,7 @@ the merge: be made to your `lighthouse vc` configuration, and are covered on the [Suggested fee recipient](./suggested-fee-recipient.md) page. -Additionally, you _must_ update Lighthouse to v3.0.0 (or later), and must update your execution +Additionally, you *must* update Lighthouse to v3.0.0 (or later), and must update your execution engine to a merge-ready version. ## When? @@ -27,7 +27,7 @@ All networks (**Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln | Network | Bellatrix | The Merge | Remark | |---------|-------------------------------|-------------------------------| -----------| -| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | +| Ropsten | 2nd June 2022 | 8th June 2022 | Deprecated | | Sepolia | 20th June 2022 | 6th July 2022 | | | Goerli | 4th August 2022 | 10th August 2022 | Previously named `Prater`| | Mainnet | 6th September 2022| 15th September 2022| | @@ -55,7 +55,7 @@ has the authority to control the execution engine. > needing to pass a jwt secret file. The execution engine connection must be **exclusive**, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please +per beacon node. The reason for this is that the beacon node *controls* the execution node. Please see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not supported. @@ -173,7 +173,7 @@ client to be able to connect to the beacon node. ### Can I use `http://localhost:8545` for the execution endpoint? Most execution nodes use port `8545` for the Ethereum JSON-RPC API. Unless custom configuration is -used, an execution node _will not_ provide the necessary engine API on port `8545`. You should +used, an execution node *will not* provide the necessary engine API on port `8545`. You should not attempt to use `http://localhost:8545` as your engine URL and should instead use `http://localhost:8551`. @@ -209,4 +209,3 @@ guidance for specific setups. - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). - [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/archived-guides/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) - [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/) -- [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md index e5a0a97c6cf..26003e1f2fe 100644 --- a/book/src/partial-withdrawal.md +++ b/book/src/partial-withdrawal.md @@ -2,12 +2,13 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023: - - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. 
- - if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. +- if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. +- if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you shall expect to receive the rewards approximately every 5 days. + +## FAQ -### FAQ 1. How to know if I have the withdrawal credentials type `0x00` or `0x01`? - + Refer [here](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01). 2. My validator has withdrawal credentials type `0x00`, is there a deadline to update my withdrawal credentials? @@ -16,8 +17,8 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`? - No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). + No. The "validator sweep" occurs automatically and you can expect to receive the rewards every *n* days, [more information here](./voluntary-exit.md#4-when-will-i-get-my-staked-fund-after-voluntary-exit-if-my-validator-is-of-type-0x01). Figure below summarizes partial withdrawals. - ![partial](./imgs/partial-withdrawal.png) \ No newline at end of file + ![partial](./imgs/partial-withdrawal.png) diff --git a/book/src/pi.md b/book/src/pi.md index 2fea91ad179..b91ecab548c 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -4,22 +4,21 @@ Tested on: - - Raspberry Pi 4 Model B (4GB) - - `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` - +- Raspberry Pi 4 Model B (4GB) +- `Ubuntu 20.04 LTS (GNU/Linux 5.4.0-1011-raspi aarch64)` *Note: [Lighthouse supports cross-compiling](./cross-compiling.md) to target a Raspberry Pi (`aarch64`). Compiling on a faster machine (i.e., `x86_64` desktop) may be convenient.* -### 1. Install Ubuntu +## 1. Install Ubuntu Follow the [Ubuntu Raspberry Pi installation instructions](https://ubuntu.com/download/raspberry-pi). **A 64-bit version is required** A graphical environment is not required in order to use Lighthouse. Only the terminal and an Internet connection are necessary. -### 2. Install Packages +## 2. Install Packages Install the Ubuntu dependencies: @@ -32,7 +31,7 @@ sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-de > - If there are difficulties, try updating the package manager with `sudo apt > update`. -### 3. Install Rust +## 3. Install Rust Install Rust as per [rustup](https://rustup.rs/): @@ -47,7 +46,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh > be found, run `source $HOME/.cargo/env`. 
After that, running `cargo version` should return the version, for example `cargo 1.68.2`. > - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`. -### 4. Install Lighthouse +## 4. Install Lighthouse ```bash git clone https://github.com/sigp/lighthouse.git diff --git a/book/src/redundancy.md b/book/src/redundancy.md index bd1976f9503..ee685a17cf7 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -1,7 +1,5 @@ # Redundancy -[subscribe-api]: https://ethereum.github.io/beacon-APIs/#/Validator/prepareBeaconCommitteeSubnet - There are three places in Lighthouse where redundancy is notable: 1. ✅ GOOD: Using a redundant beacon node in `lighthouse vc --beacon-nodes` @@ -38,9 +36,9 @@ duties as long as *at least one* of the beacon nodes is available. There are a few interesting properties about the list of `--beacon-nodes`: - *Ordering matters*: the validator client prefers a beacon node that is - earlier in the list. + earlier in the list. - *Synced is preferred*: the validator client prefers a synced beacon node over - one that is still syncing. + one that is still syncing. - *Failure is sticky*: if a beacon node fails, it will be flagged as offline and won't be retried again for the rest of the slot (12 seconds). This helps prevent the impact of time-outs and other lengthy errors. @@ -49,7 +47,6 @@ There are a few interesting properties about the list of `--beacon-nodes`: > provided (if it is desired). It will only be used as default if no `--beacon-nodes` flag is > provided at all. - ### Configuring a redundant Beacon Node In our previous example, we listed `http://192.168.1.1:5052` as a redundant @@ -58,8 +55,10 @@ following flags: - `--http`: starts the HTTP API server. - `--http-address local_IP`: where `local_IP` is the private IP address of the computer running the beacon node. This is only required if your backup beacon node is on a different host. + > Note: You could also use `--http-address 0.0.0.0`, but this allows *any* external IP address to access the HTTP server. As such, a firewall should be configured to deny unauthorized access to port `5052`. - - `--execution-endpoint`: see [Merge Migration](./merge-migration.md). + +- `--execution-endpoint`: see [Merge Migration](./merge-migration.md). - `--execution-jwt`: see [Merge Migration](./merge-migration.md). For example one could use the following command to provide a backup beacon node: @@ -107,7 +106,7 @@ The default is `--broadcast subscriptions`. To also broadcast blocks for example ## Redundant execution nodes Lighthouse previously supported redundant execution nodes for fetching data from the deposit -contract. On merged networks _this is no longer supported_. Each Lighthouse beacon node must be +contract. On merged networks *this is no longer supported*. Each Lighthouse beacon node must be configured in a 1:1 relationship with an execution node. For more information on the rationale behind this decision please see the [Merge Migration](./merge-migration.md) documentation. diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md index ab42c0c10a5..6c1f23d8e8e 100644 --- a/book/src/run_a_node.md +++ b/book/src/run_a_node.md @@ -8,9 +8,8 @@ You should be finished with one [Installation](./installation.md) method of your 1. Set up a [beacon node](#step-3-set-up-a-beacon-node-using-lighthouse); 1. 
[Check logs for sync status](#step-4-check-logs-for-sync-status); - - ## Step 1: Create a JWT secret file + A JWT secret file is used to secure the communication between the execution client and the consensus client. In this step, we will create a JWT secret file which will be used in later steps. ```bash @@ -21,18 +20,15 @@ openssl rand -hex 32 | tr -d "\n" | sudo tee /secrets/jwt.hex ## Step 2: Set up an execution node The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions present in blocks. The execution engine connection must be *exclusive*, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. Select an execution client from the list below and run it: - +per beacon node. The reason for this is that the beacon node *controls* the execution node. Select an execution client from the list below and run it: - [Nethermind](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) - [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) - [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) - [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) - > Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. - Once the execution client is up, just let it continue running. The execution client will start syncing when it connects to a beacon node. Depending on the execution client and computer hardware specifications, syncing can take from a few hours to a few days. You can safely proceed to Step 3 to set up a beacon node while the execution client is still syncing. ## Step 3: Set up a beacon node using Lighthouse @@ -50,9 +46,10 @@ lighthouse bn \ --http ``` -> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. +> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. + +Notable flags: -Notable flags: - `--network` flag, which selects a network: - `lighthouse` (no flag): Mainnet. - `lighthouse --network mainnet`: Mainnet. @@ -71,14 +68,11 @@ provide a `--network` flag instead of relying on the default. - `--checkpoint-sync-url`: Lighthouse supports fast sync from a recent finalized checkpoint. Checkpoint sync is *optional*; however, we **highly recommend** it since it is substantially faster than syncing from genesis while still providing the same functionality. The checkpoint sync is done using [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) provided by the Ethereum community. For example, in the above command, we use the URL for Sigma Prime's checkpoint sync server for mainnet `https://mainnet.checkpoint.sigp.io`. - `--http`: to expose an HTTP server of the beacon chain. The default listening address is `http://localhost:5052`. The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. 
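Once the beacon node is running with `--http`, a quick way to confirm that the HTTP API is reachable is to query one of the standard endpoints. A minimal check, assuming the default listening address:

```bash
# Returns the Lighthouse version string if the beacon node HTTP API is up.
curl http://localhost:5052/eth/v1/node/version
```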
- - If you intend to run the beacon node without running the validator client (e.g., for non-staking purposes such as supporting the network), you can modify the above command so that the beacon node is configured for non-staking purposes: - ### Non-staking -``` +``` lighthouse bn \ --network mainnet \ --execution-endpoint http://localhost:8551 \ @@ -89,16 +83,14 @@ lighthouse bn \ Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node. - - Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. - - ## Step 4: Check logs for sync status -Several logs help you identify if Lighthouse is running correctly. + +Several logs help you identify if Lighthouse is running correctly. ### Logs - Checkpoint sync + If you run Lighthouse with the flag `--checkpoint-sync-url`, Lighthouse will print a message to indicate that checkpoint sync is being used: ``` @@ -147,11 +139,11 @@ as `verified` indicating that they have been processed successfully by the execu INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 ``` -Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. +Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. ## Further readings -Several other resources are the next logical step to explore after running your beacon node: +Several other resources are the next logical step to explore after running your beacon node: - If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); - Explore how to [manage your keys](./key-management.md); diff --git a/book/src/setup.md b/book/src/setup.md index c678b4387a2..d3da68f97cc 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,6 +9,7 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: + - [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you don't have `anvil` available on your `PATH`. @@ -17,10 +18,11 @@ The additional requirements for developers are: - [`java 17 runtime`](https://openjdk.java.net/projects/jdk/). 17 is the minimum, used by web3signer_tests. - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as - `libpq-devel` on some systems. + `libpq-devel` on some systems. - [`docker`](https://www.docker.com/). Some tests need docker installed and **running**. ## Using `make` + Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. We list some of these commands below so you can run them locally and avoid CI failures: @@ -31,7 +33,7 @@ you can run them locally and avoid CI failures: - `$ make test-ef`: (medium) runs the Ethereum Foundation test vectors. - `$ make test-full`: (slow) runs the full test suite (including all previous commands). This is approximately everything - that is required to pass CI. + that is required to pass CI. _The lighthouse test suite is quite extensive, running the whole suite may take 30+ minutes._ @@ -80,6 +82,7 @@ test result: ok. 
3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; fini Alternatively, since `lighthouse` is a cargo workspace you can use `-p eth2_ssz` where `eth2_ssz` is the package name as defined `/consensus/ssz/Cargo.toml` + ```bash $ head -2 consensus/ssz/Cargo.toml [package] @@ -120,13 +123,14 @@ test src/lib.rs - (line 10) ... ok test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.15s$ cargo test -p eth2_ssz ``` -#### test_logger +### test_logger The test_logger, located in `/common/logging/` can be used to create a `Logger` that by default returns a NullLogger. But if `--features 'logging/test_logger'` is passed while testing the logs are displayed. This can be very helpful while debugging tests. Example: + ``` $ cargo test -p beacon_chain validator_pubkey_cache::test::basic_operation --features 'logging/test_logger' Finished test [unoptimized + debuginfo] target(s) in 0.20s diff --git a/book/src/slasher.md b/book/src/slasher.md index 79a2d1f8ebe..3310f6c9eff 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -8,6 +8,7 @@ extra income for your validators. However it is currently only recommended for e of the immaturity of the slasher UX and the extra resources required. ## Minimum System Requirements + * Quad-core CPU * 16 GB RAM * 256 GB solid state storage (in addition to the space requirement for the beacon node DB) @@ -47,8 +48,8 @@ directory. It is possible to use one of several database backends with the slasher: -- LMDB (default) -- MDBX +* LMDB (default) +* MDBX The advantage of MDBX is that it performs compaction, resulting in less disk usage over time. The disadvantage is that upstream MDBX is unstable, so Lighthouse is pinned to a specific version. @@ -113,13 +114,13 @@ changed after initialization. * Flag: `--slasher-max-db-size GIGABYTES` * Argument: maximum size of the database in gigabytes -* Default: 256 GB +* Default: 512 GB Both database backends LMDB and MDBX place a hard limit on the size of the database file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. -By default the limit is set to accommodate the default history length and around 600K validators (with about 30% headroom) but +By default the limit is set to accommodate the default history length and around 1 million validators but you can set it lower if running with a reduced history length. The space required scales approximately linearly in validator count and history length, i.e. if you halve either you can halve the space required. diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index 38348d2094c..2d580f1c312 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -65,17 +65,17 @@ interchange file is a record of blocks and attestations signed by a set of valid basically a portable slashing protection database! To import a slashing protection database to Lighthouse, you first need to export your existing client's database. 
Instructions to export the slashing protection database for other clients are listed below: -- [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) -- [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) -- [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) -- [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) +* [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) +* [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) +* [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) +* [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) Once you have the slashing protection database from your existing client, you can now import the database to Lighthouse. With your validator client stopped, you can import a `.json` interchange file from another client using this command: ```bash -lighthouse account validator slashing-protection import +lighthouse account validator slashing-protection import filename.json ``` When importing an interchange file, you still need to import the validator keystores themselves @@ -86,7 +86,7 @@ separately, using the instructions for [import validator keys](./mainnet-validat You can export Lighthouse's database for use with another client with this command: ``` -lighthouse account validator slashing-protection export +lighthouse account validator slashing-protection export filename.json ``` The validator client needs to be stopped in order to export, to guarantee that the data exported is diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 44accbd143b..4a9be7b963a 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -9,14 +9,14 @@ During post-merge block production, the Beacon Node (BN) will provide a `suggest the execution node. This is a 20-byte Ethereum address which the execution node might choose to set as the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, -it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. +it may use any address it chooses. It is assumed that an honest execution node _will_ use the +`suggested_fee_recipient`, but users should note this trust assumption. The `suggested_fee_recipient` can be provided to the VC, which will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another noteworthy trust assumption. -To be sure *you* control your fee recipient value, run your own BN and execution node (don't use +To be sure _you_ control your fee recipient value, run your own BN and execution node (don't use third-party services). ## How to configure a suggested fee recipient @@ -68,7 +68,6 @@ Provide a 0x-prefixed address, e.g. lighthouse vc --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... ``` - ### 3. Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the @@ -96,7 +95,8 @@ client. 
| Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 202, 404 | -#### Example Request Body +### Example Request Body + ```json { "ethaddress": "0x1D4E51167DBDC4789a014357f4029ff76381b16c" @@ -120,6 +120,7 @@ curl -X POST \ Note that an authorization header is required to interact with the API. This is specified with the header `-H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)"` which read the API token to supply the authentication. Refer to [Authorization Header](./api-vc-auth-header.md) for more information. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. #### Successful Response (202) + ```json null ``` @@ -137,7 +138,7 @@ The same path with a `GET` request can be used to query the fee recipient for a | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 200, 404 | -Command: +Command: ```bash DATADIR=$HOME/.lighthouse/mainnet @@ -150,6 +151,7 @@ curl -X GET \ ``` #### Successful Response (200) + ```json { "data": { @@ -171,7 +173,7 @@ This is useful if you want the fee recipient to fall back to the validator clien | Required Headers | [`Authorization`](./api-vc-auth-header.md) | | Typical Responses | 204, 404 | -Command: +Command: ```bash DATADIR=$HOME/.lighthouse/mainnet @@ -184,6 +186,7 @@ curl -X DELETE \ ``` #### Successful Response (204) + ```json null ``` diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 0572824d5c6..8d457c8f688 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,9 +2,9 @@ To enhance the security of your account, we offer the option to set a session password. This allows the user to avoid re-entering the api-token when performing critical mutating operations on the validator. Instead a user can simply enter their session password. In the absence of a session password, Siren will revert to the api-token specified in your configuration settings as the default security measure. -> This does not protect your validators from unauthorized device access. +> This does not protect your validators from unauthorized device access. -![](imgs/ui-session-auth.png) +![authentication](imgs/ui-session-auth.png) Session passwords must contain at least: @@ -14,20 +14,18 @@ Session passwords must contain at least: - 1 number - 1 special character - ## Protected Actions Prior to executing any sensitive validator action, Siren will request authentication of the session password or api-token. -![](imgs/ui-exit.png) - +![exit](imgs/ui-exit.png) In the event of three consecutive failed attempts, Siren will initiate a security measure by locking all actions and prompting for configuration settings to be renewed to regain access to these features. -![](imgs/ui-fail-auth.png) +![fail-authentication](imgs/ui-fail-auth.png) ## Auto Connect In the event that auto-connect is enabled, refreshing the Siren application will result in a prompt to authenticate the session password or api-token. If three consecutive authentication attempts fail, Siren will activate a security measure by locking the session and prompting for configuration settings to be reset to regain access. 
-![](imgs/ui-autoconnect-auth.png) \ No newline at end of file +![autoconnect](imgs/ui-autoconnect-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index 31951c3c92e..f5e4bed34a9 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -6,7 +6,6 @@ following configuration screen. ![ui-configuration](./imgs/ui-configuration.png) - ## Connecting to the Clients Both the Beacon node and the Validator client need to have their HTTP APIs enabled. These ports should be accessible from the computer running Siren. This allows you to enter the address and ports of the associated Lighthouse @@ -18,7 +17,7 @@ To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This a If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. -> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. +> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. @@ -27,7 +26,6 @@ If you run Siren in the browser (by entering `localhost` in the browser), you wi A green tick will appear once Siren is able to connect to both clients. You can specify different ports for each client by clicking on the advanced tab. - ## API Token The API Token is a secret key that allows you to connect to the validator diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index 77821788f66..4e4de225af4 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -1,16 +1,20 @@ # Frequently Asked Questions ## 1. Are there any requirements to run Siren? + Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to function properly. These releases can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## 2. Where can I find my API token? + The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors? + If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. 
For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. + +The most effective approach to enable access for a local network computer to Lighthouse's HTTP API ports is by configuring the `--http-address` to match the local LAN IP of the system running the beacon node and validator client. For instance, if the said node operates at `192.168.0.200`, this IP can be specified using the `--http-address` parameter as `--http-address 192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node. Subsequently, by designating the host as `192.168.0.200`, you can seamlessly connect Siren to this specific beacon node and validator client pair from any computer situated within the same network. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? @@ -22,6 +26,7 @@ Most contemporary home routers provide options for VPN access in various ways. A In the absence of a VPN, an alternative approach involves utilizing an SSH tunnel. To achieve this, you need remote SSH access to the computer hosting the Beacon Node and Validator Client pair (which necessitates a port forward in your router). In this context, while it is not obligatory to set a `--http-address` flag on the Beacon Node and Validator Client, you can configure an SSH tunnel to the local ports on the node and establish a connection through the tunnel. For instructions on setting up an SSH tunnel, refer to [`Connecting Siren via SSH tunnel`](./ui-faqs.md#6-how-do-i-connect-siren-to-lighthouse-via-a-ssh-tunnel) for detailed guidance. ## 6. How do I connect Siren to Lighthouse via a ssh tunnel? + If you would like to access Siren beyond the local network (i.e across the internet), we recommend using an SSH tunnel. This requires a tunnel for 3 ports: `80` (assuming the port is unchanged as per the [installation guide](./ui-installation.md#docker-recommended)), `5052` (for beacon node) and `5062` (for validator client). You can use the command below to perform SSH tunneling: ```bash @@ -30,13 +35,10 @@ ssh -N -L 80:127.0.0.1:80 -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username ``` - Where `username` is the username of the server and `local_ip` is the local IP address of the server. Note that with the `-N` option in an SSH session, you will not be able to execute commands in the CLI to avoid confusion with ordinary shell sessions. The connection will appear to be "hung" upon a successful connection, but that is normal. Once you have successfully connected to the server via SSH tunneling, you should be able to access Siren by entering `localhost` in a web browser. - You can also access Siren using the app downloaded in the [Siren release page](https://github.com/sigp/siren/releases). To access Siren beyond the local computer, you can use SSH tunneling for ports `5052` and `5062` using the command: - ```bash ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip @@ -44,7 +46,9 @@ ssh -N -L 5052:127.0.0.1:5052 -L 5062:127.0.0.1:5062 username@local_ip ``` ## 7. Does Siren support reverse proxy or DNS named addresses? + Yes, if you need to access your beacon or validator from an address such as `https://merp-server:9909/eth2-vc` you should follow the following steps for configuration: + 1. 
Toggle `https` as your protocol 2. Add your address as `merp-server/eth2-vc` 3. Add your Beacon and Validator ports as `9909` @@ -53,9 +57,10 @@ If you have configured it correctly you should see a green checkmark indicating If you have separate address setups for your Validator Client and Beacon Node respectively you should access the `Advance Settings` on the configuration and repeat the steps above for each address. - ## 8. How do I change my Beacon or Validator address after logging in? + Once you have successfully arrived to the main dashboard, use the sidebar to access the settings view. In the top right-hand corner there is a `Configuration` action button that will redirect you back to the configuration screen where you can make appropriate changes. ## 9. Why doesn't my validator balance graph show any data? + If your graph is not showing data, it usually means your validator node is still caching data. The application must wait at least 3 epochs before it can render any graphical visualizations. This could take up to 20min. diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index b8ae788c69b..4f7df4e8ff8 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -3,6 +3,7 @@ Siren runs on Linux, MacOS and Windows. ## Version Requirement + The Siren app requires Lighthouse v3.5.1 or higher to function properly. These versions can be found on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. ## Pre-Built Electron Packages @@ -26,26 +27,26 @@ The electron app can be built from source by first cloning the repository and entering the directory: ``` -$ git clone https://github.com/sigp/siren.git -$ cd siren +git clone https://github.com/sigp/siren.git +cd siren ``` Once cloned, the electron app can be built and ran via the Makefile by: ``` -$ make +make ``` alternatively it can be built via: ``` -$ yarn +yarn ``` Once completed successfully the electron app can be run via: ``` -$ yarn dev +yarn dev ``` ### Running In The Browser @@ -59,19 +60,22 @@ production-grade web-server to host the application. `docker` is required to be installed with the service running. The docker image can be built and run via the Makefile by running: + ``` -$ make docker +make docker ``` Alternatively, to run with Docker, the image needs to be built. From the repository directory run: + ``` -$ docker build -t siren . +docker build -t siren . ``` Then to run the image: + ``` -$ docker run --rm -ti --name siren -p 80:80 siren +docker run --rm -ti --name siren -p 80:80 siren ``` This will open port 80 and allow your browser to connect. You can choose @@ -83,20 +87,24 @@ To view Siren, simply go to `http://localhost` in your web browser. #### Development Server A development server can also be built which will expose a local port 3000 via: + ``` -$ yarn start +yarn start ``` Once executed, you can direct your web browser to the following URL to interact with the app: + ``` http://localhost:3000 ``` A production version of the app can be built via + ``` -$ yarn build +yarn build ``` + and then further hosted via a production web server. ### Known Issues diff --git a/book/src/ui-usage.md b/book/src/ui-usage.md index 867a49a91f8..eddee311fdf 100644 --- a/book/src/ui-usage.md +++ b/book/src/ui-usage.md @@ -1,10 +1,10 @@ # Usage -# Dashboard +## Dashboard Siren's dashboard view provides a summary of all performance and key validator metrics. 
Sync statuses, uptimes, accumulated rewards, hardware and network metrics are all consolidated on the dashboard for evaluation. -![](imgs/ui-dashboard.png) +![dashboard](imgs/ui-dashboard.png) ## Account Earnings @@ -12,66 +12,62 @@ The account earnings component accumulates reward data from all registered valid Below in the earning section, you can also view your total earnings or click the adjacent buttons to view your estimated earnings given a specific time frame based on current device and network conditions. -![](imgs/ui-account-earnings.png) +![earning](imgs/ui-account-earnings.png) ## Validator Table The validator table component is a list of all registered validators, which includes data such as name, index, total balance, earned rewards and current status. Each validator row also contains a link to a detailed data modal and additional data provided by [Beaconcha.in](https://beaconcha.in). -![](imgs/ui-validator-table.png) +![validator-table](imgs/ui-validator-table.png) ## Validator Balance Chart The validator balance component is a graphical representation of each validator balance over the latest 10 epochs. Take note that only active validators are rendered in the chart visualization. -![](imgs/ui-validator-balance1.png) +![validator-balance](imgs/ui-validator-balance1.png) By clicking on the chart component you can filter selected validators in the render. This call allow for greater resolution in the rendered visualization. - - - - +balance-modal +validator-balance2 ## Hardware Usage and Device Diagnostics The hardware usage component gathers information about the device the Beacon Node is currently running. It displays the Disk usage, CPU metrics and memory usage of the Beacon Node device. The device diagnostics component provides the sync status of the execution client and beacon node. - - - +hardware +device ## Log Statistics The log statistics present an hourly combined rate of critical, warning, and error logs from the validator client and beacon node. This analysis enables informed decision-making, troubleshooting, and proactive maintenance for optimal system performance. - +log -# Validator Management +## Validator Management Siren's validator management view provides a detailed overview of all validators with options to deposit to and/or add new validators. Each validator table row displays the validator name, index, balance, rewards, status and all available actions per validator. -![](imgs/ui-validator-management.png) +![validator-management](imgs/ui-validator-management.png) ## Validator Modal Clicking the validator icon activates a detailed validator modal component. This component also allows users to trigger validator actions and as well to view and update validator graffiti. Each modal contains the validator total income with hourly, daily and weekly earnings estimates. - +ui-validator-modal -# Settings +## Settings Siren's settings view provides access to the application theme, version, name, device name and important external links. From the settings page users can also access the configuration screen to adjust any beacon or validator node parameters. -![](imgs/ui-settings.png) - +![settings](imgs/ui-settings.png) -# Validator and Beacon Logs +## Validator and Beacon Logs The logs page provides users with the functionality to access and review recorded logs for both validators and beacons. Users can conveniently observe log severity, messages, timestamps, and any additional data associated with each log entry. 
The interface allows for seamless switching between validator and beacon log outputs, and incorporates useful features such as built-in text search and the ability to pause log feeds. Additionally, users can obtain log statistics, which are also available on the main dashboard, thereby facilitating a comprehensive overview of the system's log data. Please note that Siren is limited to storing and displaying only the previous 1000 log messages. This also means the text search is limited to the logs that are currently stored within Siren's limit. -![](imgs/ui-logs.png) \ No newline at end of file +![logs](imgs/ui-logs.png) diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index b62086d4bfb..a3d60d31b3c 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -16,7 +16,7 @@ achieves this by staying silent for 2-3 epochs after a validator is started so i other instances of that validator before starting to sign potentially slashable messages. > Note: Doppelganger Protection is not yet interoperable, so if it is configured on a Lighthouse -> validator client, the client must be connected to a Lighthouse beacon node. +> validator client, the client must be connected to a Lighthouse beacon node. ## Initial Considerations diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index f31d7294499..092c813a1ea 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -12,10 +12,10 @@ In order to apply these APIs, you need to have historical states information in ## Endpoints -HTTP Path | Description | +| HTTP Path | Description | | --- | -- | -[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. -[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. +| [`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. | +| [`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. | ## Global @@ -53,16 +53,17 @@ vote (that is why it is _effective_ `Gwei`). The following fields are returned: - `current_epoch_active_gwei`: the total staked gwei that was active (i.e., - able to vote) during the current epoch. + able to vote) during the current epoch. - `current_epoch_target_attesting_gwei`: the total staked gwei that attested to - the majority-elected Casper FFG target epoch during the current epoch. + the majority-elected Casper FFG target epoch during the current epoch. +- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a - head beacon block that is in the canonical chain. + head beacon block that is in the canonical chain. From this data you can calculate: -#### Justification/Finalization Rate +### Justification/Finalization Rate `previous_epoch_target_attesting_gwei / current_epoch_active_gwei` @@ -95,7 +96,6 @@ The [Global Votes](#global) endpoint is the summation of all of these individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". 
- ### HTTP Example ```bash diff --git a/book/src/validator-management.md b/book/src/validator-management.md index bc6aba3c4f9..b9610b69675 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -41,6 +41,7 @@ Here's an example file with two validators: voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json voting_keystore_password: myStrongpa55word123&$ ``` + In this example we can see two validators: - A validator identified by the `0x87a5...` public key which is enabled. @@ -51,7 +52,7 @@ In this example we can see two validators: Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this - validator "enabled". + validator "enabled". - `voting_public_key`: A validator public key. - `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. @@ -59,9 +60,9 @@ Each permitted field of the file is listed below for reference: - `voting_keystore_password`: The password to the EIP-2335 keystore. > **Note**: Either `voting_keystore_password_path` or `voting_keystore_password` *must* be -> supplied. If both are supplied, `voting_keystore_password_path` is ignored. +> supplied. If both are supplied, `voting_keystore_password_path` is ignored. ->If you do not wish to have `voting_keystore_password` being stored in the `validator_definitions.yml` file, you can add the field `voting_keystore_password_path` and point it to a file containing the password. The file can be, e.g., on a mounted portable drive that contains the password so that no password is stored on the validating node. +>If you do not wish to have `voting_keystore_password` being stored in the `validator_definitions.yml` file, you can add the field `voting_keystore_password_path` and point it to a file containing the password. The file can be, e.g., on a mounted portable drive that contains the password so that no password is stored on the validating node. ## Populating the `validator_definitions.yml` file @@ -77,7 +78,6 @@ recap: ### Automatic validator discovery - When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`. @@ -148,7 +148,6 @@ ensure their `secrets-dir` is organised as below: └── 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 ``` - ### Manual configuration The automatic validator discovery process works out-of-the-box with validators @@ -181,7 +180,7 @@ the active validator, the validator client will: password. 1. Use the keystore password to decrypt the keystore and obtain a BLS keypair. 1. Verify that the decrypted BLS keypair matches the `voting_public_key`. -1. Create a `voting-keystore.json.lock` file adjacent to the +1. Create a `voting-keystore.json.lock` file adjacent to the `voting_keystore_path`, indicating that the voting keystore is in-use and should not be opened by another process. 1. Proceed to act for that validator, creating blocks and attestations if/when required. 
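For example, when using `voting_keystore_password_path` as described above, the password can be kept in a small file (for instance on a removable drive) instead of inline in `validator_definitions.yml`. A minimal sketch, with an illustrative path:

```bash
# Write the keystore password to a file and restrict its permissions.
# -n avoids a trailing newline; the path below is only an example.
echo -n 'myStrongpa55word123&$' > /mnt/portable/voting-keystore-password.txt
chmod 600 /mnt/portable/voting-keystore-password.txt
```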
diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index 98202d3b52b..d97f953fc19 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -48,6 +48,7 @@ lighthouse \ --suggested-fee-recipient
\ --output-path ./ ``` + > If the flag `--first-index` is not provided, it will default to using index 0. > The `--suggested-fee-recipient` flag may be omitted to use whatever default > value the VC uses. It does not necessarily need to be identical to @@ -63,6 +64,7 @@ lighthouse \ --validators-file validators.json \ --vc-token ``` + > This is assuming that `validators.json` is in the present working directory. If it is not, insert the directory of the file. > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. @@ -141,7 +143,6 @@ must be known. The location of the file varies, but it is located in the `~/.lighthouse/mainnet/validators/api-token.txt`. We will use `` to substitute this value. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path. - Once the VC is running, use the `import` command to import the validators to the VC: ```bash @@ -166,16 +167,18 @@ The user should now *securely* delete the `validators.json` file (e.g., `shred - The `validators.json` contains the unencrypted validator keys and must not be shared with anyone. At the same time, `lighthouse vc` will log: + ```bash INFO Importing keystores via standard HTTP API, count: 1 WARN No slashing protection data provided with keystores INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore INFO Modified key_cache saved successfully ``` -The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. + +The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk. The validators will now go through 2-3 epochs of [doppelganger protection](./validator-doppelganger.md) and will automatically start performing -their duties when they are deposited and activated. +their duties when they are deposited and activated. If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop: @@ -194,6 +197,7 @@ lighthouse \ --vc-token \ --ignore-duplicates ``` + and the output will be as follows: ```bash diff --git a/book/src/validator-manager-move.md b/book/src/validator-manager-move.md index 5009e6407e9..10de1fe87c6 100644 --- a/book/src/validator-manager-move.md +++ b/book/src/validator-manager-move.md @@ -100,7 +100,7 @@ lighthouse \ > it is recommended for an additional layer of safety. It will result in 2-3 > epochs of downtime for the validator after it is moved, which is generally an > inconsequential cost in lost rewards or penalties. -> +> > Optionally, users can add the `--http-store-passwords-in-secrets-dir` flag if they'd like to have > the import validator keystore passwords stored in separate files rather than in the > `validator-definitions.yml` file. If you don't know what this means, you can safely omit the flag. @@ -158,7 +158,9 @@ Moved keystore 1 of 2 Moved keystore 2 of 2 Done. 
``` + At the same time, `lighthouse vc` will log: + ```bash INFO Importing keystores via standard HTTP API, count: 1 INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore @@ -183,12 +185,13 @@ lighthouse \ ``` > Note: If you have the `validator-monitor-auto` turned on, the source beacon node may still be reporting the attestation status of the validators that have been moved: + ``` INFO Previous epoch attestation(s) success validators: ["validator_index"], epoch: 100000, service: val_mon, service: beacon ``` -> This is fine as the validator monitor does not know that the validators have been moved (it *does not* mean that the validators have attested twice for the same slot). A restart of the beacon node will resolve this. +> This is fine as the validator monitor does not know that the validators have been moved (it *does not* mean that the validators have attested twice for the same slot). A restart of the beacon node will resolve this. Any errors encountered during the operation should include information on how to proceed. Assistance is also available on our -[Discord](https://discord.gg/cyAszAh). \ No newline at end of file +[Discord](https://discord.gg/cyAszAh). diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index e3cb74bd668..a71fab1e3ad 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -1,7 +1,6 @@ # Validator Manager [Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/ -[Import Validators]: #import-validators ## Introduction @@ -32,4 +31,4 @@ The `validator-manager` boasts the following features: ## Guides - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) -- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) \ No newline at end of file +- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 532bd50065f..6439ea83a32 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -20,7 +20,6 @@ Lighthouse performs validator monitoring in the Beacon Node (BN) instead of the - Users can use a local BN to observe some validators running in a remote location. - Users can monitor validators that are not their own. - ## How to Enable Monitoring The validator monitor is always enabled in Lighthouse, but it might not have any enrolled @@ -57,7 +56,8 @@ Monitor the mainnet validators at indices `0` and `1`: ``` lighthouse bn --validator-monitor-pubkeys 0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95,0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c ``` -> Note: The validator monitoring will stop collecting per-validator Prometheus metrics and issuing per-validator logs when the number of validators reaches 64. To continue collecting metrics and logging, use the flag `--validator-monitor-individual-tracking-threshold N` where `N` is a number greater than the number of validators to monitor. + +> Note: The validator monitoring will stop collecting per-validator Prometheus metrics and issuing per-validator logs when the number of validators reaches 64. 
To continue collecting metrics and logging, use the flag `--validator-monitor-individual-tracking-threshold N` where `N` is a number greater than the number of validators to monitor. ## Observing Monitoring @@ -102,7 +102,7 @@ dashboard contains most of the metrics exposed via the validator monitor. Lighthouse v4.6.0 introduces a new feature to track the performance of a beacon node. This feature internally simulates an attestation for each slot, and outputs a hit or miss for the head, target and source votes. The attestation simulator is turned on automatically (even when there are no validators) and prints logs in the debug level. -> Note: The simulated attestations are never published to the network, so the simulator does not reflect the attestation performance of a validator. +> Note: The simulated attestations are never published to the network, so the simulator does not reflect the attestation performance of a validator. The attestation simulation prints the following logs when simulating an attestation: @@ -118,11 +118,11 @@ DEBG Simulated attestation evaluated, head_hit: true, target_hit: true, source_h ``` An example of a log when the head is missed: + ``` DEBG Simulated attestation evaluated, head_hit: false, target_hit: true, source_hit: true, attestation_slot: Slot(1132623), attestation_head: 0x1c0e53c6ace8d0ff57f4a963e4460fe1c030b37bf1c76f19e40928dc2e214c59, attestation_target: 0xaab25a6d01748cf4528e952666558317b35874074632550c37d935ca2ec63c23, attestation_source: 0x13ccbf8978896c43027013972427ee7ce02b2bb9b898dbb264b870df9288c1e7, service: val_mon, service: beacon, module: beacon_chain::validator_monitor:2051 ``` - With `--metrics` enabled on the beacon node, the following metrics will be recorded: ``` @@ -134,11 +134,12 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). + +The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. -The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. +The attestation simulator _does not_ consider: -The attestation simulator *does not* consider: - the latency between the beacon node and the validator client - the potential delays when publishing the attestation to the network @@ -146,10 +147,6 @@ which are critical factors to consider when evaluating the attestation performan Assuming the above factors are ignored (no delays between beacon node and validator client, and in publishing the attestation to the network): -1. If the attestation simulator says that all votes are hit, it means that if the beacon node were to publish the attestation for this slot, the validator should receive the rewards for the head, target and source votes. +1. 
If the attestation simulator says that all votes are hit, it means that if the beacon node were to publish the attestation for this slot, the validator should receive the rewards for the head, target and source votes. 1. If the attestation simulator says that the one or more votes are missed, it means that there is a delay in importing the block. The delay could be due to slowness in processing the block (e.g., due to a slow CPU) or that the block is arriving late (e.g., the proposer publishes the block late). If the beacon node were to publish the attestation for this slot, the validator will miss one or more votes (e.g., the head vote). - - - - diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 4ec4837fea9..6261f2e2675 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -22,20 +22,17 @@ In order to initiate an exit, users can use the `lighthouse account validator ex - The `--password-file` flag is used to specify the path to the file containing the password for the voting keystore. If this flag is not provided, the user will be prompted to enter the password. - After validating the password, the user will be prompted to enter a special exit phrase as a final confirmation after which the voluntary exit will be published to the beacon chain. The exit phrase is the following: > Exit my validator - - Below is an example for initiating a voluntary exit on the Holesky testnet. ``` $ lighthouse --network holesky account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 -Running account manager for Prater network +Running account manager for Holesky network validator-dir path: ~/.lighthouse/holesky/validators Enter the keystore password for validator in 0xabcd @@ -71,56 +68,52 @@ After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12 There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: - - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. -- `withdrawals not enabled` means your validator is of type `0x00`, and will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). - +- `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. +- `withdrawals not enabled` means your validator is of type `0x00`, and will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). ### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? Your staked fund will continue to be locked on the beacon chain. You can update your withdrawal credentials **anytime**, and there is no deadline for that. 
The catch is that as long as you do not update your withdrawal credentials, your staked funds in the beacon chain will continue to be locked in the beacon chain. Only after you update the withdrawal credentials, will the staked funds be withdrawn to the withdrawal address. -### 3. How many times can I update my withdrawal credentials? - +### 3. How many times can I update my withdrawal credentials? + If your withdrawal credentials is of type `0x00`, you can only update it once to type `0x01`. It is therefore very important to ensure that the withdrawal address you set is an address under your control, preferably an address controlled by a hardware wallet. If your withdrawal credentials is of type `0x01`, it means you have set your withdrawal address previously, and you will not be able to change the withdrawal address. ### 3. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed ? - + Your BTEC request will be included very quickly as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request time has now past (right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~ 2 days) . -### 4. When will I get my staked fund after voluntary exit if my validator is of type `0x01`? - +### 4. When will I get my staked fund after voluntary exit if my validator is of type `0x01`? + There are 3 waiting periods until you get the staked funds in your withdrawal address: - - An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties. - - - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. +- An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties. + +- A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. - - A varying time of "validator sweep" that can take up to *n* days with *n* listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. +- A varying time of "validator sweep" that can take up to _n_ days with _n_ listed in the table below. The "validator sweep" is the process of skimming through all eligible validators by index number for withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set.
-| Number of eligible validators | Ideal scenario *n* | Practical scenario *n* | +| Number of eligible validators | Ideal scenario _n_ | Practical scenario _n_ | |:----------------:|:---------------------:|:----:| -| 300000 | 2.60 | 2.63 | -| 400000 | 3.47 | 3.51 | -| 500000 | 4.34 | 4.38 | -| 600000 | 5.21 | 5.26 | -| 700000 | 6.08 | 6.14 | -| 800000 | 6.94 | 7.01 | -| 900000 | 7.81 | 7.89 | -| 1000000 | 8.68 | 8.77 | +| 300000 | 2.60 | 2.63 | +| 400000 | 3.47 | 3.51 | +| 500000 | 4.34 | 4.38 | +| 600000 | 5.21 | 5.26 | +| 700000 | 6.08 | 6.14 | +| 800000 | 6.94 | 7.01 | +| 900000 | 7.81 | 7.89 | +| 1000000 | 8.68 | 8.77 |
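The ideal-scenario column above is simple arithmetic; as a rough illustration (not part of the book), a minimal Rust sketch assuming the 7200 blocks/day and 16 withdrawals/block figures quoted in the note below:

```rust
// Rough estimate of the ideal-scenario sweep time, assuming ~7200 blocks/day and
// 16 withdrawals per block (the figures quoted in the note below).
fn ideal_sweep_days(eligible_validators: u64) -> f64 {
    let withdrawals_per_day = 7200.0 * 16.0; // 115_200 withdrawals/day
    eligible_validators as f64 / withdrawals_per_day
}

fn main() {
    // 700_000 eligible validators -> ~6.08 days, matching the table above.
    println!("{:.2}", ideal_sweep_days(700_000));
}
```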
> Note: Ideal scenario assumes no block proposals are missed. This means a total of withdrawals of 7200 blocks/day * 16 withdrawals/block = 115200 withdrawals/day. Practical scenario assumes 1% of blocks are missed per day. As an example, if there are 700000 eligible validators, one would expect a waiting time of slightly more than 6 days. - - The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. The voluntary exit and full withdrawal process is summarized in the Figure below. ![full](./imgs/full-withdrawal.png) - diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 6cf62e04308..4cde8ea2707 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "5.1.3" +version = "5.2.1" authors = ["Sigma Prime "] edition = { workspace = true } @@ -18,9 +18,6 @@ slog-term = { workspace = true } logging = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -slog-stdlog = "4.0.0" hex = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } -serde_yaml = { workspace = true } eth2_network_config = { workspace = true } diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index d7ea5ab0b35..440a9d27e2d 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -1,18 +1,29 @@ //! Simple logic for spawning a Lighthouse BootNode. -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; // TODO: Add DOS prevention CLI params -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("boot_node") +pub fn cli_app() -> Command { + Command::new("boot_node") .about("Start a special Lighthouse process that only serves as a discv5 boot-node. This \ process will *not* import blocks or perform most typical beacon node functions. Instead, it \ will simply run the discv5 service and assist nodes on the network to discover each other. \ This is the recommended way to provide a network boot-node since it has a reduced attack \ surface compared to a full beacon node.") - .settings(&[clap::AppSettings::ColoredHelp]) + .styles(get_color_style()) + .display_order(0) .arg( - Arg::with_name("enr-address") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("enr-address") .long("enr-address") .value_name("ADDRESS") .help("The IP address/ DNS address to broadcast to other peers on how to reach \ @@ -21,31 +32,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { discovery. Set this only if you are sure other nodes can connect to your \ local node on this address. This will update the `ip4` or `ip6` ENR fields \ accordingly. To update both, set this flag twice with the different values.") - .multiple(true) - .max_values(2) + .action(ArgAction::Append) + .num_args(1..=2) .required(true) .conflicts_with("network-dir") - .takes_value(true), + .display_order(0) ) .arg( - Arg::with_name("port") + Arg::new("port") .long("port") .value_name("PORT") .help("The UDP port to listen on.") .default_value("9000") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("port6") + Arg::new("port6") .long("port6") .value_name("PORT") .help("The UDP port to listen on over IpV6 when listening over both Ipv4 and \ Ipv6. 
Defaults to 9090 when required.") .default_value("9090") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address the bootnode will listen for UDP communications. To listen \ @@ -56,53 +69,63 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ Ipv4 and Ipv6. The order of the given addresses is not relevant. However, \ multiple Ipv4, or multiple Ipv6 addresses will not be accepted.") - .multiple(true) - .max_values(2) + .num_args(1..=2) .default_value("0.0.0.0") - .takes_value(true) + .action(ArgAction::Append) + .display_order(0) ) .arg( - Arg::with_name("boot-nodes") + Arg::new("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) .value_name("ENR-LIST/Multiaddr") .help("One or more comma-delimited base64-encoded ENR's or multiaddr strings of peers to initially add to the local routing table") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enr-udp-port") + Arg::new("enr-udp-port") .long("enr-port") .value_name("PORT") .help("The UDP port of the boot node's ENR. This is the port that external peers will dial to reach this boot node. Set this only if the external port differs from the listening port.") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("network-dir") + .display_order(0) ) .arg( - Arg::with_name("enr-udp6-port") + Arg::new("enr-udp6-port") .long("enr-udp6-port") .value_name("PORT") .help("The UDP6 port of the local ENR. Set this only if you are sure other nodes \ can connect to your local node on this port over IpV6.") .conflicts_with("network-dir") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-enr-auto-update") - .short("x") + Arg::new("enable-enr-auto-update") + .short('x') + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("enable-enr-auto-update") .help("Discovery can automatically update the node's local ENR with an external IP address and port as seen by other peers on the network. \ This enables this feature.") + .display_order(0) ) .arg( - Arg::with_name("disable-packet-filter") + Arg::new("disable-packet-filter") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long("disable-packet-filter") .help("Disables discv5 packet filter. Useful for testing in smaller networks") + .display_order(0) ) .arg( - Arg::with_name("network-dir") + Arg::new("network-dir") .value_name("NETWORK_DIR") .long("network-dir") .help("The directory which contains the enr and it's associated private key") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index a9c89505322..aaa9f084826 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -25,11 +25,10 @@ pub struct BootNodeConfig { impl BootNodeConfig { pub async fn new( - matches: &ArgMatches<'_>, + matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, ) -> Result { let data_dir = get_data_dir(matches); - // Try and obtain bootnodes let boot_nodes = { @@ -39,7 +38,7 @@ impl BootNodeConfig { boot_nodes.extend_from_slice(enr); } - if let Some(nodes) = matches.value_of("boot-nodes") { + if let Some(nodes) = matches.get_one::("boot-nodes") { boot_nodes.extend_from_slice( &nodes .split(',') @@ -81,14 +80,14 @@ impl BootNodeConfig { }; // By default this is enabled. 
If it is not set, revert to false. - if !matches.is_present("enable-enr-auto-update") { + if !matches.get_flag("enable-enr-auto-update") { network_config.discv5_config.enr_update = false; } let private_key = load_private_key(&network_config, &logger); let local_key = CombinedKey::from_libp2p(private_key)?; - let local_enr = if let Some(dir) = matches.value_of("network-dir") { + let local_enr = if let Some(dir) = matches.get_one::("network-dir") { let network_dir: PathBuf = dir.into(); load_enr_from_disk(&network_dir)? } else { @@ -103,14 +102,17 @@ impl BootNodeConfig { .map(Duration::from_secs)?; if eth2_network_config.genesis_state_is_known() { - let genesis_state = eth2_network_config + let mut genesis_state = eth2_network_config .genesis_state::(genesis_state_url.as_deref(), genesis_state_url_timeout, &logger).await? .ok_or_else(|| { "The genesis state for this network is not known, this is an unsupported mode" .to_string() })?; - slog::info!(logger, "Genesis state found"; "root" => genesis_state.canonical_root().to_string()); + let genesis_state_root = genesis_state + .canonical_root() + .map_err(|e| format!("Error hashing genesis state: {e:?}"))?; + slog::info!(logger, "Genesis state found"; "root" => ?genesis_state_root); let enr_fork = spec.enr_fork_id::( types::Slot::from(0u64), genesis_state.genesis_validators_root(), diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index e707dc14f76..669b126bd37 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -14,8 +14,8 @@ const LOG_CHANNEL_SIZE: usize = 2048; /// Run the bootnode given the CLI configuration. pub fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth_spec_id: EthSpecId, eth2_network_config: &Eth2NetworkConfig, debug_level: String, @@ -67,8 +67,8 @@ pub fn run( } fn main( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs index b6bdd148f4b..286fa9e0f0f 100644 --- a/boot_node/src/server.rs +++ b/boot_node/src/server.rs @@ -12,8 +12,8 @@ use slog::info; use types::EthSpec; pub async fn run( - lh_matches: &ArgMatches<'_>, - bn_matches: &ArgMatches<'_>, + lh_matches: &ArgMatches, + bn_matches: &ArgMatches, eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { @@ -28,7 +28,7 @@ pub async fn run( ð2_network_config.chain_spec::()?, )?; - if lh_matches.is_present("immediate-shutdown") { + if lh_matches.get_flag("immediate-shutdown") { return Ok(()); } diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index 1ebd2b1740f..ea56e7e672a 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,5 +1,6 @@ //! A helper library for parsing values from `clap::ArgMatches`. +use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; use ethereum_types::U256 as Uint256; @@ -15,12 +16,14 @@ pub const BAD_TESTNET_DIR_MESSAGE: &str = "The hard-coded testnet directory was or when there is no default public network to connect to. \ During these times you must specify a --testnet-dir."; +pub const FLAG_HEADER: &str = "Flags"; + /// Try to parse the eth2 network config from the `network`, `testnet-dir` flags in that order. /// Returns the default hardcoded testnet if neither flags are set. 
pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result { - let optional_network_config = if cli_args.is_present("network") { + let optional_network_config = if cli_args.contains_id("network") { parse_hardcoded_network(cli_args, "network")? - } else if cli_args.is_present("testnet-dir") { + } else if cli_args.contains_id("testnet-dir") { parse_testnet_dir(cli_args, "testnet-dir")? } else { // if neither is present, assume the default network @@ -92,7 +95,7 @@ pub fn parse_path_with_default_in_home_dir( default: PathBuf, ) -> Result { matches - .value_of(name) + .get_one::(name) .map(|dir| { dir.parse::() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -122,7 +125,8 @@ where ::Err: std::fmt::Display, { matches - .value_of(name) + .try_get_one::(name) + .map_err(|e| format!("Unable to parse {}: {}", name, e))? .map(|val| { val.parse() .map_err(|e| format!("Unable to parse {}: {}", name, e)) @@ -150,7 +154,7 @@ pub fn parse_ssz_optional( name: &'static str, ) -> Result, String> { matches - .value_of(name) + .get_one::(name) .map(|val| { if let Some(stripped) = val.strip_prefix("0x") { let vec = hex::decode(stripped) @@ -190,3 +194,15 @@ where } Ok(()) } + +pub fn get_color_style() -> Styles { + Styles::styled() + .header(AnsiColor::Yellow.on_default()) + .usage(AnsiColor::Green.on_default()) + .literal(AnsiColor::Green.on_default()) + .placeholder(AnsiColor::Green.on_default()) +} + +pub fn parse_flag(matches: &ArgMatches, name: &str) -> bool { + *matches.get_one::(name).unwrap_or(&false) +} diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs index 01c5a8f6ef8..1a89ccf4fdf 100644 --- a/common/compare_fields_derive/src/lib.rs +++ b/common/compare_fields_derive/src/lib.rs @@ -1,5 +1,3 @@ -extern crate proc_macro; - use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 785b9522135..5b54a05396a 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -96,7 +96,7 @@ mod tests { let mut deposit_data = DepositData { pubkey: keypair.pk.into(), withdrawal_credentials: Hash256::from_slice(&[42; 32]), - amount: u64::max_value(), + amount: u64::MAX, signature: Signature::empty().into(), }; deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index e8585c504a0..df03b4f9a4e 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -21,9 +21,9 @@ pub const CUSTOM_TESTNET_DIR: &str = "custom"; /// if not present, then checks the "testnet-dir" flag and returns a custom name /// If neither flags are present, returns the default hardcoded network name. 
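The clap upgrade above follows one pattern throughout: `Arg::with_name`, `takes_value`, `is_present` and `value_of` become `Arg::new`, `ArgAction`, `get_flag` and typed `get_one::<T>` accessors. A minimal standalone sketch of that clap 4 style (illustrative only, not Lighthouse code; the argument names are made up):

```rust
// Illustrative clap 4 usage mirroring the migration above.
use clap::{Arg, ArgAction, ArgMatches, Command};

fn cli() -> Command {
    Command::new("example")
        .arg(
            Arg::new("port")
                .long("port")
                .value_name("PORT")
                .default_value("9000")
                .action(ArgAction::Set), // replaces `.takes_value(true)`
        )
        .arg(
            Arg::new("enable-feature")
                .long("enable-feature")
                .action(ArgAction::SetTrue), // boolean flag, no value
        )
}

fn read(matches: &ArgMatches) {
    // Typed accessor replaces `value_of`.
    let port = matches.get_one::<String>("port").expect("has a default");
    // Flag accessor replaces `is_present`.
    let enabled = matches.get_flag("enable-feature");
    println!("port={port}, enabled={enabled}");
}

fn main() {
    let matches = cli().get_matches();
    read(&matches);
}
```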
pub fn get_network_dir(matches: &ArgMatches) -> String { - if let Some(network_name) = matches.value_of("network") { + if let Some(network_name) = matches.get_one::("network") { network_name.to_string() - } else if matches.value_of("testnet-dir").is_some() { + } else if matches.get_one::("testnet-dir").is_some() { CUSTOM_TESTNET_DIR.to_string() } else { eth2_network_config::DEFAULT_HARDCODED_NETWORK.to_string() diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 0f27bb66721..10b4755ba26 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -10,7 +10,6 @@ edition = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -tree_hash = { workspace = true } types = { workspace = true } reqwest = { workspace = true } lighthouse_network = { workspace = true } @@ -29,7 +28,6 @@ futures = { workspace = true } store = { workspace = true } slashing_protection = { workspace = true } mediatype = "0.19.13" -mime = "0.3.16" pretty_reqwest_error = { workspace = true } [dev-dependencies] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d8b2c8ef2d1..6d000f576f9 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -346,6 +346,19 @@ impl BeaconNodeHttpClient { Ok(()) } + /// Perform a HTTP POST request with a custom timeout and consensus header. + async fn post_with_timeout_and_consensus_header( + &self, + url: U, + body: &T, + timeout: Duration, + fork_name: ForkName, + ) -> Result<(), Error> { + self.post_generic_with_consensus_version(url, body, Some(timeout), fork_name) + .await?; + Ok(()) + } + /// Perform a HTTP POST request with a custom timeout, returning a JSON response. async fn post_with_timeout_and_response( &self, @@ -376,25 +389,6 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } - /// Generic POST function supporting arbitrary responses and timeouts. - /// Does not include Content-Type application/json in the request header. - async fn post_generic_json_without_content_type_header( - &self, - url: U, - body: &T, - timeout: Option, - ) -> Result { - let mut builder = self.client.post(url); - if let Some(timeout) = timeout { - builder = builder.timeout(timeout); - } - - let serialized_body = serde_json::to_vec(body).map_err(Error::InvalidJson)?; - - let response = builder.body(serialized_body).send().await?; - ok_or_error(response).await - } - /// Generic POST function supporting arbitrary responses and timeouts. async fn post_generic_with_consensus_version( &self, @@ -415,6 +409,23 @@ impl BeaconNodeHttpClient { ok_or_error(response).await } + /// Generic POST function that includes octet-stream content type header. + async fn post_generic_with_ssz_header( + &self, + url: U, + body: &T, + ) -> Result { + let builder = self.client.post(url); + let mut headers = HeaderMap::new(); + + headers.insert( + "Content-Type", + HeaderValue::from_static("application/octet-stream"), + ); + let response = builder.headers(headers).json(body).send().await?; + ok_or_error(response).await + } + /// Generic POST function supporting arbitrary responses and timeouts. async fn post_generic_with_consensus_version_and_ssz_body, U: IntoUrl>( &self, @@ -543,6 +554,26 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// TESTING ONLY: This request should fail with a 415 response code. 
+ pub async fn post_beacon_states_validator_balances_with_ssz_header( + &self, + state_id: StateId, + ids: Vec, + ) -> Result { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validator_balances"); + + let request = ValidatorBalancesRequestBody { ids }; + + self.post_generic_with_ssz_header(path, &request).await + } + /// `POST beacon/states/{state_id}/validator_balances` /// /// Returns `Ok(None)` on a 404 error. @@ -1191,10 +1222,10 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } - /// `GET beacon/blocks/{block_id}/attestations` + /// `GET v1/beacon/blocks/{block_id}/attestations` /// /// Returns `Ok(None)` on a 404 error. - pub async fn get_beacon_blocks_attestations( + pub async fn get_beacon_blocks_attestations_v1( &self, block_id: BlockId, ) -> Result>>>, Error> { @@ -1210,8 +1241,28 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } - /// `POST beacon/pool/attestations` - pub async fn post_beacon_pool_attestations( + /// `GET v2/beacon/blocks/{block_id}/attestations` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_attestations_v2( + &self, + block_id: BlockId, + ) -> Result>>>, Error> + { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("attestations"); + + self.get_opt(path).await + } + + /// `POST v1/beacon/pool/attestations` + pub async fn post_beacon_pool_attestations_v1( &self, attestations: &[Attestation], ) -> Result<(), Error> { @@ -1229,8 +1280,33 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `GET beacon/pool/attestations?slot,committee_index` - pub async fn get_beacon_pool_attestations( + /// `POST v2/beacon/pool/attestations` + pub async fn post_beacon_pool_attestations_v2( + &self, + attestations: &[Attestation], + fork_name: ForkName, + ) -> Result<(), Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; + + Ok(()) + } + + /// `GET v1/beacon/pool/attestations?slot,committee_index` + pub async fn get_beacon_pool_attestations_v1( &self, slot: Option, committee_index: Option, @@ -1256,8 +1332,35 @@ impl BeaconNodeHttpClient { self.get(path).await } - /// `POST beacon/pool/attester_slashings` - pub async fn post_beacon_pool_attester_slashings( + /// `GET v2/beacon/pool/attestations?slot,committee_index` + pub async fn get_beacon_pool_attestations_v2( + &self, + slot: Option, + committee_index: Option, + ) -> Result>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("attestations"); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(index) = committee_index { + path.query_pairs_mut() + .append_pair("committee_index", &index.to_string()); + } + + self.get(path).await + } + + /// `POST v1/beacon/pool/attester_slashings` + pub async fn post_beacon_pool_attester_slashings_v1( &self, slashing: &AttesterSlashing, ) -> Result<(), Error> { @@ -1269,14 +1372,33 @@ impl BeaconNodeHttpClient { .push("pool") .push("attester_slashings"); - self.post_generic_json_without_content_type_header(path, slashing, None) + self.post_generic(path, slashing, None).await?; + + Ok(()) + } + + /// `POST v2/beacon/pool/attester_slashings` + pub async fn post_beacon_pool_attester_slashings_v2( + &self, + slashing: &AttesterSlashing, + fork_name: ForkName, + ) -> Result<(), Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.post_generic_with_consensus_version(path, slashing, None, fork_name) .await?; Ok(()) } - /// `GET beacon/pool/attester_slashings` - pub async fn get_beacon_pool_attester_slashings( + /// `GET v1/beacon/pool/attester_slashings` + pub async fn get_beacon_pool_attester_slashings_v1( &self, ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; @@ -1290,6 +1412,21 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v2/beacon/pool/attester_slashings` + pub async fn get_beacon_pool_attester_slashings_v2( + &self, + ) -> Result>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.get(path).await + } + /// `POST beacon/pool/proposer_slashings` pub async fn post_beacon_pool_proposer_slashings( &self, @@ -2179,8 +2316,8 @@ impl BeaconNodeHttpClient { self.get_with_timeout(path, self.timeouts.attestation).await } - /// `GET validator/aggregate_attestation?slot,attestation_data_root` - pub async fn get_validator_aggregate_attestation( + /// `GET v1/validator/aggregate_attestation?slot,attestation_data_root` + pub async fn get_validator_aggregate_attestation_v1( &self, slot: Slot, attestation_data_root: Hash256, @@ -2203,6 +2340,32 @@ impl BeaconNodeHttpClient { .await } + /// `GET v2/validator/aggregate_attestation?slot,attestation_data_root,committee_index` + pub async fn get_validator_aggregate_attestation_v2( + &self, + slot: Slot, + attestation_data_root: Hash256, + committee_index: CommitteeIndex, + ) -> Result>>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("aggregate_attestation"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair( + "attestation_data_root", + &format!("{:?}", attestation_data_root), + ) + .append_pair("committee_index", &committee_index.to_string()); + + self.get_opt_with_timeout(path, self.timeouts.attestation) + .await + } + /// `GET validator/sync_committee_contribution` pub async fn get_validator_sync_committee_contribution( &self, @@ -2298,8 +2461,8 @@ impl BeaconNodeHttpClient { .await } - /// `POST validator/aggregate_and_proofs` - pub async fn post_validator_aggregate_and_proof( + /// `POST v1/validator/aggregate_and_proofs` + pub async fn post_validator_aggregate_and_proof_v1( &self, aggregates: &[SignedAggregateAndProof], ) -> Result<(), Error> { @@ -2316,6 +2479,30 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST v2/validator/aggregate_and_proofs` + pub async fn post_validator_aggregate_and_proof_v2( + &self, + aggregates: &[SignedAggregateAndProof], + fork_name: ForkName, + ) -> Result<(), Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("aggregate_and_proofs"); + + self.post_with_timeout_and_consensus_header( + path, + &aggregates, + self.timeouts.attestation, + fork_name, + ) + .await?; + + Ok(()) + } + /// `POST validator/beacon_committee_subscriptions` pub async fn post_validator_beacon_committee_subscriptions( &self, diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 83aeea4bfcc..67fe77a3157 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,13 +1,10 @@ -use super::{types::*, PK_LEN, SECRET_PREFIX}; +use super::types::*; use crate::Error; use account_utils::ZeroizeString; -use bytes::Bytes; -use libsecp256k1::{Message, PublicKey, Signature}; use reqwest::{ header::{HeaderMap, HeaderValue}, IntoUrl, }; -use ring::digest::{digest, SHA256}; use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; use std::fmt::{self, Display}; @@ -24,8 +21,7 @@ use types::graffiti::GraffitiString; pub struct ValidatorClientHttpClient { client: reqwest::Client, server: SensitiveUrl, - secret: Option, - server_pubkey: Option, + api_token: Option, authorization_header: AuthorizationHeader, } @@ -46,45 +42,13 @@ impl Display for AuthorizationHeader { } } -/// Parse an API token and return a secp256k1 public key. -/// -/// If the token does not start with the Lighthouse token prefix then `Ok(None)` will be returned. -/// An error will be returned if the token looks like a Lighthouse token but doesn't correspond to a -/// valid public key. -pub fn parse_pubkey(secret: &str) -> Result, Error> { - let secret = if !secret.starts_with(SECRET_PREFIX) { - return Ok(None); - } else { - &secret[SECRET_PREFIX.len()..] - }; - - serde_utils::hex::decode(secret) - .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) - .and_then(|bytes| { - if bytes.len() != PK_LEN { - return Err(Error::InvalidSecret(format!( - "expected {} bytes not {}", - PK_LEN, - bytes.len() - ))); - } - - let mut arr = [0; PK_LEN]; - arr.copy_from_slice(&bytes); - PublicKey::parse_compressed(&arr) - .map_err(|e| Error::InvalidSecret(format!("invalid secp256k1 pubkey: {:?}", e))) - }) - .map(Some) -} - impl ValidatorClientHttpClient { /// Create a new client pre-initialised with an API token. 
pub fn new(server: SensitiveUrl, secret: String) -> Result { Ok(Self { client: reqwest::Client::new(), server, - server_pubkey: parse_pubkey(&secret)?, - secret: Some(secret.into()), + api_token: Some(secret.into()), authorization_header: AuthorizationHeader::Bearer, }) } @@ -96,8 +60,7 @@ impl ValidatorClientHttpClient { Ok(Self { client: reqwest::Client::new(), server, - secret: None, - server_pubkey: None, + api_token: None, authorization_header: AuthorizationHeader::Omit, }) } @@ -110,15 +73,14 @@ impl ValidatorClientHttpClient { Ok(Self { client, server, - server_pubkey: parse_pubkey(&secret)?, - secret: Some(secret.into()), + api_token: Some(secret.into()), authorization_header: AuthorizationHeader::Bearer, }) } /// Get a reference to this client's API token, if any. pub fn api_token(&self) -> Option<&ZeroizeString> { - self.secret.as_ref() + self.api_token.as_ref() } /// Read an API token from the specified `path`, stripping any trailing whitespace. @@ -128,19 +90,11 @@ impl ValidatorClientHttpClient { } /// Add an authentication token to use when making requests. - /// - /// If the token is Lighthouse-like, a pubkey derivation will be attempted. In the case - /// of failure the token will still be stored, and the client can continue to be used to - /// communicate with non-Lighthouse nodes. pub fn add_auth_token(&mut self, token: ZeroizeString) -> Result<(), Error> { - let pubkey_res = parse_pubkey(token.as_str()); - - self.secret = Some(token); + self.api_token = Some(token); self.authorization_header = AuthorizationHeader::Bearer; - pubkey_res.map(|opt_pubkey| { - self.server_pubkey = opt_pubkey; - }) + Ok(()) } /// Set to `false` to disable sending the `Authorization` header on requests. @@ -160,49 +114,17 @@ impl ValidatorClientHttpClient { self.authorization_header = AuthorizationHeader::Basic; } - async fn signed_body(&self, response: Response) -> Result { - let server_pubkey = self.server_pubkey.as_ref().ok_or(Error::NoServerPubkey)?; - let sig = response - .headers() - .get("Signature") - .ok_or(Error::MissingSignatureHeader)? - .to_str() - .map_err(|_| Error::InvalidSignatureHeader)? 
- .to_string(); - - let body = response.bytes().await.map_err(Error::from)?; - - let message = - Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); - - serde_utils::hex::decode(&sig) - .ok() - .and_then(|bytes| { - let sig = Signature::parse_der(&bytes).ok()?; - Some(libsecp256k1::verify(&message, &sig, server_pubkey)) - }) - .filter(|is_valid| *is_valid) - .ok_or(Error::InvalidSignatureHeader)?; - - Ok(body) - } - - async fn signed_json(&self, response: Response) -> Result { - let body = self.signed_body(response).await?; - serde_json::from_slice(&body).map_err(Error::InvalidJson) - } - fn headers(&self) -> Result { let mut headers = HeaderMap::new(); if self.authorization_header == AuthorizationHeader::Basic || self.authorization_header == AuthorizationHeader::Bearer { - let secret = self.secret.as_ref().ok_or(Error::NoToken)?; + let auth_header_token = self.api_token().ok_or(Error::NoToken)?; let header_value = HeaderValue::from_str(&format!( "{} {}", self.authorization_header, - secret.as_str() + auth_header_token.as_str() )) .map_err(|e| { Error::InvalidSecret(format!("secret is invalid as a header value: {}", e)) @@ -240,7 +162,8 @@ impl ValidatorClientHttpClient { async fn get(&self, url: U) -> Result { let response = self.get_response(url).await?; - self.signed_json(response).await + let body = response.bytes().await.map_err(Error::from)?; + serde_json::from_slice(&body).map_err(Error::InvalidJson) } async fn delete(&self, url: U) -> Result<(), Error> { @@ -263,7 +186,14 @@ impl ValidatorClientHttpClient { /// Perform a HTTP GET request, returning `None` on a 404 error. async fn get_opt(&self, url: U) -> Result, Error> { match self.get_response(url).await { - Ok(resp) => self.signed_json(resp).await.map(Option::Some), + Ok(resp) => { + let body = resp.bytes().await.map(Option::Some)?; + if let Some(body) = body { + serde_json::from_slice(&body).map_err(Error::InvalidJson) + } else { + Ok(None) + } + } Err(err) => { if err.status() == Some(StatusCode::NOT_FOUND) { Ok(None) @@ -297,7 +227,8 @@ impl ValidatorClientHttpClient { body: &T, ) -> Result { let response = self.post_with_raw_response(url, body).await?; - self.signed_json(response).await + let body = response.bytes().await.map_err(Error::from)?; + serde_json::from_slice(&body).map_err(Error::InvalidJson) } async fn post_with_unsigned_response( @@ -319,8 +250,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - let response = ok_or_error(response).await?; - self.signed_body(response).await?; + ok_or_error(response).await?; Ok(()) } diff --git a/common/eth2/src/lighthouse_vc/mod.rs b/common/eth2/src/lighthouse_vc/mod.rs index 81b4fca283a..038726c829a 100644 --- a/common/eth2/src/lighthouse_vc/mod.rs +++ b/common/eth2/src/lighthouse_vc/mod.rs @@ -1,10 +1,3 @@ pub mod http_client; pub mod std_types; pub mod types; - -/// The number of bytes in the secp256k1 public key used as the authorization token for the VC API. -pub const PK_LEN: usize = 33; - -/// The prefix for the secp256k1 public key when it is used as the authorization token for the VC -/// API. 
-pub const SECRET_PREFIX: &str = "api-token-"; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 838be4beffb..bbcbda3ae55 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -716,6 +716,21 @@ pub struct AttesterData { pub slot: Slot, } +impl AttesterData { + pub fn match_attestation_data( + &self, + attestation_data: &AttestationData, + spec: &ChainSpec, + ) -> bool { + if spec.fork_name_at_slot::(attestation_data.slot) < ForkName::Electra { + self.slot == attestation_data.slot && self.committee_index == attestation_data.index + } else { + // After electra `attestation_data.index` is set to 0 and does not match the duties + self.slot == attestation_data.slot + } + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProposerData { pub pubkey: PublicKeyBytes, @@ -765,6 +780,8 @@ pub struct ValidatorAttestationDataQuery { pub struct ValidatorAggregateAttestationQuery { pub attestation_data_root: Hash256, pub slot: Slot, + #[serde(skip_serializing_if = "Option::is_none")] + pub committee_index: Option, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] @@ -954,6 +971,11 @@ pub struct SseHead { pub execution_optimistic: bool, } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct BlockGossip { + pub slot: Slot, + pub block: Hash256, +} #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, @@ -1080,6 +1102,10 @@ pub enum EventKind { #[cfg(feature = "lighthouse")] BlockReward(BlockReward), PayloadAttributes(VersionedSsePayloadAttributes), + ProposerSlashing(Box), + AttesterSlashing(Box>), + BlsToExecutionChange(Box), + BlockGossip(Box), } impl EventKind { @@ -1099,6 +1125,10 @@ impl EventKind { EventKind::LightClientOptimisticUpdate(_) => "light_client_optimistic_update", #[cfg(feature = "lighthouse")] EventKind::BlockReward(_) => "block_reward", + EventKind::ProposerSlashing(_) => "proposer_slashing", + EventKind::AttesterSlashing(_) => "attester_slashing", + EventKind::BlsToExecutionChange(_) => "bls_to_execution_change", + EventKind::BlockGossip(_) => "block_gossip", } } @@ -1179,6 +1209,24 @@ impl EventKind { "block_reward" => Ok(EventKind::BlockReward(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block Reward: {:?}", e)), )?)), + "attester_slashing" => Ok(EventKind::AttesterSlashing( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Attester Slashing: {:?}", e)) + })?, + )), + "proposer_slashing" => Ok(EventKind::ProposerSlashing( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Proposer Slashing: {:?}", e)) + })?, + )), + "bls_to_execution_change" => Ok(EventKind::BlsToExecutionChange( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Bls To Execution Change: {:?}", e)) + })?, + )), + "block_gossip" => Ok(EventKind::BlockGossip(serde_json::from_str(data).map_err( + |e| ServerError::InvalidServerSentEvent(format!("Block Gossip: {:?}", e)), + )?)), _ => Err(ServerError::InvalidServerSentEvent( "Could not parse event tag".to_string(), )), @@ -1210,6 +1258,10 @@ pub enum EventTopic { LightClientOptimisticUpdate, #[cfg(feature = "lighthouse")] BlockReward, + AttesterSlashing, + ProposerSlashing, + BlsToExecutionChange, + BlockGossip, } impl FromStr for EventTopic { @@ -1231,6 +1283,10 @@ impl FromStr for EventTopic { "light_client_optimistic_update" => 
Ok(EventTopic::LightClientOptimisticUpdate), #[cfg(feature = "lighthouse")] "block_reward" => Ok(EventTopic::BlockReward), + "attester_slashing" => Ok(EventTopic::AttesterSlashing), + "proposer_slashing" => Ok(EventTopic::ProposerSlashing), + "bls_to_execution_change" => Ok(EventTopic::BlsToExecutionChange), + "block_gossip" => Ok(EventTopic::BlockGossip), _ => Err("event topic cannot be parsed.".to_string()), } } @@ -1253,6 +1309,10 @@ impl fmt::Display for EventTopic { EventTopic::LightClientOptimisticUpdate => write!(f, "light_client_optimistic_update"), #[cfg(feature = "lighthouse")] EventTopic::BlockReward => write!(f, "block_reward"), + EventTopic::AttesterSlashing => write!(f, "attester_slashing"), + EventTopic::ProposerSlashing => write!(f, "proposer_slashing"), + EventTopic::BlsToExecutionChange => write!(f, "bls_to_execution_change"), + EventTopic::BlockGossip => write!(f, "block_gossip"), } } } @@ -1670,11 +1730,11 @@ impl ForkVersionDeserialize for FullBlockContents { } } -impl Into> for FullBlockContents { - fn into(self) -> BeaconBlock { - match self { - Self::BlockContents(block_and_sidecars) => block_and_sidecars.block, - Self::Block(block) => block, +impl From> for BeaconBlock { + fn from(from: FullBlockContents) -> BeaconBlock { + match from { + FullBlockContents::::BlockContents(block_and_sidecars) => block_and_sidecars.block, + FullBlockContents::::Block(block) => block, } } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index bf707c4d17b..9104db8f67d 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -192,7 +192,11 @@ macro_rules! define_net { config_dir: ETH2_NET_DIR.config_dir, genesis_state_source: ETH2_NET_DIR.genesis_state_source, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), - deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), + deploy_block: $this_crate::$include_file!( + $this_crate, + "../", + "deposit_contract_block.txt" + ), boot_enr: $this_crate::$include_file!($this_crate, "../", "boot_enr.yaml"), genesis_state_bytes: $this_crate::$include_file!($this_crate, "../", "genesis.ssz"), } @@ -284,26 +288,6 @@ define_hardcoded_nets!( // Describes how the genesis state can be obtained. GenesisStateSource::IncludedBytes ), - ( - // Network name (must be unique among all networks). - prater, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), - ( - // Network name (must be unique among all networks). - goerli, - // The name of the directory in the `eth2_network_config/built_in_network_configs` - // directory where the configuration files are located for this network. - // - // The Goerli network is effectively an alias to Prater. - "prater", - // Describes how the genesis state can be obtained. - GenesisStateSource::IncludedBytes - ), ( // Network name (must be unique among all networks). gnosis, diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index 3031e1c4dc1..34c3d6f87c9 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -16,11 +16,9 @@ //! //! This implementation passes the [reference implementation //! tests](https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml). 
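The `Into` → `From` change above is the idiomatic direction for conversions, since implementing `From` yields the matching `Into` through the standard blanket impl; a tiny sketch with made-up types:

```rust
// Illustrative types only: implementing `From` (rather than `Into`) still gives
// callers `.into()` via the blanket `impl<T, U> Into<U> for T where U: From<T>`.
struct Wrapper(u64);

impl From<Wrapper> for u64 {
    fn from(from: Wrapper) -> u64 {
        from.0
    }
}

fn main() {
    let value: u64 = Wrapper(7).into();
    println!("{value}");
}
```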
-#[macro_use] -extern crate lazy_static; - use bls::{Keypair, PublicKey, SecretKey}; use ethereum_hashing::hash; +use lazy_static::lazy_static; use num_bigint::BigUint; use serde::{Deserialize, Serialize}; use std::fs::File; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 3807c2e9930..4b34405e5b3 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -13,12 +13,11 @@ eth2_config = { workspace = true } [dev-dependencies] tempfile = { workspace = true } tokio = { workspace = true } +ethereum_ssz = { workspace = true } [dependencies] serde_yaml = { workspace = true } -serde_json = { workspace = true } types = { workspace = true } -ethereum_ssz = { workspace = true } eth2_config = { workspace = true } discv5 = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index c869d9cfc83..07d100b011c 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -3,7 +3,7 @@ PRESET_BASE: 'gnosis' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'chiado' diff --git a/common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/chiado/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/chiado/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/gnosis/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/gnosis/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/holesky/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/holesky/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index c8695123ab0..fc9c002daba 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one -# * 'prater' - testnet +# * 'holesky' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt similarity index 100% rename from 
common/eth2_network_config/built_in_network_configs/mainnet/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/mainnet/deposit_contract_block.txt diff --git a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml deleted file mode 100644 index 7000ff0bbc4..00000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# q9f bootnode errai (lighthouse) -# /ip4/135.181.181.239/tcp/9000/p2p/16Uiu2HAmPitcpwsGZf1vGiu6hdwZHsVLyFzVZeNqaSmUaSyM7Xvj -- enr:-LK4QH1xnjotgXwg25IDPjrqRGFnH1ScgNHA3dv1Z8xHCp4uP3N3Jjl_aYv_WIxQRdwZvSukzbwspXZ7JjpldyeVDzMCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhIe1te-Jc2VjcDI1NmsxoQOkcGXqbCJYbcClZ3z5f6NWhX_1YPFRYRRWQpJjwSHpVIN0Y3CCIyiDdWRwgiMo -# q9f bootnode gudja (teku) -# /ip4/135.181.182.51/tcp/9000/p2p/16Uiu2HAmTttt9ZTmCmwmKiV3QR7iTAfnAckwzhswrNmWkthi6meB -- enr:-KG4QCIzJZTY_fs_2vqWEatJL9RrtnPwDCv-jRBuO5FQ2qBrfJubWOWazri6s9HsyZdu-fRUfEzkebhf1nvO42_FVzwDhGV0aDKQed8EKAAAECD__________4JpZIJ2NIJpcISHtbYziXNlY3AyNTZrMaED4m9AqVs6F32rSCGsjtYcsyfQE2K8nDiGmocUY_iq-TSDdGNwgiMog3VkcIIjKA -# Prysm bootnode #1 -- enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g -# Lighthouse bootnode #1 -- enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA -# Lighthouse bootnode #2 -- enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo -# Nimbus bootstrap nodes -- enr:-LK4QMzPq4Q7w5R-rnGQDcI8BYky6oPVBGQTbS1JJLVtNi_8PzBLV7Bdzsoame9nJK5bcJYpGHn4SkaDN2CM6tR5G_4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhAN4yvyJc2VjcDI1NmsxoQKa8Qnp_P2clLIP6VqLKOp_INvEjLszalEnW0LoBZo4YYN0Y3CCI4yDdWRwgiOM -- enr:-LK4QLM_pPHa78R8xlcU_s40Y3XhFjlb3kPddW9lRlY67N5qeFE2Wo7RgzDgRs2KLCXODnacVHMFw1SfpsW3R474RZEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhANBY-yJc2VjcDI1NmsxoQNsZkFXgKbTzuxF7uwxlGauTGJelE6HD269CcFlZ_R7A4N0Y3CCI4yDdWRwgiOM -# Teku bootnode -- enr:-KK4QH0RsNJmIG0EX9LSnVxMvg-CAOr3ZFF92hunU63uE7wcYBjG1cFbUTvEa5G_4nDJkRhUq9q2ck9xY-VX1RtBsruBtIRldGgykIL0pysBABAg__________-CaWSCdjSCaXCEEnXQ0YlzZWNwMjU2azGhA1grTzOdMgBvjNrk-vqWtTZsYQIi0QawrhoZrsn5Hd56g3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml deleted file mode 100644 index f474b172c51..00000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ /dev/null @@ -1,134 +0,0 @@ -# Prater config - -# Extends the mainnet preset -PRESET_BASE: 'mainnet' - -CONFIG_NAME: 'prater' - -# Transition -# --------------------------------------------------------------- -# Expected August 10, 2022 -TERMINAL_TOTAL_DIFFICULTY: 10790000 -# By default, don't use these params -TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 
-TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - - -# Genesis -# --------------------------------------------------------------- -# `2**14` (= 16,384) -MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 -# Mar-01-2021 08:53:32 AM +UTC -MIN_GENESIS_TIME: 1614588812 -# Prater area code (Vienna) -GENESIS_FORK_VERSION: 0x00001020 -# Customized for Prater: 1919188 seconds (Mar-23-2021 02:00:00 PM +UTC) -GENESIS_DELAY: 1919188 - - -# Forking -# --------------------------------------------------------------- -# Some forks are disabled for now: -# - These may be re-assigned to another fork-version later -# - Temporarily set to max uint64 value: 2**64 - 1 - -# Altair -ALTAIR_FORK_VERSION: 0x01001020 -ALTAIR_FORK_EPOCH: 36660 -# Bellatrix -BELLATRIX_FORK_VERSION: 0x02001020 -BELLATRIX_FORK_EPOCH: 112260 -# Capella -CAPELLA_FORK_VERSION: 0x03001020 -CAPELLA_FORK_EPOCH: 162304 -# DENEB -DENEB_FORK_VERSION: 0x04001020 -DENEB_FORK_EPOCH: 231680 - -# Time parameters -# --------------------------------------------------------------- -# 12 seconds -SECONDS_PER_SLOT: 12 -# 14 (estimate from Eth1 mainnet) -SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours -MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours -SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours -ETH1_FOLLOW_DISTANCE: 2048 - - -# Validator cycle -# --------------------------------------------------------------- -# 2**2 (= 4) -INACTIVITY_SCORE_BIAS: 4 -# 2**4 (= 16) -INACTIVITY_SCORE_RECOVERY_RATE: 16 -# 2**4 * 10**9 (= 16,000,000,000) Gwei -EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 -# 2**16 (= 65,536) -CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) -MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 - -# Fork choice -# --------------------------------------------------------------- -# 40% -PROPOSER_SCORE_BOOST: 40 -# 20% -REORG_HEAD_WEIGHT_THRESHOLD: 20 -# 160% -REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs -REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - -# Deposit contract -# --------------------------------------------------------------- -# Ethereum Goerli testnet -DEPOSIT_CHAIN_ID: 5 -DEPOSIT_NETWORK_ID: 5 -# Prater test deposit contract on Goerli Testnet -DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b - -# Networking -# --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 -# `2**10` (= 1024) -MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) -EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) -MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 -# 5s -TTFB_TIMEOUT: 5 -# 10s -RESP_TIMEOUT: 10 -ATTESTATION_PROPAGATION_SLOT_RANGE: 32 -# 500ms -MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 -MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 -MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 -# 2 subnets per node -SUBNETS_PER_NODE: 2 -# 2**8 (= 64) -ATTESTATION_SUBNET_COUNT: 64 -ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS -ATTESTATION_SUBNET_PREFIX_BITS: 6 -ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 - -# Deneb -# `2**7` (=128) -MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) -MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` -BLOB_SIDECAR_SUBNET_COUNT: 6 diff --git 
a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt deleted file mode 100644 index e8c50058b69..00000000000 --- a/common/eth2_network_config/built_in_network_configs/prater/deploy_block.txt +++ /dev/null @@ -1 +0,0 @@ -4367322 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip deleted file mode 100644 index 36bad7fae67..00000000000 Binary files a/common/eth2_network_config/built_in_network_configs/prater/genesis.ssz.zip and /dev/null differ diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml index f88fbc765af..22b711861f0 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -1,5 +1,6 @@ -# EF Team -- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk -- enr:-KG4QE5OIg5ThTjkzrlVF32WT_-XT14WeJtIz2zoTqLLjQhYAmJlnk4ItSoH41_2x0RX0wTFIe5GgjRzU2u7Q1fN4vADhGV0aDKQqP7o7pAAAHAyAAAAAAAAAIJpZIJ2NIJpcISlFsStiXNlY3AyNTZrMaEC-Rrd_bBZwhKpXzFCrStKp1q_HmGOewxY3KwM8ofAj_ODdGNwgiMog3VkcIIjKA -# Teku team (Consensys) -- enr:-Ly4QFoZTWR8ulxGVsWydTNGdwEESueIdj-wB6UmmjUcm-AOPxnQi7wprzwcdo7-1jBW_JxELlUKJdJES8TDsbl1EdNlh2F0dG5ldHOI__78_v2bsV-EZXRoMpA2-lATkAAAcf__________gmlkgnY0gmlwhBLYJjGJc2VjcDI1NmsxoQI0gujXac9rMAb48NtMqtSTyHIeNYlpjkbYpWJw46PmYYhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA +# EF bootnodes +- enr:-Ku4QDZ_rCowZFsozeWr60WwLgOfHzv1Fz2cuMvJqN5iJzLxKtVjoIURY42X_YTokMi3IGstW5v32uSYZyGUXj9Q_IECh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIpEe5iJc2VjcDI1NmsxoQNHTpFdaNSCEWiN_QqT396nb0PzcUpLe3OVtLph-AciBYN1ZHCCIy0 +- enr:-Ku4QHRyRwEPT7s0XLYzJ_EeeWvZTXBQb4UCGy1F_3m-YtCNTtDlGsCMr4UTgo4uR89pv11uM-xq4w6GKfKhqU31hTgCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIrFM7WJc2VjcDI1NmsxoQI4diTwChN3zAAkarf7smOHCdFb1q3DSwdiQ_Lc_FdzFIN1ZHCCIy0 +- enr:-Ku4QOkvvf0u5Hg4-HhY-SJmEyft77G5h3rUM8VF_e-Hag5cAma3jtmFoX4WElLAqdILCA-UWFRN1ZCDJJVuEHrFeLkDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhJK-AWeJc2VjcDI1NmsxoQLFcT5VE_NMiIC8Ll7GypWDnQ4UEmuzD7hF_Hf4veDJwIN1ZHCCIy0 +- enr:-Ku4QH6tYsHKITYeHUu5kdfXgEZWI18EWk_2RtGOn1jBPlx2UlS_uF3Pm5Dx7tnjOvla_zs-wwlPgjnEOcQDWXey51QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIs7Mc6Jc2VjcDI1NmsxoQIET4Mlv9YzhrYhX_H9D7aWMemUrvki6W4J2Qo0YmFMp4N1ZHCCIy0 +- enr:-Ku4QDmz-4c1InchGitsgNk4qzorWMiFUoaPJT4G0IiF8r2UaevrekND1o7fdoftNucirj7sFFTTn2-JdC2Ej0p1Mn8Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhKpA-liJc2VjcDI1NmsxoQMpHP5U1DK8O_JQU6FadmWbE42qEdcGlllR8HcSkkfWq4N1ZHCCIy0 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt similarity index 100% rename from common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt rename to common/eth2_network_config/built_in_network_configs/sepolia/deposit_contract_block.txt diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 1ead9a6bde8..fb8c6938cdb 100644 --- 
a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -29,14 +29,14 @@ use url::Url; pub use eth2_config::GenesisStateSource; -pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; +pub const DEPLOY_BLOCK_FILE: &str = "deposit_contract_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; pub const BASE_CONFIG_FILE: &str = "config.yaml"; // Creates definitions for: // -// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `PRATER`, etc). +// - Each of the `HardcodedNet` values (e.g., `MAINNET`, `HOLESKY`, etc). // - `HARDCODED_NETS: &[HardcodedNet]` // - `HARDCODED_NET_NAMES: &[&'static str]` instantiate_hardcoded_nets!(eth2_config); @@ -502,13 +502,6 @@ mod tests { .expect("beacon state can decode"); } - #[test] - fn prater_and_goerli_are_equal() { - let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); - let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); - assert_eq!(goerli, prater); - } - #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml index 6d905344011..fe966f4a9c6 100644 --- a/common/lighthouse_metrics/Cargo.toml +++ b/common/lighthouse_metrics/Cargo.toml @@ -7,5 +7,4 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = { workspace = true } prometheus = "0.13.0" diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 5d25bb313f6..4a76184b8a8 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -20,8 +20,7 @@ //! ## Example //! //! ```rust -//! #[macro_use] -//! extern crate lazy_static; +//! use lazy_static::lazy_static; //! use lighthouse_metrics::*; //! //! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 985eaff1b59..d32d7994689 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.1.3-", - fallback = "Lighthouse/v5.1.3" + prefix = "Lighthouse/v5.2.1-", + fallback = "Lighthouse/v5.2.1" ); /// Returns the first eight characters of the latest commit hash for this build. 
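For context on the `lazy_static` changes above (dropping `#[macro_use] extern crate lazy_static` in favour of a plain `use lazy_static::lazy_static;`), here is a minimal, self-contained sketch of the idiom the diff moves to. It is not Lighthouse's own metrics code: the metric name is made up, nothing is registered with a global registry, and it only assumes `lazy_static` plus `prometheus` 0.13 (the remaining dependency of `lighthouse_metrics`) as dependencies.

```rust
use lazy_static::lazy_static;
use prometheus::IntCounter;

lazy_static! {
    // Hypothetical metric, for illustration only.
    static ref EXAMPLE_RUNS: IntCounter =
        IntCounter::new("example_runs_total", "Number of example runs")
            .expect("metric is well-formed");
}

fn main() {
    // The static is lazily initialised on first access.
    EXAMPLE_RUNS.inc();
    println!("example_runs_total = {}", EXAMPLE_RUNS.get());
}
```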
diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 1fad56d475c..3a03d22f3cb 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -15,7 +15,6 @@ parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } slog = { workspace = true } -slog-async = { workspace = true } slog-term = { workspace = true } sloggers = { workspace = true } take_mut = "0.2.2" diff --git a/common/logging/src/async_record.rs b/common/logging/src/async_record.rs index 6f998c61915..81037b11a4e 100644 --- a/common/logging/src/async_record.rs +++ b/common/logging/src/async_record.rs @@ -123,12 +123,10 @@ impl Serializer for ToSendSerializer { take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); Ok(()) } - #[cfg(integer128)] fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); Ok(()) } - #[cfg(integer128)] fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); Ok(()) diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index b0e1da00e97..50d04fc088f 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,6 +1,4 @@ -#[macro_use] -extern crate lazy_static; - +use lazy_static::lazy_static; use lighthouse_metrics::{ inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, }; diff --git a/common/logging/src/tracing_metrics_layer.rs b/common/logging/src/tracing_metrics_layer.rs index 08c323ee890..b9dde584b4c 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,5 +1,6 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. +use lazy_static::lazy_static; use lighthouse_metrics as metrics; use tracing_log::NormalizeEvent; diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 0b2fd835687..890bf47eb44 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -166,6 +166,12 @@ where self.map.contains(key) } + /// List known keys + pub fn keys(&mut self) -> impl Iterator { + self.update(); + self.map.iter() + } + /// Shrink the mappings to fit the current size. 
pub fn shrink_to_fit(&mut self) { self.map.shrink_to_fit(); diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 4f54b2ee76b..a742e29457d 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate lazy_static; - mod manual_slot_clock; mod metrics; mod system_time_slot_clock; diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index 23a793b2034..ae3a9b599ff 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,4 +1,5 @@ use crate::SlotClock; +use lazy_static::lazy_static; pub use lighthouse_metrics::*; use types::{EthSpec, Slot}; diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index 5f0de80d90e..be339f27792 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -8,5 +8,4 @@ lighthouse_network = { workspace = true } types = { workspace = true } sysinfo = { workspace = true } serde = { workspace = true } -serde_json = { workspace = true } parking_lot = { workspace = true } diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 648c20121af..8c4b1ef7c35 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -1,6 +1,4 @@ -extern crate proc_macro; - -use crate::proc_macro::TokenStream; +use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 24b317dcfe3..4f9b786844d 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -39,8 +39,6 @@ pub enum Error { /// generally caused by supplying an `amount` at deposit-time that is different to the one used /// at generation-time. Eth1DepositRootMismatch, - #[cfg(feature = "unencrypted_keys")] - SszKeypairError(String), } /// Information required to submit a deposit to the Eth1 deposit contract. 
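The `keys()` method added to the time-based cache in `common/lru_cache/src/time.rs` a few hunks above follows a simple pattern: expire stale entries first, then hand back an iterator over whatever remains. The standalone sketch below mirrors that pattern with a plain `HashMap` and `Instant`; it is not the `lru_cache` crate itself, and the `TimedCache` type and its methods are invented for illustration.

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::time::{Duration, Instant};

/// Stand-in for a time-based cache; illustrative only.
struct TimedCache<K> {
    ttl: Duration,
    map: HashMap<K, Instant>,
}

impl<K: Hash + Eq> TimedCache<K> {
    fn new(ttl: Duration) -> Self {
        Self { ttl, map: HashMap::new() }
    }

    fn insert(&mut self, key: K) {
        self.map.insert(key, Instant::now());
    }

    /// Drop entries older than `ttl`.
    fn update(&mut self) {
        let ttl = self.ttl;
        self.map.retain(|_, inserted| inserted.elapsed() < ttl);
    }

    /// List known keys, pruning expired entries first (the same shape as the new `keys()`).
    fn keys(&mut self) -> impl Iterator<Item = &K> + '_ {
        self.update();
        self.map.keys()
    }
}

fn main() {
    let mut cache = TimedCache::new(Duration::from_secs(60));
    cache.insert("peer_a");
    cache.insert("peer_b");
    println!("known keys: {:?}", cache.keys().collect::<Vec<_>>());
}
```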
diff --git a/common/warp_utils/src/json.rs b/common/warp_utils/src/json.rs index 203a6495a42..6ee5e772616 100644 --- a/common/warp_utils/src/json.rs +++ b/common/warp_utils/src/json.rs @@ -1,4 +1,5 @@ use bytes::Bytes; +use eth2::{CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use serde::de::DeserializeOwned; use std::error::Error as StdError; use warp::{Filter, Rejection}; @@ -16,7 +17,17 @@ impl Json { } pub fn json() -> impl Filter + Copy { - warp::body::bytes().and_then(|bytes: Bytes| async move { - Json::decode(bytes).map_err(|err| reject::custom_deserialize_error(format!("{:?}", err))) - }) + warp::header::optional::(CONTENT_TYPE_HEADER) + .and(warp::body::bytes()) + .and_then(|header: Option, bytes: Bytes| async move { + if let Some(header) = header { + if header == SSZ_CONTENT_TYPE_HEADER { + return Err(reject::unsupported_media_type( + "The request's content-type is not supported".to_string(), + )); + } + } + Json::decode(bytes) + .map_err(|err| reject::custom_deserialize_error(format!("{:?}", err))) + }) } diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index b6bb5ace3d0..9b28c65212c 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -2,7 +2,7 @@ use eth2::types::{ErrorMessage, Failure, IndexedErrorMessage}; use std::convert::Infallible; use std::error::Error; use std::fmt; -use warp::{http::StatusCode, reject::Reject}; +use warp::{http::StatusCode, reject::Reject, reply::Response, Reply}; #[derive(Debug)] pub struct ServerSentEventError(pub String); @@ -136,6 +136,15 @@ pub fn invalid_auth(msg: String) -> warp::reject::Rejection { warp::reject::custom(InvalidAuthorization(msg)) } +#[derive(Debug)] +pub struct UnsupportedMediaType(pub String); + +impl Reject for UnsupportedMediaType {} + +pub fn unsupported_media_type(msg: String) -> warp::reject::Rejection { + warp::reject::custom(UnsupportedMediaType(msg)) +} + #[derive(Debug)] pub struct IndexedBadRequestErrors { pub message: String, @@ -170,6 +179,9 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result().is_some() { + code = StatusCode::UNSUPPORTED_MEDIA_TYPE; + message = "UNSUPPORTED_MEDIA_TYPE".to_string(); } else if let Some(e) = err.find::() { message = format!("BAD_REQUEST: body deserialize error: {}", e.0); code = StatusCode::BAD_REQUEST; @@ -243,3 +255,21 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result(res: Result) -> Response { + match res { + Ok(response) => response.into_response(), + Err(e) => match handle_rejection(e).await { + Ok(reply) => reply.into_response(), + Err(_) => warp::reply::with_status( + warp::reply::json(&"unhandled error"), + eth2::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response(), + }, + } +} diff --git a/common/warp_utils/src/task.rs b/common/warp_utils/src/task.rs index 001231f2c6b..e2fa4ebc368 100644 --- a/common/warp_utils/src/task.rs +++ b/common/warp_utils/src/task.rs @@ -1,3 +1,4 @@ +use crate::reject::convert_rejection; use serde::Serialize; use warp::reply::{Reply, Response}; @@ -24,14 +25,16 @@ where } /// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. 
-pub async fn blocking_json_task(func: F) -> Result +pub async fn blocking_json_task(func: F) -> Response where F: FnOnce() -> Result + Send + 'static, T: Serialize + Send + 'static, { - blocking_response_task(|| { + let result = blocking_response_task(|| { let response = func()?; Ok(warp::reply::json(&response)) }) - .await + .await; + + convert_rejection(result).await } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml deleted file mode 100644 index 05edc348565..00000000000 --- a/consensus/cached_tree_hash/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "cached_tree_hash" -version = "0.1.0" -authors = ["Michael Sproul "] -edition = { workspace = true } - -[dependencies] -ethereum-types = { workspace = true } -ssz_types = { workspace = true } -ethereum_hashing = { workspace = true } -ethereum_ssz_derive = { workspace = true } -ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } -smallvec = { workspace = true } - -[dev-dependencies] -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs deleted file mode 100644 index 450128f15ed..00000000000 --- a/consensus/cached_tree_hash/src/cache.rs +++ /dev/null @@ -1,237 +0,0 @@ -use crate::cache_arena; -use crate::SmallVec8; -use crate::{Error, Hash256}; -use ethereum_hashing::{hash32_concat, ZERO_HASHES}; -use smallvec::smallvec; -use ssz_derive::{Decode, Encode}; -use tree_hash::BYTES_PER_CHUNK; - -type CacheArena = cache_arena::CacheArena; -type CacheArenaAllocation = cache_arena::CacheArenaAllocation; - -/// Sparse Merkle tree suitable for tree hashing vectors and lists. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct TreeHashCache { - pub initialized: bool, - /// Depth is such that the tree has a capacity for 2^depth leaves - depth: usize, - /// Sparse layers. - /// - /// The leaves are contained in `self.layers[self.depth]`, and each other layer `i` - /// contains the parents of the nodes in layer `i + 1`. - layers: SmallVec8, -} - -impl TreeHashCache { - /// Create a new cache with the given `depth` with enough nodes allocated to suit `leaves`. All - /// leaves are set to `Hash256::zero()`. - pub fn new(arena: &mut CacheArena, depth: usize, leaves: usize) -> Self { - let mut layers = SmallVec8::with_capacity(depth + 1); - - for i in 0..=depth { - let vec = arena.alloc(); - vec.extend_with_vec( - arena, - smallvec![Hash256::zero(); nodes_per_layer(i, depth, leaves)], - ) - .expect("A newly allocated sub-arena cannot fail unless it has reached max capacity"); - - layers.push(vec) - } - - TreeHashCache { - initialized: false, - depth, - layers, - } - } - - /// Compute the updated Merkle root for the given `leaves`. - pub fn recalculate_merkle_root( - &mut self, - arena: &mut CacheArena, - leaves: impl ExactSizeIterator, - ) -> Result { - let dirty_indices = self.update_leaves(arena, leaves)?; - self.update_merkle_root(arena, dirty_indices) - } - - /// Phase 1 of the algorithm: compute the indices of all dirty leaves. - pub fn update_leaves( - &mut self, - arena: &mut CacheArena, - mut leaves: impl ExactSizeIterator, - ) -> Result, Error> { - let new_leaf_count = leaves.len(); - - if new_leaf_count < self.leaves().len(arena)? 
{ - return Err(Error::CannotShrink); - } else if new_leaf_count > 2usize.pow(self.depth as u32) { - return Err(Error::TooManyLeaves); - } - - let mut dirty = SmallVec8::new(); - - // Update the existing leaves - self.leaves() - .iter_mut(arena)? - .enumerate() - .zip(&mut leaves) - .for_each(|((i, leaf), new_leaf)| { - if !self.initialized || leaf.as_bytes() != new_leaf { - leaf.assign_from_slice(&new_leaf); - dirty.push(i); - } - }); - - // Push the rest of the new leaves (if any) - dirty.extend(self.leaves().len(arena)?..new_leaf_count); - self.leaves() - .extend_with_vec(arena, leaves.map(|l| Hash256::from_slice(&l)).collect())?; - - Ok(dirty) - } - - /// Phase 2: propagate changes upwards from the leaves of the tree, and compute the root. - /// - /// Returns an error if `dirty_indices` is inconsistent with the cache. - pub fn update_merkle_root( - &mut self, - arena: &mut CacheArena, - mut dirty_indices: SmallVec8, - ) -> Result { - if dirty_indices.is_empty() { - return Ok(self.root(arena)); - } - - let mut depth = self.depth; - - while depth > 0 { - let new_dirty_indices = lift_dirty(&dirty_indices); - - for &idx in &new_dirty_indices { - let left_idx = 2 * idx; - let right_idx = left_idx + 1; - - let left = self.layers[depth] - .get(arena, left_idx)? - .ok_or(Error::MissingLeftIdx(left_idx))?; - let right = self.layers[depth] - .get(arena, right_idx)? - .copied() - .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth - depth])); - - let new_hash = hash32_concat(left.as_bytes(), right.as_bytes()); - - match self.layers[depth - 1].get_mut(arena, idx)? { - Some(hash) => { - hash.assign_from_slice(&new_hash); - } - None => { - // Parent layer should already contain nodes for all non-dirty indices - if idx != self.layers[depth - 1].len(arena)? { - return Err(Error::CacheInconsistent); - } - self.layers[depth - 1].push(arena, Hash256::from_slice(&new_hash))?; - } - } - } - - dirty_indices = new_dirty_indices; - depth -= 1; - } - - self.initialized = true; - - Ok(self.root(arena)) - } - - /// Get the root of this cache, without doing any updates/computation. - pub fn root(&self, arena: &CacheArena) -> Hash256 { - self.layers[0] - .get(arena, 0) - .expect("cached tree should have a root layer") - .copied() - .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth])) - } - - pub fn leaves(&mut self) -> &mut CacheArenaAllocation { - &mut self.layers[self.depth] - } -} - -/// Compute the dirty indices for one layer up. -fn lift_dirty(dirty_indices: &[usize]) -> SmallVec8 { - let mut new_dirty = SmallVec8::with_capacity(dirty_indices.len()); - - for index in dirty_indices { - new_dirty.push(index / 2) - } - - new_dirty.dedup(); - new_dirty -} - -/// Returns the number of nodes that should be at each layer of a tree with the given `depth` and -/// number of `leaves`. -/// -/// Note: the top-most layer is `0` and a tree that has 8 leaves (4 layers) has a depth of 3 (_not_ -/// a depth of 4). -/// -/// ## Example -/// -/// Consider the following tree that has `depth = 3` and `leaves = 5`. 
-/// -///```ignore -/// 0 o <-- height 0 has 1 node -/// / \ -/// 1 o o <-- height 1 has 2 nodes -/// / \ / -/// 2 o o o <-- height 2 has 3 nodes -/// /\ /\ / -/// 3 o o o o o <-- height 3 have 5 nodes -/// ``` -fn nodes_per_layer(layer: usize, depth: usize, leaves: usize) -> usize { - if layer == depth { - leaves - } else { - let leaves_per_node = 1 << (depth - layer); - (leaves + leaves_per_node - 1) / leaves_per_node - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn zero_leaves() { - let arena = &mut CacheArena::default(); - - let depth = 3; - let num_leaves = 0; - - let mut cache = TreeHashCache::new(arena, depth, num_leaves); - let leaves: Vec<[u8; BYTES_PER_CHUNK]> = vec![]; - - cache - .recalculate_merkle_root(arena, leaves.into_iter()) - .expect("should calculate root"); - } - - #[test] - fn test_node_per_layer_unbalanced_tree() { - assert_eq!(nodes_per_layer(0, 3, 5), 1); - assert_eq!(nodes_per_layer(1, 3, 5), 2); - assert_eq!(nodes_per_layer(2, 3, 5), 3); - assert_eq!(nodes_per_layer(3, 3, 5), 5); - } - - #[test] - fn test_node_per_layer_balanced_tree() { - assert_eq!(nodes_per_layer(0, 3, 8), 1); - assert_eq!(nodes_per_layer(1, 3, 8), 2); - assert_eq!(nodes_per_layer(2, 3, 8), 4); - assert_eq!(nodes_per_layer(3, 3, 8), 8); - } -} diff --git a/consensus/cached_tree_hash/src/cache_arena.rs b/consensus/cached_tree_hash/src/cache_arena.rs deleted file mode 100644 index 42819e8df59..00000000000 --- a/consensus/cached_tree_hash/src/cache_arena.rs +++ /dev/null @@ -1,498 +0,0 @@ -use crate::SmallVec8; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::cmp::Ordering; -use std::marker::PhantomData; -use std::ops::Range; - -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - UnknownAllocId(usize), - OffsetOverflow, - OffsetUnderflow, - RangeOverFlow, -} - -/// Inspired by the `TypedArena` crate, the `CachedArena` provides a single contiguous memory -/// allocation from which smaller allocations can be produced. In effect this allows for having -/// many `Vec`-like objects all stored contiguously on the heap with the aim of reducing memory -/// fragmentation. -/// -/// Because all of the allocations are stored in one big `Vec`, resizing any of the allocations -/// will mean all items to the right of that allocation will be moved. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct CacheArena { - /// The backing array, storing cached values. - backing: Vec, - /// A list of offsets indicating the start of each allocation. - offsets: Vec, -} - -impl CacheArena { - /// Instantiate self with a backing array of the given `capacity`. - pub fn with_capacity(capacity: usize) -> Self { - Self { - backing: Vec::with_capacity(capacity), - offsets: vec![], - } - } - - /// Produce an allocation of zero length at the end of the backing array. - pub fn alloc(&mut self) -> CacheArenaAllocation { - let alloc_id = self.offsets.len(); - self.offsets.push(self.backing.len()); - - CacheArenaAllocation { - alloc_id, - _phantom: PhantomData, - } - } - - /// Update `self.offsets` to reflect an allocation increasing in size. - fn grow(&mut self, alloc_id: usize, grow_by: usize) -> Result<(), Error> { - if alloc_id < self.offsets.len() { - self.offsets - .iter_mut() - .skip(alloc_id + 1) - .try_for_each(|offset| { - *offset = offset.checked_add(grow_by).ok_or(Error::OffsetOverflow)?; - - Ok(()) - }) - } else { - Err(Error::UnknownAllocId(alloc_id)) - } - } - - /// Update `self.offsets` to reflect an allocation decreasing in size. 
- fn shrink(&mut self, alloc_id: usize, shrink_by: usize) -> Result<(), Error> { - if alloc_id < self.offsets.len() { - self.offsets - .iter_mut() - .skip(alloc_id + 1) - .try_for_each(|offset| { - *offset = offset - .checked_sub(shrink_by) - .ok_or(Error::OffsetUnderflow)?; - - Ok(()) - }) - } else { - Err(Error::UnknownAllocId(alloc_id)) - } - } - - /// Similar to `Vec::splice`, however the range is relative to some allocation (`alloc_id`) and - /// the replaced items are not returned (i.e., it is forgetful). - /// - /// To reiterate, the given `range` should be relative to the given `alloc_id`, not - /// `self.backing`. E.g., if the allocation has an offset of `20` and the range is `0..1`, then - /// the splice will translate to `self.backing[20..21]`. - fn splice_forgetful>( - &mut self, - alloc_id: usize, - range: Range, - replace_with: I, - ) -> Result<(), Error> { - let offset = *self - .offsets - .get(alloc_id) - .ok_or(Error::UnknownAllocId(alloc_id))?; - let start = range - .start - .checked_add(offset) - .ok_or(Error::RangeOverFlow)?; - let end = range.end.checked_add(offset).ok_or(Error::RangeOverFlow)?; - - let prev_len = self.backing.len(); - - self.backing.splice(start..end, replace_with); - - match prev_len.cmp(&self.backing.len()) { - Ordering::Greater => self.shrink(alloc_id, prev_len - self.backing.len())?, - Ordering::Less => self.grow(alloc_id, self.backing.len() - prev_len)?, - Ordering::Equal => {} - } - - Ok(()) - } - - /// Returns the length of the specified allocation. - fn len(&self, alloc_id: usize) -> Result { - let start = self - .offsets - .get(alloc_id) - .ok_or(Error::UnknownAllocId(alloc_id))?; - let end = self - .offsets - .get(alloc_id + 1) - .copied() - .unwrap_or(self.backing.len()); - - Ok(end - start) - } - - /// Get the value at position `i`, relative to the offset at `alloc_id`. - fn get(&self, alloc_id: usize, i: usize) -> Result, Error> { - if i < self.len(alloc_id)? { - let offset = self - .offsets - .get(alloc_id) - .ok_or(Error::UnknownAllocId(alloc_id))?; - Ok(self.backing.get(i + offset)) - } else { - Ok(None) - } - } - - /// Mutably get the value at position `i`, relative to the offset at `alloc_id`. - fn get_mut(&mut self, alloc_id: usize, i: usize) -> Result, Error> { - if i < self.len(alloc_id)? { - let offset = self - .offsets - .get(alloc_id) - .ok_or(Error::UnknownAllocId(alloc_id))?; - Ok(self.backing.get_mut(i + offset)) - } else { - Ok(None) - } - } - - /// Returns the range in `self.backing` that is occupied by some allocation. - fn range(&self, alloc_id: usize) -> Result, Error> { - let start = *self - .offsets - .get(alloc_id) - .ok_or(Error::UnknownAllocId(alloc_id))?; - let end = self - .offsets - .get(alloc_id + 1) - .copied() - .unwrap_or(self.backing.len()); - - Ok(start..end) - } - - /// Iterate through all values in some allocation. - fn iter(&self, alloc_id: usize) -> Result, Error> { - Ok(self.backing[self.range(alloc_id)?].iter()) - } - - /// Mutably iterate through all values in some allocation. - fn iter_mut(&mut self, alloc_id: usize) -> Result, Error> { - let range = self.range(alloc_id)?; - Ok(self.backing[range].iter_mut()) - } - - /// Returns the total number of items stored in the arena, the sum of all values in all - /// allocations. - pub fn backing_len(&self) -> usize { - self.backing.len() - } -} - -/// An allocation from a `CacheArena` that behaves like a `Vec`. -/// -/// All functions will modify the given `arena` instead of `self`. 
As such, it is safe to have -/// multiple instances of this allocation at once. -/// -/// For all functions that accept a `CacheArena` parameter, that arena should always be the one -/// that created `Self`. I.e., do not mix-and-match allocations and arenas unless you _really_ know -/// what you're doing (or want to have a bad time). -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct CacheArenaAllocation { - alloc_id: usize, - #[ssz(skip_serializing, skip_deserializing)] - _phantom: PhantomData, -} - -impl CacheArenaAllocation { - /// Grow the allocation in `arena`, appending `vec` to the current values. - pub fn extend_with_vec( - &self, - arena: &mut CacheArena, - vec: SmallVec8, - ) -> Result<(), Error> { - let len = arena.len(self.alloc_id)?; - arena.splice_forgetful(self.alloc_id, len..len, vec)?; - Ok(()) - } - - /// Push `item` to the end of the current allocation in `arena`. - /// - /// An error is returned if this allocation is not known to the given `arena`. - pub fn push(&self, arena: &mut CacheArena, item: T) -> Result<(), Error> { - let len = arena.len(self.alloc_id)?; - arena.splice_forgetful(self.alloc_id, len..len, vec![item])?; - Ok(()) - } - - /// Get the i'th item in the `arena` (relative to this allocation). - /// - /// An error is returned if this allocation is not known to the given `arena`. - pub fn get<'a>(&self, arena: &'a CacheArena, i: usize) -> Result, Error> { - arena.get(self.alloc_id, i) - } - - /// Mutably get the i'th item in the `arena` (relative to this allocation). - /// - /// An error is returned if this allocation is not known to the given `arena`. - pub fn get_mut<'a>( - &self, - arena: &'a mut CacheArena, - i: usize, - ) -> Result, Error> { - arena.get_mut(self.alloc_id, i) - } - - /// Iterate through all items in the `arena` (relative to this allocation). - pub fn iter<'a>(&self, arena: &'a CacheArena) -> Result, Error> { - arena.iter(self.alloc_id) - } - - /// Mutably iterate through all items in the `arena` (relative to this allocation). - pub fn iter_mut<'a>( - &self, - arena: &'a mut CacheArena, - ) -> Result, Error> { - arena.iter_mut(self.alloc_id) - } - - /// Return the number of items stored in this allocation. - pub fn len(&self, arena: &CacheArena) -> Result { - arena.len(self.alloc_id) - } - - /// Returns true if this allocation is empty. 
- pub fn is_empty(&self, arena: &CacheArena) -> Result { - self.len(arena).map(|len| len == 0) - } -} - -#[cfg(test)] -mod tests { - use crate::Hash256; - use smallvec::smallvec; - - type CacheArena = super::CacheArena; - type CacheArenaAllocation = super::CacheArenaAllocation; - - fn hash(i: usize) -> Hash256 { - Hash256::from_low_u64_be(i as u64) - } - - fn test_routine(arena: &mut CacheArena, sub: &mut CacheArenaAllocation) { - let mut len = sub.len(arena).expect("should exist"); - - sub.push(arena, hash(len)).expect("should push"); - len += 1; - - assert_eq!( - sub.len(arena).expect("should exist"), - len, - "after first push sub should have len {}", - len - ); - assert!( - !sub.is_empty(arena).expect("should exist"), - "new sub should not be empty" - ); - - sub.push(arena, hash(len)).expect("should push again"); - len += 1; - - assert_eq!( - sub.len(arena).expect("should exist"), - len, - "after second push sub should have len {}", - len - ); - - sub.extend_with_vec(arena, smallvec![hash(len), hash(len + 1)]) - .expect("should extend with vec"); - len += 2; - - assert_eq!( - sub.len(arena).expect("should exist"), - len, - "after extend sub should have len {}", - len - ); - - let collected = sub - .iter(arena) - .expect("should get iter") - .cloned() - .collect::>(); - let collected_mut = sub - .iter_mut(arena) - .expect("should get mut iter") - .map(|v| *v) - .collect::>(); - - for i in 0..len { - assert_eq!( - *sub.get(arena, i) - .expect("should exist") - .expect("should get sub index"), - hash(i), - "get({}) should be hash({})", - i, - i - ); - - assert_eq!( - collected[i], - hash(i), - "collected[{}] should be hash({})", - i, - i - ); - - assert_eq!( - collected_mut[i], - hash(i), - "collected_mut[{}] should be hash({})", - i, - i - ); - } - } - - #[test] - fn single() { - let arena = &mut CacheArena::default(); - - assert_eq!(arena.backing.len(), 0, "should start with an empty backing"); - assert_eq!(arena.offsets.len(), 0, "should start without any offsets"); - - let mut sub = arena.alloc(); - - assert_eq!( - sub.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - - test_routine(arena, &mut sub); - } - - #[test] - fn double() { - let arena = &mut CacheArena::default(); - - assert_eq!(arena.backing.len(), 0, "should start with an empty backing"); - assert_eq!(arena.offsets.len(), 0, "should start without any offsets"); - - let mut sub_01 = arena.alloc(); - assert_eq!( - sub_01.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub_01.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - - let mut sub_02 = arena.alloc(); - assert_eq!( - sub_02.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub_02.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - - test_routine(arena, &mut sub_01); - test_routine(arena, &mut sub_02); - } - - #[test] - fn one_then_other() { - let arena = &mut CacheArena::default(); - - assert_eq!(arena.backing.len(), 0, "should start with an empty backing"); - assert_eq!(arena.offsets.len(), 0, "should start without any offsets"); - - let mut sub_01 = arena.alloc(); - assert_eq!( - sub_01.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub_01.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - - test_routine(arena, &mut sub_01); - - let mut sub_02 = arena.alloc(); - 
assert_eq!( - sub_02.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub_02.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - - test_routine(arena, &mut sub_02); - test_routine(arena, &mut sub_01); - test_routine(arena, &mut sub_02); - } - - #[test] - fn many() { - let arena = &mut CacheArena::default(); - - assert_eq!(arena.backing.len(), 0, "should start with an empty backing"); - assert_eq!(arena.offsets.len(), 0, "should start without any offsets"); - - let mut subs = vec![]; - - for i in 0..50 { - if i == 0 { - let sub = arena.alloc(); - assert_eq!( - sub.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - subs.push(sub); - - continue; - } else if i % 2 == 0 { - test_routine(arena, &mut subs[i - 1]); - } - - let sub = arena.alloc(); - assert_eq!( - sub.len(arena).expect("should exist"), - 0, - "new sub should have len 0" - ); - assert!( - sub.is_empty(arena).expect("should exist"), - "new sub should be empty" - ); - subs.push(sub); - } - - for sub in subs.iter_mut() { - test_routine(arena, sub); - } - } -} diff --git a/consensus/cached_tree_hash/src/impls.rs b/consensus/cached_tree_hash/src/impls.rs deleted file mode 100644 index efdba32b59d..00000000000 --- a/consensus/cached_tree_hash/src/impls.rs +++ /dev/null @@ -1,138 +0,0 @@ -use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use ssz_types::{typenum::Unsigned, FixedVector, VariableList}; -use std::mem::size_of; -use tree_hash::{mix_in_length, BYTES_PER_CHUNK}; - -/// Compute ceil(log(n)) -/// -/// Smallest number of bits d so that n <= 2^d -pub fn int_log(n: usize) -> usize { - match n.checked_next_power_of_two() { - Some(x) => x.trailing_zeros() as usize, - None => 8 * std::mem::size_of::(), - } -} - -pub fn hash256_leaf_count(len: usize) -> usize { - len -} - -pub fn u64_leaf_count(len: usize) -> usize { - let type_size = size_of::(); - let vals_per_chunk = BYTES_PER_CHUNK / type_size; - - (len + vals_per_chunk - 1) / vals_per_chunk -} - -pub fn hash256_iter( - values: &[Hash256], -) -> impl ExactSizeIterator + '_ { - values.iter().copied().map(Hash256::to_fixed_bytes) -} - -pub fn u64_iter(values: &[u64]) -> impl ExactSizeIterator + '_ { - let type_size = size_of::(); - let vals_per_chunk = BYTES_PER_CHUNK / type_size; - values.chunks(vals_per_chunk).map(move |xs| { - xs.iter().map(|x| x.to_le_bytes()).enumerate().fold( - [0; BYTES_PER_CHUNK], - |mut chunk, (i, x_bytes)| { - chunk[i * type_size..(i + 1) * type_size].copy_from_slice(&x_bytes); - chunk - }, - ) - }) -} - -impl CachedTreeHash for FixedVector { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new( - arena, - int_log(N::to_usize()), - hash256_leaf_count(self.len()), - ) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - cache.recalculate_merkle_root(arena, hash256_iter(self)) - } -} - -impl CachedTreeHash for FixedVector { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - let vals_per_chunk = BYTES_PER_CHUNK / size_of::(); - TreeHashCache::new( - arena, - int_log(N::to_usize() / vals_per_chunk), - u64_leaf_count(self.len()), - ) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - cache.recalculate_merkle_root(arena, u64_iter(self)) - } -} - -impl CachedTreeHash 
for VariableList { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new( - arena, - int_log(N::to_usize()), - hash256_leaf_count(self.len()), - ) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, hash256_iter(self))?, - self.len(), - )) - } -} - -impl CachedTreeHash for VariableList { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - let vals_per_chunk = BYTES_PER_CHUNK / size_of::(); - TreeHashCache::new( - arena, - int_log(N::to_usize() / vals_per_chunk), - u64_leaf_count(self.len()), - ) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, u64_iter(self))?, - self.len(), - )) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_int_log() { - for i in 0..63 { - assert_eq!(int_log(2usize.pow(i)), i as usize); - } - assert_eq!(int_log(10), 4); - } -} diff --git a/consensus/cached_tree_hash/src/lib.rs b/consensus/cached_tree_hash/src/lib.rs deleted file mode 100644 index af333f26700..00000000000 --- a/consensus/cached_tree_hash/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -mod cache; -mod cache_arena; -mod impls; -#[cfg(test)] -mod test; -use smallvec::SmallVec; - -type SmallVec8 = SmallVec<[T; 8]>; -pub type CacheArena = cache_arena::CacheArena; - -pub use crate::cache::TreeHashCache; -pub use crate::impls::int_log; -use ethereum_types::H256 as Hash256; - -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - /// Attempting to provide more than 2^depth leaves to a Merkle tree is disallowed. - TooManyLeaves, - /// Shrinking a Merkle tree cache by providing it with less leaves than it currently has is - /// disallowed (for simplicity). - CannotShrink, - /// Cache is inconsistent with the list of dirty indices provided. - CacheInconsistent, - CacheArenaError(cache_arena::Error), - /// Unable to find left index in Merkle tree. - MissingLeftIdx(usize), -} - -impl From for Error { - fn from(e: cache_arena::Error) -> Error { - Error::CacheArenaError(e) - } -} - -/// Trait for types which can make use of a cache to accelerate calculation of their tree hash root. -pub trait CachedTreeHash { - /// Create a new cache appropriate for use with values of this type. - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> Cache; - - /// Update the cache and use it to compute the tree hash root for `self`. 
- fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut Cache, - ) -> Result; -} diff --git a/consensus/cached_tree_hash/src/test.rs b/consensus/cached_tree_hash/src/test.rs deleted file mode 100644 index 69b49826bf8..00000000000 --- a/consensus/cached_tree_hash/src/test.rs +++ /dev/null @@ -1,153 +0,0 @@ -use crate::impls::hash256_iter; -use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use ethereum_hashing::ZERO_HASHES; -use quickcheck_macros::quickcheck; -use ssz_types::{ - typenum::{Unsigned, U16, U255, U256, U257}, - FixedVector, VariableList, -}; -use tree_hash::TreeHash; - -fn int_hashes(start: u64, end: u64) -> Vec { - (start..end).map(Hash256::from_low_u64_le).collect() -} - -type List16 = VariableList; -type Vector16 = FixedVector; -type Vector16u64 = FixedVector; - -#[test] -fn max_leaves() { - let arena = &mut CacheArena::default(); - let depth = 4; - let max_len = 2u64.pow(depth as u32); - let mut cache = TreeHashCache::new(arena, depth, 2); - assert!(cache - .recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len - 1))) - .is_ok()); - assert!(cache - .recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len))) - .is_ok()); - assert_eq!( - cache.recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len + 1))), - Err(Error::TooManyLeaves) - ); - assert_eq!( - cache.recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len * 2))), - Err(Error::TooManyLeaves) - ); -} - -#[test] -fn cannot_shrink() { - let arena = &mut CacheArena::default(); - let init_len = 12; - let list1 = List16::new(int_hashes(0, init_len)).unwrap(); - let list2 = List16::new(int_hashes(0, init_len - 1)).unwrap(); - - let mut cache = list1.new_tree_hash_cache(arena); - assert!(list1.recalculate_tree_hash_root(arena, &mut cache).is_ok()); - assert_eq!( - list2.recalculate_tree_hash_root(arena, &mut cache), - Err(Error::CannotShrink) - ); -} - -#[test] -fn empty_leaves() { - let arena = &mut CacheArena::default(); - let depth = 20; - let mut cache = TreeHashCache::new(arena, depth, 0); - assert_eq!( - cache - .recalculate_merkle_root(arena, vec![].into_iter()) - .unwrap() - .as_bytes(), - &ZERO_HASHES[depth][..] 
- ); -} - -#[test] -fn fixed_vector_hash256() { - let arena = &mut CacheArena::default(); - let len = 16; - let vec = Vector16::new(int_hashes(0, len)).unwrap(); - - let mut cache = vec.new_tree_hash_cache(arena); - - assert_eq!( - vec.tree_hash_root(), - vec.recalculate_tree_hash_root(arena, &mut cache).unwrap() - ); -} - -#[test] -fn fixed_vector_u64() { - let arena = &mut CacheArena::default(); - let len = 16; - let vec = Vector16u64::new((0..len).collect()).unwrap(); - - let mut cache = vec.new_tree_hash_cache(arena); - - assert_eq!( - vec.tree_hash_root(), - vec.recalculate_tree_hash_root(arena, &mut cache).unwrap() - ); -} - -#[test] -fn variable_list_hash256() { - let arena = &mut CacheArena::default(); - let len = 13; - let list = List16::new(int_hashes(0, len)).unwrap(); - - let mut cache = list.new_tree_hash_cache(arena); - - assert_eq!( - list.tree_hash_root(), - list.recalculate_tree_hash_root(arena, &mut cache).unwrap() - ); -} - -#[quickcheck] -fn quickcheck_variable_list_h256_256(leaves_and_skips: Vec<(u64, bool)>) -> bool { - variable_list_h256_test::(leaves_and_skips) -} - -#[quickcheck] -fn quickcheck_variable_list_h256_255(leaves_and_skips: Vec<(u64, bool)>) -> bool { - variable_list_h256_test::(leaves_and_skips) -} - -#[quickcheck] -fn quickcheck_variable_list_h256_257(leaves_and_skips: Vec<(u64, bool)>) -> bool { - variable_list_h256_test::(leaves_and_skips) -} - -fn variable_list_h256_test(leaves_and_skips: Vec<(u64, bool)>) -> bool { - let arena = &mut CacheArena::default(); - let leaves: Vec<_> = leaves_and_skips - .iter() - .map(|(l, _)| Hash256::from_low_u64_be(*l)) - .take(Len::to_usize()) - .collect(); - - let mut list: VariableList; - let init: VariableList = VariableList::new(vec![]).unwrap(); - let mut cache = init.new_tree_hash_cache(arena); - - for (end, (_, update_cache)) in leaves_and_skips.into_iter().enumerate() { - list = VariableList::new(leaves[..end].to_vec()).unwrap(); - - if update_cache - && list - .recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes() - != &list.tree_hash_root()[..] 
- { - return false; - } - } - true -} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 2846a0112cd..c55219a6761 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -14,8 +14,8 @@ use std::marker::PhantomData; use std::time::Duration; use types::{ consts::bellatrix::INTERVALS_PER_SLOT, AbstractExecPayload, AttestationShufflingId, - AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, - EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, + AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, + Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, }; @@ -238,13 +238,13 @@ pub struct QueuedAttestation { target_epoch: Epoch, } -impl From<&IndexedAttestation> for QueuedAttestation { - fn from(a: &IndexedAttestation) -> Self { +impl<'a, E: EthSpec> From> for QueuedAttestation { + fn from(a: IndexedAttestationRef<'a, E>) -> Self { Self { - slot: a.data.slot, - attesting_indices: a.attesting_indices[..].to_vec(), - block_root: a.data.beacon_block_root, - target_epoch: a.data.target.epoch, + slot: a.data().slot, + attesting_indices: a.attesting_indices_to_vec(), + block_root: a.data().beacon_block_root, + target_epoch: a.data().target.epoch, } } } @@ -940,7 +940,7 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#validate_on_attestation fn validate_on_attestation( &self, - indexed_attestation: &IndexedAttestation, + indexed_attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. Reject @@ -948,20 +948,20 @@ where // // This is not in the specification, however it should be transparent to other nodes. We // return early here to avoid wasting precious resources verifying the rest of it. - if indexed_attestation.attesting_indices.is_empty() { + if indexed_attestation.attesting_indices_is_empty() { return Err(InvalidAttestation::EmptyAggregationBitfield); } - let target = indexed_attestation.data.target; + let target = indexed_attestation.data().target; if matches!(is_from_block, AttestationFromBlock::False) { self.validate_target_epoch_against_current_time(target.epoch)?; } - if target.epoch != indexed_attestation.data.slot.epoch(E::slots_per_epoch()) { + if target.epoch != indexed_attestation.data().slot.epoch(E::slots_per_epoch()) { return Err(InvalidAttestation::BadTargetEpoch { target: target.epoch, - slot: indexed_attestation.data.slot, + slot: indexed_attestation.data().slot, }); } @@ -983,9 +983,9 @@ where // attestation and do not delay consideration for later. let block = self .proto_array - .get_block(&indexed_attestation.data.beacon_block_root) + .get_block(&indexed_attestation.data().beacon_block_root) .ok_or(InvalidAttestation::UnknownHeadBlock { - beacon_block_root: indexed_attestation.data.beacon_block_root, + beacon_block_root: indexed_attestation.data().beacon_block_root, })?; // If an attestation points to a block that is from an earlier slot than the attestation, @@ -993,7 +993,7 @@ where // is from a prior epoch to the attestation, then the target root must be equal to the root // of the block that is being attested to. 
let expected_target = if target.epoch > block.slot.epoch(E::slots_per_epoch()) { - indexed_attestation.data.beacon_block_root + indexed_attestation.data().beacon_block_root } else { block.target_root }; @@ -1007,10 +1007,10 @@ where // Attestations must not be for blocks in the future. If this is the case, the attestation // should not be considered. - if block.slot > indexed_attestation.data.slot { + if block.slot > indexed_attestation.data().slot { return Err(InvalidAttestation::AttestsToFutureBlock { block: block.slot, - attestation: indexed_attestation.data.slot, + attestation: indexed_attestation.data().slot, }); } @@ -1037,7 +1037,7 @@ where pub fn on_attestation( &mut self, system_time_current_slot: Slot, - attestation: &IndexedAttestation, + attestation: IndexedAttestationRef, is_from_block: AttestationFromBlock, ) -> Result<(), Error> { self.update_time(system_time_current_slot)?; @@ -1055,18 +1055,18 @@ where // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is // fine because votes to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. - if attestation.data.beacon_block_root == Hash256::zero() { + if attestation.data().beacon_block_root == Hash256::zero() { return Ok(()); } self.validate_on_attestation(attestation, is_from_block)?; - if attestation.data.slot < self.fc_store.get_current_slot() { - for validator_index in attestation.attesting_indices.iter() { + if attestation.data().slot < self.fc_store.get_current_slot() { + for validator_index in attestation.attesting_indices_iter() { self.proto_array.process_attestation( *validator_index as usize, - attestation.data.beacon_block_root, - attestation.data.target.epoch, + attestation.data().beacon_block_root, + attestation.data().target.epoch, )?; } } else { @@ -1086,15 +1086,14 @@ where /// Apply an attester slashing to fork choice. /// /// We assume that the attester slashing provided to this function has already been verified. 
- pub fn on_attester_slashing(&mut self, slashing: &AttesterSlashing) { - let attesting_indices_set = |att: &IndexedAttestation| { - att.attesting_indices - .iter() + pub fn on_attester_slashing(&mut self, slashing: AttesterSlashingRef<'_, E>) { + let attesting_indices_set = |att: IndexedAttestationRef<'_, E>| { + att.attesting_indices_iter() .copied() .collect::>() }; - let att1_indices = attesting_indices_set(&slashing.attestation_1); - let att2_indices = attesting_indices_set(&slashing.attestation_2); + let att1_indices = attesting_indices_set(slashing.attestation_1()); + let att2_indices = attesting_indices_set(slashing.attestation_2()); self.fc_store .extend_equivocating_indices(att1_indices.intersection(&att2_indices).copied()); } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 3153275fb73..d2935dbca45 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -435,7 +435,12 @@ impl ForkChoiceTest { let validator_committee_index = 0; let validator_index = *head .beacon_state - .get_beacon_committee(current_slot, attestation.data.index) + .get_beacon_committee( + current_slot, + attestation + .committee_index() + .expect("should get committee index"), + ) .expect("should get committees") .committee .get(validator_committee_index) @@ -830,8 +835,13 @@ async fn invalid_attestation_empty_bitfield() { .await .apply_attestation_to_chain( MutationDelay::NoDelay, - |attestation, _| { - attestation.attesting_indices = vec![].into(); + |attestation, _| match attestation { + IndexedAttestation::Base(ref mut att) => { + att.attesting_indices = vec![].into(); + } + IndexedAttestation::Electra(ref mut att) => { + att.attesting_indices = vec![].into(); + } }, |result| { assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield) @@ -853,7 +863,7 @@ async fn invalid_attestation_future_epoch() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { - attestation.data.target.epoch = Epoch::new(2); + attestation.data_mut().target.epoch = Epoch::new(2); }, |result| { assert_invalid_attestation!( @@ -879,7 +889,7 @@ async fn invalid_attestation_past_epoch() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { - attestation.data.target.epoch = Epoch::new(0); + attestation.data_mut().target.epoch = Epoch::new(0); }, |result| { assert_invalid_attestation!( @@ -903,7 +913,7 @@ async fn invalid_attestation_target_epoch() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { - attestation.data.slot = Slot::new(1); + attestation.data_mut().slot = Slot::new(1); }, |result| { assert_invalid_attestation!( @@ -929,7 +939,7 @@ async fn invalid_attestation_unknown_target_root() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { - attestation.data.target.root = junk; + attestation.data_mut().target.root = junk; }, |result| { assert_invalid_attestation!( @@ -955,7 +965,7 @@ async fn invalid_attestation_unknown_beacon_block_root() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { - attestation.data.beacon_block_root = junk; + attestation.data_mut().beacon_block_root = junk; }, |result| { assert_invalid_attestation!( @@ -979,7 +989,7 @@ async fn invalid_attestation_future_block() { .apply_attestation_to_chain( MutationDelay::Blocks(1), |attestation, chain| { - attestation.data.beacon_block_root = chain + attestation.data_mut().beacon_block_root = chain .block_at_slot(chain.slot().unwrap(), WhenSlotSkipped::Prev) 
.unwrap() .unwrap() @@ -1010,13 +1020,13 @@ async fn invalid_attestation_inconsistent_ffg_vote() { .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, chain| { - attestation.data.target.root = chain + attestation.data_mut().target.root = chain .block_at_slot(Slot::new(1), WhenSlotSkipped::Prev) .unwrap() .unwrap() .canonical_root(); - *attestation_opt.lock().unwrap() = Some(attestation.data.target.root); + *attestation_opt.lock().unwrap() = Some(attestation.data().target.root); *local_opt.lock().unwrap() = Some( chain .block_at_slot(Slot::new(0), WhenSlotSkipped::Prev) @@ -1069,8 +1079,8 @@ async fn valid_attestation_skip_across_epoch() { MutationDelay::NoDelay, |attestation, _chain| { assert_eq!( - attestation.data.target.root, - attestation.data.beacon_block_root + attestation.data().target.root, + attestation.data().beacon_block_root ) }, |result| result.unwrap(), diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ebb639819d2..57648499753 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -285,17 +285,17 @@ impl ForkChoiceTestDefinition { } } -/// Gives a root that is not the zero hash (unless i is `usize::max_value)`. +/// Gives a root that is not the zero hash (unless i is `usize::MAX)`. fn get_root(i: u64) -> Hash256 { Hash256::from_low_u64_be(i + 1) } -/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. +/// Gives a hash that is not the zero hash (unless i is `usize::MAX)`. fn get_hash(i: u64) -> ExecutionBlockHash { ExecutionBlockHash::from_root(get_root(i)) } -/// Gives a checkpoint with a root that is not the zero hash (unless i is `usize::max_value)`. +/// Gives a checkpoint with a root that is not the zero hash (unless i is `usize::MAX)`. /// `Epoch` will always equal `i`. fn get_checkpoint(i: u64) -> Checkpoint { Checkpoint { diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index 58ac6af60ba..01994fff9b2 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -738,7 +738,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // Ensure that pruning below the prune threshold does not prune. 
ops.push(Operation::Prune { finalized_root: get_root(5), - prune_threshold: usize::max_value(), + prune_threshold: usize::MAX, expected_len: 11, }); diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 780563954c2..b05a55e6862 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,5 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::{SszContainer, SszContainerV16, SszContainerV17}; + pub use super::ssz_container::{SszContainer, SszContainerV17}; } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 7c2ecfe26a6..efe154a27e1 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -70,7 +70,7 @@ impl InvalidationOperation { pub type ProtoNode = ProtoNodeV17; #[superstruct( - variants(V16, V17), + variants(V17), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)), no_enum )] @@ -92,12 +92,6 @@ pub struct ProtoNode { pub root: Hash256, #[ssz(with = "four_byte_option_usize")] pub parent: Option, - #[superstruct(only(V16))] - #[ssz(with = "four_byte_option_checkpoint")] - pub justified_checkpoint: Option, - #[superstruct(only(V16))] - #[ssz(with = "four_byte_option_checkpoint")] - pub finalized_checkpoint: Option, #[superstruct(only(V17))] pub justified_checkpoint: Checkpoint, #[superstruct(only(V17))] @@ -116,57 +110,6 @@ pub struct ProtoNode { pub unrealized_finalized_checkpoint: Option, } -impl TryInto for ProtoNodeV16 { - type Error = Error; - - fn try_into(self) -> Result { - let result = ProtoNode { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: self - .justified_checkpoint - .ok_or(Error::MissingJustifiedCheckpoint)?, - finalized_checkpoint: self - .finalized_checkpoint - .ok_or(Error::MissingFinalizedCheckpoint)?, - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - }; - Ok(result) - } -} - -impl Into for ProtoNode { - fn into(self) -> ProtoNodeV16 { - ProtoNodeV16 { - slot: self.slot, - state_root: self.state_root, - target_root: self.target_root, - current_epoch_shuffling_id: self.current_epoch_shuffling_id, - next_epoch_shuffling_id: self.next_epoch_shuffling_id, - root: self.root, - parent: self.parent, - justified_checkpoint: Some(self.justified_checkpoint), - finalized_checkpoint: Some(self.finalized_checkpoint), - weight: self.weight, - best_child: self.best_child, - best_descendant: self.best_descendant, - execution_status: self.execution_status, - unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, - } - } -} - #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] pub struct ProposerBoost { pub root: Hash256, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index e1faba369f3..4b7050df7d7 100644 
--- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -995,7 +995,7 @@ mod test_compute_deltas { use super::*; use types::MainnetEthSpec; - /// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. + /// Gives a hash that is not the zero hash (unless i is `usize::MAX)`. fn hash_from_index(i: usize) -> Hash256 { Hash256::from_low_u64_be(i as u64 + 1) } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 3208584dc45..8abb60d8e6a 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{ProtoArray, ProtoNodeV16, ProtoNodeV17}, + proto_array::{ProtoArray, ProtoNodeV17}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, Error, JustifiedBalances, }; @@ -16,62 +16,19 @@ four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); pub type SszContainer = SszContainerV17; -#[superstruct( - variants(V16, V17), - variant_attributes(derive(Encode, Decode)), - no_enum -)] +#[superstruct(variants(V17), variant_attributes(derive(Encode, Decode)), no_enum)] pub struct SszContainer { pub votes: Vec, pub balances: Vec, pub prune_threshold: usize, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, - #[superstruct(only(V16))] - pub nodes: Vec, #[superstruct(only(V17))] pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, pub previous_proposer_boost: ProposerBoost, } -impl TryInto for SszContainerV16 { - type Error = Error; - - fn try_into(self) -> Result { - let nodes: Result, Error> = - self.nodes.into_iter().map(TryInto::try_into).collect(); - - Ok(SszContainer { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes: nodes?, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - }) - } -} - -impl Into for SszContainer { - fn into(self) -> SszContainerV16 { - let nodes = self.nodes.into_iter().map(Into::into).collect(); - - SszContainerV16 { - votes: self.votes, - balances: self.balances, - prune_threshold: self.prune_threshold, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, - nodes, - indices: self.indices, - previous_proposer_boost: self.previous_proposer_boost, - } - } -} - impl From<&ProtoArrayForkChoice> for SszContainer { fn from(from: &ProtoArrayForkChoice) -> Self { let proto_array = &from.proto_array; diff --git a/consensus/safe_arith/src/lib.rs b/consensus/safe_arith/src/lib.rs index c1dbff4c7c8..aa397c0603e 100644 --- a/consensus/safe_arith/src/lib.rs +++ b/consensus/safe_arith/src/lib.rs @@ -155,12 +155,12 @@ mod test { #[test] fn errors() { - assert!(u32::max_value().safe_add(1).is_err()); - assert!(u32::min_value().safe_sub(1).is_err()); - assert!(u32::max_value().safe_mul(2).is_err()); - assert!(u32::max_value().safe_div(0).is_err()); - assert!(u32::max_value().safe_rem(0).is_err()); - assert!(u32::max_value().safe_shl(32).is_err()); - assert!(u32::max_value().safe_shr(32).is_err()); + assert!(u32::MAX.safe_add(1).is_err()); + assert!(u32::MIN.safe_sub(1).is_err()); + assert!(u32::MAX.safe_mul(2).is_err()); + assert!(u32::MAX.safe_div(0).is_err()); + assert!(u32::MAX.safe_rem(0).is_err()); + assert!(u32::MAX.safe_shl(32).is_err()); + 
assert!(u32::MAX.safe_shr(32).is_err()); } } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index be5367eb08f..e05c0bcfebc 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -28,6 +28,8 @@ arbitrary = { workspace = true } lighthouse_metrics = { workspace = true } lazy_static = { workspace = true } derivative = { workspace = true } +test_random_derive = { path = "../../common/test_random_derive" } +rand = { workspace = true } [features] default = ["legacy-arith"] diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index a89b71ff2b5..b131f7679a3 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -1,32 +1,172 @@ use types::*; -/// Returns validator indices which participated in the attestation, sorted by increasing index. -pub fn get_attesting_indices( - committee: &[usize], - bitlist: &BitList, -) -> Result, BeaconStateError> { - if bitlist.len() != committee.len() { - return Err(BeaconStateError::InvalidBitfield); +pub mod attesting_indices_base { + use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; + use types::*; + + /// Convert `attestation` to (almost) indexed-verifiable form. + /// + /// Spec v0.12.1 + pub fn get_indexed_attestation( + committee: &[usize], + attestation: &AttestationBase, + ) -> Result, BlockOperationError> { + let attesting_indices = + get_attesting_indices::(committee, &attestation.aggregation_bits)?; + Ok(IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: VariableList::new(attesting_indices)?, + data: attestation.data.clone(), + signature: attestation.signature.clone(), + })) } - let mut indices = Vec::with_capacity(bitlist.num_set_bits()); + /// Returns validator indices which participated in the attestation, sorted by increasing index. + pub fn get_attesting_indices( + committee: &[usize], + bitlist: &BitList, + ) -> Result, BeaconStateError> { + if bitlist.len() != committee.len() { + return Err(BeaconStateError::InvalidBitfield); + } + + let mut indices = Vec::with_capacity(bitlist.num_set_bits()); - for (i, validator_index) in committee.iter().enumerate() { - if let Ok(true) = bitlist.get(i) { - indices.push(*validator_index as u64) + for (i, validator_index) in committee.iter().enumerate() { + if let Ok(true) = bitlist.get(i) { + indices.push(*validator_index as u64) + } } + + indices.sort_unstable(); + + Ok(indices) + } +} + +pub mod attesting_indices_electra { + use std::collections::HashSet; + + use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; + use safe_arith::SafeArith; + use types::*; + + /// Compute an Electra IndexedAttestation given a list of committees. 
+ /// + /// Committees must be sorted by ascending order 0..committees_per_slot + pub fn get_indexed_attestation( + committees: &[BeaconCommittee], + attestation: &AttestationElectra, + ) -> Result, BlockOperationError> { + let attesting_indices = get_attesting_indices::( + committees, + &attestation.aggregation_bits, + &attestation.committee_bits, + )?; + + Ok(IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: VariableList::new(attesting_indices)?, + data: attestation.data.clone(), + signature: attestation.signature.clone(), + })) + } + + pub fn get_indexed_attestation_from_state( + beacon_state: &BeaconState, + attestation: &AttestationElectra, + ) -> Result, BlockOperationError> { + let committees = beacon_state.get_beacon_committees_at_slot(attestation.data.slot)?; + get_indexed_attestation(&committees, attestation) + } + + /// Shortcut for getting the attesting indices while fetching the committee from the state's cache. + pub fn get_attesting_indices_from_state( + state: &BeaconState, + att: &AttestationElectra, + ) -> Result, BeaconStateError> { + let committees = state.get_beacon_committees_at_slot(att.data.slot)?; + get_attesting_indices::(&committees, &att.aggregation_bits, &att.committee_bits) } - indices.sort_unstable(); + /// Returns validator indices which participated in the attestation, sorted by increasing index. + /// + /// Committees must be sorted by ascending order 0..committees_per_slot + pub fn get_attesting_indices( + committees: &[BeaconCommittee], + aggregation_bits: &BitList, + committee_bits: &BitVector, + ) -> Result, BeaconStateError> { + let mut attesting_indices = vec![]; - Ok(indices) + let committee_indices = get_committee_indices::(committee_bits); + + let mut committee_offset = 0; + + let committee_count_per_slot = committees.len() as u64; + let mut participant_count = 0; + for index in committee_indices { + let beacon_committee = committees + .get(index as usize) + .ok_or(Error::NoCommitteeFound(index))?; + + // This check is new to the spec's `process_attestation` in Electra. + if index >= committee_count_per_slot { + return Err(BeaconStateError::InvalidCommitteeIndex(index)); + } + participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?; + let committee_attesters = beacon_committee + .committee + .iter() + .enumerate() + .filter_map(|(i, &index)| { + if let Ok(aggregation_bit_index) = committee_offset.safe_add(i) { + if aggregation_bits.get(aggregation_bit_index).unwrap_or(false) { + return Some(index as u64); + } + } + None + }) + .collect::>(); + + attesting_indices.extend(committee_attesters); + committee_offset.safe_add_assign(beacon_committee.committee.len())?; + } + + // This check is new to the spec's `process_attestation` in Electra. + if participant_count as usize != aggregation_bits.len() { + return Err(BeaconStateError::InvalidBitfield); + } + + attesting_indices.sort_unstable(); + + Ok(attesting_indices) + } + + pub fn get_committee_indices( + committee_bits: &BitVector, + ) -> Vec { + committee_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect() + } } /// Shortcut for getting the attesting indices while fetching the committee from the state's cache. 
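// Context for the Electra helpers above: `committee_bits` selects which committees of the
// slot participated, and `aggregation_bits` is the concatenation of the per-committee
// participation bitfields of the selected committees, in ascending committee order. A
// minimal sketch of the offset bookkeeping, using plain `Vec<bool>`/`Vec<u64>` toy types
// instead of the crate's `BitList`/`BitVector` (names here are illustrative only):
fn toy_attesting_indices(
    committees: &[Vec<u64>],   // validator indices per committee, ordered by committee index
    committee_bits: &[bool],   // one bit per committee in the slot
    aggregation_bits: &[bool], // concatenated participation bits of the selected committees
) -> Vec<u64> {
    let mut attesting = Vec::new();
    let mut offset = 0usize;
    for (index, committee) in committees.iter().enumerate() {
        // Skip committees whose bit is not set in `committee_bits`.
        if !committee_bits.get(index).copied().unwrap_or(false) {
            continue;
        }
        // This committee's participation bits live at `offset..offset + committee.len()`
        // inside the concatenated `aggregation_bits`.
        for (i, &validator) in committee.iter().enumerate() {
            if aggregation_bits.get(offset + i).copied().unwrap_or(false) {
                attesting.push(validator);
            }
        }
        offset += committee.len();
    }
    attesting.sort_unstable();
    attesting
}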
pub fn get_attesting_indices_from_state( state: &BeaconState, - att: &Attestation, + att: AttestationRef, ) -> Result, BeaconStateError> { - let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; - get_attesting_indices::(committee.committee, &att.aggregation_bits) + match att { + AttestationRef::Base(att) => { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + attesting_indices_base::get_attesting_indices::( + committee.committee, + &att.aggregation_bits, + ) + } + AttestationRef::Electra(att) => { + attesting_indices_electra::get_attesting_indices_from_state::(state, att) + } + } } diff --git a/consensus/state_processing/src/common/get_indexed_attestation.rs b/consensus/state_processing/src/common/get_indexed_attestation.rs deleted file mode 100644 index 9cf689df40f..00000000000 --- a/consensus/state_processing/src/common/get_indexed_attestation.rs +++ /dev/null @@ -1,21 +0,0 @@ -use super::get_attesting_indices; -use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; -use types::*; - -type Result = std::result::Result>; - -/// Convert `attestation` to (almost) indexed-verifiable form. -/// -/// Spec v0.12.1 -pub fn get_indexed_attestation( - committee: &[usize], - attestation: &Attestation, -) -> Result> { - let attesting_indices = get_attesting_indices::(committee, &attestation.aggregation_bits)?; - - Ok(IndexedAttestation { - attesting_indices: VariableList::new(attesting_indices)?, - data: attestation.data.clone(), - signature: attestation.signature.clone(), - }) -} diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index a40a9dfd398..d2d4374ab24 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -8,28 +8,6 @@ pub fn initiate_validator_exit( index: usize, spec: &ChainSpec, ) -> Result<(), Error> { - // We do things in a slightly different order to the spec here. Instead of immediately checking - // whether the validator has already exited, we instead prepare the exit cache and compute the - // cheap-to-calculate values from that. *Then* we look up the validator a single time in the - // validator tree (expensive), make the check and mutate as appropriate. Compared to the spec - // ordering, this saves us from looking up the validator in the validator registry multiple - // times. - - // Ensure the exit cache is built. - state.build_exit_cache(spec)?; - - // Compute exit queue epoch - let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?; - let mut exit_queue_epoch = state - .exit_cache() - .max_epoch()? - .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); - let exit_queue_churn = state.exit_cache().get_churn_at(exit_queue_epoch)?; - - if exit_queue_churn >= state.get_validator_churn_limit(spec)? { - exit_queue_epoch.safe_add_assign(1)?; - } - let validator = state.get_validator_cow(index)?; // Return if the validator already initiated exit @@ -37,7 +15,28 @@ pub fn initiate_validator_exit( return Ok(()); } - let validator = validator.into_mut()?; + // Ensure the exit cache is built. + state.build_exit_cache(spec)?; + + // Compute exit queue epoch + let exit_queue_epoch = if state.fork_name_unchecked() >= ForkName::Electra { + let effective_balance = state.get_effective_balance(index)?; + state.compute_exit_epoch_and_update_churn(effective_balance, spec)? 
+ } else { + let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec)?; + let mut exit_queue_epoch = state + .exit_cache() + .max_epoch()? + .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); + let exit_queue_churn = state.exit_cache().get_churn_at(exit_queue_epoch)?; + + if exit_queue_churn >= state.get_validator_churn_limit(spec)? { + exit_queue_epoch.safe_add_assign(1)?; + } + exit_queue_epoch + }; + + let validator = state.get_validator_mut(index)?; validator.exit_epoch = exit_queue_epoch; validator.withdrawable_epoch = exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index cefc47b0235..0287748fd04 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -1,7 +1,6 @@ mod deposit_data_tree; mod get_attestation_participation; mod get_attesting_indices; -mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; @@ -11,8 +10,9 @@ pub mod update_progressive_balances_cache; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; -pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_from_state}; -pub use get_indexed_attestation::get_indexed_attestation; +pub use get_attesting_indices::{ + attesting_indices_base, attesting_indices_electra, get_attesting_indices_from_state, +}; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 520b58a8af3..80d857cc009 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -53,8 +53,8 @@ pub fn slash_validator( // Apply proposer and whistleblower rewards let proposer_index = ctxt.get_proposer_index(state, spec)? 
as usize; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); - let whistleblower_reward = - validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; + let whistleblower_reward = validator_effective_balance + .safe_div(spec.whistleblower_reward_quotient_for_state(state))?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, BeaconState::Altair(_) diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 073d87be85b..b0eaf3422d3 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,11 +1,11 @@ -use crate::common::get_indexed_attestation; +use crate::common::{attesting_indices_base, attesting_indices_electra}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::EpochCacheError; use std::collections::{hash_map::Entry, HashMap}; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList, - ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, + AbstractExecPayload, AttestationRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, + Hash256, IndexedAttestation, IndexedAttestationRef, SignedBeaconBlock, Slot, }; #[derive(Debug, PartialEq, Clone)] @@ -21,8 +21,7 @@ pub struct ConsensusContext { /// Block root of the block at `slot`. pub current_block_root: Option, /// Cache of indexed attestations constructed during block processing. - pub indexed_attestations: - HashMap<(AttestationData, BitList), IndexedAttestation>, + pub indexed_attestations: HashMap>, } #[derive(Debug, PartialEq, Clone)] @@ -148,26 +147,32 @@ impl ConsensusContext { } } - pub fn get_indexed_attestation( - &mut self, + pub fn get_indexed_attestation<'a>( + &'a mut self, state: &BeaconState, - attestation: &Attestation, - ) -> Result<&IndexedAttestation, BlockOperationError> { - let key = ( - attestation.data.clone(), - attestation.aggregation_bits.clone(), - ); - - match self.indexed_attestations.entry(key) { - Entry::Occupied(occupied) => Ok(occupied.into_mut()), - Entry::Vacant(vacant) => { - let committee = - state.get_beacon_committee(attestation.data.slot, attestation.data.index)?; - let indexed_attestation = - get_indexed_attestation(committee.committee, attestation)?; - Ok(vacant.insert(indexed_attestation)) - } + attestation: AttestationRef<'a, E>, + ) -> Result, BlockOperationError> { + let key = attestation.tree_hash_root(); + match attestation { + AttestationRef::Base(attn) => match self.indexed_attestations.entry(key) { + Entry::Occupied(occupied) => Ok(occupied.into_mut()), + Entry::Vacant(vacant) => { + let committee = state.get_beacon_committee(attn.data.slot, attn.data.index)?; + let indexed_attestation = + attesting_indices_base::get_indexed_attestation(committee.committee, attn)?; + Ok(vacant.insert(indexed_attestation)) + } + }, + AttestationRef::Electra(attn) => match self.indexed_attestations.entry(key) { + Entry::Occupied(occupied) => Ok(occupied.into_mut()), + Entry::Vacant(vacant) => { + let indexed_attestation = + attesting_indices_electra::get_indexed_attestation_from_state(state, attn)?; + Ok(vacant.insert(indexed_attestation)) + } + }, } + .map(|indexed_attestation| (*indexed_attestation).to_ref()) } pub fn num_cached_indexed_attestations(&self) -> usize { @@ -177,10 +182,7 @@ impl ConsensusContext { 
#[must_use] pub fn set_indexed_attestations( mut self, - attestations: HashMap< - (AttestationData, BitList), - IndexedAttestation, - >, + attestations: HashMap>, ) -> Self { self.indexed_attestations = attestations; self diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index b2f2d85407e..0e940fabe4f 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -9,6 +9,7 @@ use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256} pub struct PreEpochCache { epoch_key: EpochCacheKey, effective_balances: Vec, + total_active_balance: u64, } impl PreEpochCache { @@ -36,27 +37,59 @@ impl PreEpochCache { Ok(Self { epoch_key, effective_balances: Vec::with_capacity(state.validators().len()), + total_active_balance: 0, }) } - pub fn push_effective_balance(&mut self, effective_balance: u64) { - self.effective_balances.push(effective_balance); + pub fn update_effective_balance( + &mut self, + validator_index: usize, + effective_balance: u64, + is_active_next_epoch: bool, + ) -> Result<(), EpochCacheError> { + if validator_index == self.effective_balances.len() { + self.effective_balances.push(effective_balance); + if is_active_next_epoch { + self.total_active_balance + .safe_add_assign(effective_balance)?; + } + + Ok(()) + } else if let Some(existing_balance) = self.effective_balances.get_mut(validator_index) { + // Update total active balance for a late change in effective balance. This happens when + // processing consolidations. + if is_active_next_epoch { + self.total_active_balance + .safe_add_assign(effective_balance)?; + self.total_active_balance + .safe_sub_assign(*existing_balance)?; + } + *existing_balance = effective_balance; + Ok(()) + } else { + Err(EpochCacheError::ValidatorIndexOutOfBounds { validator_index }) + } + } + + pub fn get_total_active_balance(&self) -> u64 { + self.total_active_balance } pub fn into_epoch_cache( self, - total_active_balance: u64, activation_queue: ActivationQueue, spec: &ChainSpec, ) -> Result { let epoch = self.epoch_key.epoch; + let total_active_balance = self.total_active_balance; let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_active_balance); let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let effective_balance_increment = spec.effective_balance_increment; - let max_effective_balance_eth = spec - .max_effective_balance - .safe_div(effective_balance_increment)?; + let max_effective_balance = + spec.max_effective_balance_for_fork(spec.fork_name_at_epoch(epoch)); + let max_effective_balance_eth = + max_effective_balance.safe_div(effective_balance_increment)?; let mut base_rewards = Vec::with_capacity(max_effective_balance_eth.safe_add(1)? 
as usize); @@ -131,9 +164,9 @@ pub fn initialize_epoch_cache( decision_block_root, }, effective_balances, + total_active_balance, }; - *state.epoch_cache_mut() = - pre_epoch_cache.into_epoch_cache(total_active_balance, activation_queue, spec)?; + *state.epoch_cache_mut() = pre_epoch_cache.into_epoch_cache(activation_queue, spec)?; Ok(()) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index a84f359389c..049599ea945 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,9 +2,9 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::apply_deposit, }; use crate::common::DepositDataTree; +use crate::upgrade::electra::upgrade_state_to_electra; use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, }; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; @@ -32,12 +32,13 @@ pub fn initialize_beacon_state_from_eth1( let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - for deposit in deposits.iter() { + for deposit in deposits.into_iter() { deposit_tree .push_leaf(deposit.data.tree_hash_root()) .map_err(BlockProcessingError::MerkleTreeError)?; state.eth1_data_mut().deposit_root = deposit_tree.root(); - apply_deposit(&mut state, deposit, spec, true)?; + let Deposit { proof, data } = deposit; + apply_deposit(&mut state, data, Some(proof), true, spec)?; } process_activations(&mut state, spec)?; @@ -115,7 +116,8 @@ pub fn initialize_beacon_state_from_eth1( .electra_fork_epoch .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) { - upgrade_to_electra(&mut state, spec)?; + let post = upgrade_state_to_electra(&mut state, Epoch::new(0), Epoch::new(0), spec)?; + state = post; // Remove intermediate Deneb fork from `state.fork`. state.fork_mut().previous_version = spec.electra_fork_version; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 74f9d84bb11..adabf6862d3 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -45,4 +45,4 @@ pub use per_epoch_processing::{ }; pub use per_slot_processing::{per_slot_processing, Error as SlotProcessingError}; pub use types::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use verify_operation::{SigVerifiedOp, VerifyOperation, VerifyOperationAt}; +pub use verify_operation::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e163f3b76b8..ac5c0f659cd 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -17,6 +17,10 @@ lazy_static! 
{ "beacon_participation_prev_epoch_source_attesting_gwei_total", "Total effective balance (gwei) of validators who attested to the source in the previous epoch" ); + pub static ref PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL: Result = try_create_int_gauge( + "beacon_participation_current_epoch_active_gwei_total", + "Total effective balance (gwei) of validators who are active in the current epoch" + ); /* * Processing metrics */ diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 2efa1218829..e7655b453a8 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -20,7 +20,7 @@ pub use verify_attestation::{ }; pub use verify_bls_to_execution_change::verify_bls_to_execution_change; pub use verify_deposit::{ - get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, + get_existing_validator_index, is_valid_deposit_signature, verify_deposit_merkle_proof, }; pub use verify_exit::verify_exit; @@ -503,11 +503,54 @@ pub fn compute_timestamp_at_slot( pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, -) -> Result, BlockProcessingError> { +) -> Result<(Withdrawals, Option), BlockProcessingError> { let epoch = state.current_epoch(); let mut withdrawal_index = state.next_withdrawal_index()?; let mut validator_index = state.next_withdrawal_validator_index()?; let mut withdrawals = vec![]; + let fork_name = state.fork_name_unchecked(); + + // [New in Electra:EIP7251] + // Consume pending partial withdrawals + let partial_withdrawals_count = + if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + for withdrawal in partial_withdrawals { + if withdrawal.withdrawable_epoch > epoch + || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize + { + break; + } + + let withdrawal_balance = state.get_balance(withdrawal.index as usize)?; + let validator = state.get_validator(withdrawal.index as usize)?; + + let has_sufficient_effective_balance = + validator.effective_balance >= spec.min_activation_balance; + let has_excess_balance = withdrawal_balance > spec.min_activation_balance; + + if validator.exit_epoch == spec.far_future_epoch + && has_sufficient_effective_balance + && has_excess_balance + { + let withdrawable_balance = std::cmp::min( + withdrawal_balance.safe_sub(spec.min_activation_balance)?, + withdrawal.amount, + ); + withdrawals.push(Withdrawal { + index: withdrawal_index, + validator_index: withdrawal.index, + address: validator + .get_execution_withdrawal_address(spec) + .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, + amount: withdrawable_balance, + }); + withdrawal_index.safe_add_assign(1)?; + } + } + Some(withdrawals.len()) + } else { + None + }; let bound = std::cmp::min( state.validators().len() as u64, @@ -518,24 +561,27 @@ pub fn get_expected_withdrawals( let balance = *state.balances().get(validator_index as usize).ok_or( BeaconStateError::BalancesOutOfBounds(validator_index as usize), )?; - if validator.is_fully_withdrawable_at(balance, epoch, spec) { + if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, address: validator - .get_eth1_withdrawal_address(spec) + .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, amount: balance, }); withdrawal_index.safe_add_assign(1)?; - } else if 
validator.is_partially_withdrawable_validator(balance, spec) { + } else if validator.is_partially_withdrawable_validator(balance, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, address: validator - .get_eth1_withdrawal_address(spec) + .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, - amount: balance.safe_sub(spec.max_effective_balance)?, + amount: balance.safe_sub( + validator + .get_validator_max_effective_balance(spec, state.fork_name_unchecked()), + )?, }); withdrawal_index.safe_add_assign(1)?; } @@ -547,7 +593,7 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok(withdrawals.into()) + Ok((withdrawals.into(), partial_withdrawals_count)) } /// Apply withdrawals to the state. @@ -557,9 +603,9 @@ pub fn process_withdrawals>( spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { match state { - BeaconState::Bellatrix(_) => Ok(()), BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { - let expected_withdrawals = get_expected_withdrawals(state, spec)?; + let (expected_withdrawals, partial_withdrawals_count) = + get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; @@ -578,6 +624,17 @@ pub fn process_withdrawals>( )?; } + // Update pending partial withdrawals [New in Electra:EIP7251] + if let Some(partial_withdrawals_count) = partial_withdrawals_count { + // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38 + let new_partial_withdrawals = state + .pending_partial_withdrawals()? + .iter_from(partial_withdrawals_count)? + .cloned() + .collect::>(); + *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; + } + // Update the next withdrawal index if this block contained withdrawals if let Some(latest_withdrawal) = expected_withdrawals.last() { *state.next_withdrawal_index_mut()? 
= latest_withdrawal.index.safe_add(1)?; @@ -605,6 +662,6 @@ pub fn process_withdrawals>( Ok(()) } // these shouldn't even be encountered but they're here for completeness - BeaconState::Base(_) | BeaconState::Altair(_) => Ok(()), + BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Bellatrix(_) => Ok(()), } } diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3b8a8ea52c9..28ca8935e4a 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -4,7 +4,6 @@ use super::signature_sets::{Error as SignatureSetError, *}; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; use crate::{ConsensusContext, ContextError}; use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; -use rayon::prelude::*; use std::borrow::Cow; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, @@ -171,6 +170,7 @@ where self.include_exits(block)?; self.include_sync_aggregate(block)?; self.include_bls_to_execution_changes(block)?; + self.include_consolidations(block)?; Ok(()) } @@ -247,13 +247,12 @@ where ) -> Result<()> { self.sets .sets - .reserve(block.message().body().attester_slashings().len() * 2); + .reserve(block.message().body().attester_slashings_len() * 2); block .message() .body() .attester_slashings() - .iter() .try_for_each(|attester_slashing| { let (set_1, set_2) = attester_slashing_signature_sets( self.state, @@ -277,20 +276,19 @@ where ) -> Result<()> { self.sets .sets - .reserve(block.message().body().attestations().len()); + .reserve(block.message().body().attestations_len()); block .message() .body() .attestations() - .iter() .try_for_each(|attestation| { let indexed_attestation = ctxt.get_indexed_attestation(self.state, attestation)?; self.sets.push(indexed_attestation_signature_set( self.state, self.get_pubkey.clone(), - &attestation.signature, + attestation.signature(), indexed_attestation, self.spec, )?); @@ -361,6 +359,27 @@ where Ok(()) } + /// Includes all signatures in `self.block.body.consolidations` for verification. + pub fn include_consolidations>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + if let Ok(consolidations) = block.message().body().consolidations() { + self.sets.sets.reserve(consolidations.len()); + for consolidation in consolidations { + let set = consolidation_signature_set( + self.state, + self.get_pubkey.clone(), + consolidation, + self.spec, + )?; + + self.sets.push(set); + } + } + Ok(()) + } + /// Verify all the signatures that have been included in `self`, returning `true` if and only if /// all the signatures are valid. /// @@ -391,15 +410,10 @@ impl<'a> ParallelSignatureSets<'a> { /// It is not possible to know exactly _which_ signature is invalid here, just that /// _at least one_ was invalid. /// - /// Uses `rayon` to do a map-reduce of Vitalik's method across multiple cores. + /// Blst library spreads the signature verification work across multiple available cores, so + /// this function is already parallelized. 
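// Design note on the change above: the manual rayon map-reduce over signature-set chunks is
// dropped because the single `verify_signature_sets` call (backed by blst's multi-point
// verification) already spreads the work across available cores, so the outer layer of
// parallelism only added overhead. The semantics are unchanged: the batch verifies if and
// only if every individual signature set verifies.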
#[must_use] pub fn verify(self) -> bool { - let num_sets = self.sets.len(); - let num_chunks = std::cmp::max(1, num_sets / rayon::current_num_threads()); - self.sets - .into_par_iter() - .chunks(num_chunks) - .map(|chunk| verify_signature_sets(chunk.iter())) - .reduce(|| true, |current, this| current && this) + verify_signature_sets(self.sets.iter()) } } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 336895514f9..cebb10b6071 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -89,6 +89,47 @@ pub enum BlockProcessingError { found: Hash256, }, WithdrawalCredentialsInvalid, + TooManyPendingConsolidations { + consolidations: usize, + limit: usize, + }, + ConsolidationChurnLimitTooLow { + churn_limit: u64, + minimum: u64, + }, + MatchingSourceTargetConsolidation { + index: u64, + }, + InactiveConsolidationSource { + index: u64, + current_epoch: Epoch, + }, + InactiveConsolidationTarget { + index: u64, + current_epoch: Epoch, + }, + SourceValidatorExiting { + index: u64, + }, + TargetValidatorExiting { + index: u64, + }, + FutureConsolidationEpoch { + current_epoch: Epoch, + consolidation_epoch: Epoch, + }, + NoSourceExecutionWithdrawalCredential { + index: u64, + }, + NoTargetExecutionWithdrawalCredential { + index: u64, + }, + MismatchedWithdrawalCredentials { + source_address: Address, + target_address: Address, + }, + InavlidConsolidationSignature, + PendingAttestationInElectra, } impl From for BlockProcessingError { @@ -411,7 +452,10 @@ pub enum ExitInvalid { /// The specified validator has already initiated exit. AlreadyInitiatedExit(u64), /// The exit is for a future epoch. - FutureEpoch { state: Epoch, exit: Epoch }, + FutureEpoch { + state: Epoch, + exit: Epoch, + }, /// The validator has not been active for long enough. TooYoungToExit { current_epoch: Epoch, @@ -422,6 +466,7 @@ pub enum ExitInvalid { /// There was an error whilst attempting to get a set of signatures. The signatures may have /// been invalid or an internal error occurred. SignatureSetError(SignatureSetError), + PendingWithdrawalInQueue(u64), } #[derive(Debug, PartialEq, Clone)] diff --git a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index baccd1dbbd2..4bad3315cc4 100644 --- a/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -13,11 +13,11 @@ fn error(reason: Invalid) -> BlockOperationError { /// Verify an `IndexedAttestation`. 
pub fn is_valid_indexed_attestation( state: &BeaconState, - indexed_attestation: &IndexedAttestation, + indexed_attestation: IndexedAttestationRef, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { - let indices = &indexed_attestation.attesting_indices; + let indices = indexed_attestation.attesting_indices_to_vec(); // Verify that indices aren't empty verify!(!indices.is_empty(), Invalid::IndicesEmpty); @@ -36,14 +36,14 @@ pub fn is_valid_indexed_attestation( })?; Ok(()) }; - check_sorted(indices)?; + check_sorted(&indices)?; if verify_signatures.is_true() { verify!( indexed_attestation_signature_set( state, |i| get_pubkey_from_state(state, i), - &indexed_attestation.signature, + indexed_attestation.signature(), indexed_attestation, spec )? diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 3aefcf8a9c5..17607f7f337 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -4,8 +4,11 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use crate::signature_sets::consolidation_signature_set; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; +use types::typenum::U33; +use types::validator::is_compounding_withdrawal_credential; pub fn process_operations>( state: &mut BeaconState, @@ -36,6 +39,18 @@ pub fn process_operations>( process_bls_to_execution_changes(state, bls_to_execution_changes, verify_signatures, spec)?; } + if state.fork_name_unchecked().electra_enabled() { + let requests = block_body.execution_payload()?.withdrawal_requests()?; + if let Some(requests) = requests { + process_execution_layer_withdrawal_requests(state, &requests, spec)?; + } + let receipts = block_body.execution_payload()?.deposit_requests()?; + if let Some(receipts) = receipts { + process_deposit_requests(state, &receipts, spec)?; + } + process_consolidations(state, block_body.consolidations()?, verify_signatures, spec)?; + } + Ok(()) } @@ -46,13 +61,16 @@ pub mod base { /// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. - pub fn process_attestations( + pub fn process_attestations<'a, E: EthSpec, I>( state: &mut BeaconState, - attestations: &[Attestation], + attestations: I, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, - ) -> Result<(), BlockProcessingError> { + ) -> Result<(), BlockProcessingError> + where + I: Iterator>, + { // Ensure required caches are all built. These should be no-ops during regular operation. state.build_committee_cache(RelativeEpoch::Current, spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; @@ -63,7 +81,7 @@ pub mod base { let proposer_index = ctxt.get_proposer_index(state, spec)?; // Verify and apply each attestation. 
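// The loop below now consumes an iterator of `AttestationRef` (a per-fork reference enum)
// rather than a slice of a single `Attestation` type. A minimal sketch of the dispatch
// pattern with a hypothetical two-variant enum, purely for illustration and not the
// crate's real types:
enum ToyAttestationRef<'a> {
    Base(&'a str),
    Electra(&'a str),
}

fn process_all<'a>(atts: impl Iterator<Item = ToyAttestationRef<'a>>) -> Result<(), String> {
    for (i, att) in atts.enumerate() {
        match att {
            // Phase0 block processing builds a `PendingAttestation`, as in the loop below.
            ToyAttestationRef::Base(_data) => {}
            // A post-Altair attestation reaching the phase0 code path is treated as an error.
            ToyAttestationRef::Electra(_data) => {
                return Err(format!("attestation {i}: unexpected fork variant"));
            }
        }
    }
    Ok(())
}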
- for (i, attestation) in attestations.iter().enumerate() { + for (i, attestation) in attestations.enumerate() { verify_attestation_for_block_inclusion( state, attestation, @@ -73,6 +91,12 @@ pub mod base { ) .map_err(|e| e.into_with_index(i))?; + let AttestationRef::Base(attestation) = attestation else { + // Pending attestations have been deprecated in a altair, this branch should + // never happen + return Err(BlockProcessingError::PendingAttestationInElectra); + }; + let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), data: attestation.data.clone(), @@ -101,24 +125,24 @@ pub mod altair_deneb { use super::*; use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation; - pub fn process_attestations( + pub fn process_attestations<'a, E: EthSpec, I>( state: &mut BeaconState, - attestations: &[Attestation], + attestations: I, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, - ) -> Result<(), BlockProcessingError> { - attestations - .iter() - .enumerate() - .try_for_each(|(i, attestation)| { - process_attestation(state, attestation, i, ctxt, verify_signatures, spec) - }) + ) -> Result<(), BlockProcessingError> + where + I: Iterator>, + { + attestations.enumerate().try_for_each(|(i, attestation)| { + process_attestation(state, attestation, i, ctxt, verify_signatures, spec) + }) } pub fn process_attestation( state: &mut BeaconState, - attestation: &Attestation, + attestation: AttestationRef, att_index: usize, ctxt: &mut ConsensusContext, verify_signatures: VerifySignatures, @@ -128,26 +152,24 @@ pub mod altair_deneb { let previous_epoch = ctxt.previous_epoch; let current_epoch = ctxt.current_epoch; - let attesting_indices = verify_attestation_for_block_inclusion( + let indexed_att = verify_attestation_for_block_inclusion( state, attestation, ctxt, verify_signatures, spec, ) - .map_err(|e| e.into_with_index(att_index))? - .attesting_indices - .clone(); + .map_err(|e| e.into_with_index(att_index))?; // Matching roots, participation flag indices - let data = &attestation.data; + let data = attestation.data(); let inclusion_delay = state.slot().safe_sub(data.slot)?.as_u64(); let participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay, spec)?; // Update epoch participation flags. let mut proposer_reward_numerator = 0; - for index in &attesting_indices { + for index in indexed_att.attesting_indices_iter() { let index = *index as usize; let validator_effective_balance = state.epoch_cache().get_effective_balance(index)?; @@ -231,16 +253,19 @@ pub fn process_proposer_slashings( /// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. 
-pub fn process_attester_slashings( +pub fn process_attester_slashings<'a, E: EthSpec, I>( state: &mut BeaconState, - attester_slashings: &[AttesterSlashing], + attester_slashings: I, verify_signatures: VerifySignatures, ctxt: &mut ConsensusContext, spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { +) -> Result<(), BlockProcessingError> +where + I: Iterator>, +{ state.build_slashings_cache()?; - for (i, attester_slashing) in attester_slashings.iter().enumerate() { + for (i, attester_slashing) in attester_slashings.enumerate() { let slashable_indices = verify_attester_slashing(state, attester_slashing, verify_signatures, spec) .map_err(|e| e.into_with_index(i))?; @@ -344,17 +369,34 @@ pub fn process_deposits( deposits: &[Deposit], spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - let expected_deposit_len = std::cmp::min( - E::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, - ); - block_verify!( - deposits.len() as u64 == expected_deposit_len, - BlockProcessingError::DepositCountInvalid { - expected: expected_deposit_len as usize, - found: deposits.len(), - } - ); + // [Modified in Electra:EIP6110] + // Disable former deposit mechanism once all prior deposits are processed + // + // If `deposit_requests_start_index` does not exist as a field on `state`, electra is disabled + // which means we always want to use the old check, so this field defaults to `u64::MAX`. + let eth1_deposit_index_limit = state.deposit_requests_start_index().unwrap_or(u64::MAX); + + if state.eth1_deposit_index() < eth1_deposit_index_limit { + let expected_deposit_len = std::cmp::min( + E::MaxDeposits::to_u64(), + state.get_outstanding_deposit_len()?, + ); + block_verify!( + deposits.len() as u64 == expected_deposit_len, + BlockProcessingError::DepositCountInvalid { + expected: expected_deposit_len as usize, + found: deposits.len(), + } + ); + } else { + block_verify!( + deposits.len() as u64 == 0, + BlockProcessingError::DepositCountInvalid { + expected: 0, + found: deposits.len(), + } + ); + } // Verify merkle proofs in parallel. deposits @@ -372,60 +414,96 @@ pub fn process_deposits( // Update the state in series. for deposit in deposits { - apply_deposit(state, deposit, spec, false)?; + apply_deposit(state, deposit.data.clone(), None, true, spec)?; } Ok(()) } -/// Process a single deposit, optionally verifying its merkle proof. +/// Process a single deposit, verifying its merkle proof if provided. pub fn apply_deposit( state: &mut BeaconState, - deposit: &Deposit, + deposit_data: DepositData, + proof: Option>, + increment_eth1_deposit_index: bool, spec: &ChainSpec, - verify_merkle_proof: bool, ) -> Result<(), BlockProcessingError> { let deposit_index = state.eth1_deposit_index() as usize; - if verify_merkle_proof { - verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index(), spec) + if let Some(proof) = proof { + let deposit = Deposit { + proof, + data: deposit_data.clone(), + }; + verify_deposit_merkle_proof(state, &deposit, state.eth1_deposit_index(), spec) .map_err(|e| e.into_with_index(deposit_index))?; } - state.eth1_deposit_index_mut().safe_add_assign(1)?; + if increment_eth1_deposit_index { + state.eth1_deposit_index_mut().safe_add_assign(1)?; + } // Get an `Option` where `u64` is the validator index if this deposit public key // already exists in the beacon_state. 
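// Sketch of the EIP-6110 gating introduced in `process_deposits` above: once the state
// exposes `deposit_requests_start_index` (Electra), Eth1 deposits are only expected while
// `eth1_deposit_index` is still below that start index; afterwards a block must carry zero
// Eth1 deposits. Toy version with plain integers (assumed parameter names, not the real
// state accessors):
fn expected_eth1_deposit_count(
    eth1_deposit_index: u64,
    deposit_requests_start_index: Option<u64>, // `None` before Electra
    outstanding_deposits: u64,
    max_deposits_per_block: u64,
) -> u64 {
    let limit = deposit_requests_start_index.unwrap_or(u64::MAX);
    if eth1_deposit_index < limit {
        std::cmp::min(max_deposits_per_block, outstanding_deposits)
    } else {
        0
    }
}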
- let validator_index = get_existing_validator_index(state, &deposit.data.pubkey) + let validator_index = get_existing_validator_index(state, &deposit_data.pubkey) .map_err(|e| e.into_with_index(deposit_index))?; - let amount = deposit.data.amount; + let amount = deposit_data.amount; if let Some(index) = validator_index { - // Update the existing validator balance. - increase_balance(state, index as usize, amount)?; + // [Modified in Electra:EIP7251] + if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { + pending_balance_deposits.push(PendingBalanceDeposit { index, amount })?; + + let validator = state + .validators() + .get(index as usize) + .ok_or(BeaconStateError::UnknownValidator(index as usize))?; + + if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec) + && validator.has_eth1_withdrawal_credential(spec) + && is_valid_deposit_signature(&deposit_data, spec).is_ok() + { + state.switch_to_compounding_validator(index as usize, spec)?; + } + } else { + // Update the existing validator balance. + increase_balance(state, index as usize, amount)?; + } } else { // The signature should be checked for new validators. Return early for a bad // signature. - if verify_deposit_signature(&deposit.data, spec).is_err() { + if is_valid_deposit_signature(&deposit_data, spec).is_err() { return Ok(()); } + let new_validator_index = state.validators().len(); + + // [Modified in Electra:EIP7251] + let (effective_balance, state_balance) = if state.fork_name_unchecked() >= ForkName::Electra + { + (0, 0) + } else { + ( + std::cmp::min( + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ), + amount, + ) + }; // Create a new validator. let validator = Validator { - pubkey: deposit.data.pubkey, - withdrawal_credentials: deposit.data.withdrawal_credentials, + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), + effective_balance, slashed: false, }; state.validators_mut().push(validator)?; - state.balances_mut().push(deposit.data.amount)?; + state.balances_mut().push(state_balance)?; // Altair or later initializations. 
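// Note on the new-validator path above: pre-Electra the deposited amount is credited
// immediately and the effective balance computed up front, while post-Electra both start at
// zero and the amount is queued as a `PendingBalanceDeposit` (pushed further below) to be
// applied during epoch processing. Simplified toy of that branch (assumes a non-zero
// `increment`; names are illustrative, not the real spec accessors):
fn initial_balances(amount: u64, electra_enabled: bool, increment: u64, max_eb: u64) -> (u64, u64) {
    if electra_enabled {
        // (effective_balance, balance) both start at zero; the deposit is queued instead.
        (0, 0)
    } else {
        // Round the effective balance down to an increment, capped at the maximum, and
        // credit the full amount to the balance.
        (std::cmp::min(amount - amount % increment, max_eb), amount)
    }
}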
if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { @@ -437,6 +515,280 @@ pub fn apply_deposit( if let Ok(inactivity_scores) = state.inactivity_scores_mut() { inactivity_scores.push(0)?; } + + // [New in Electra:EIP7251] + if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { + pending_balance_deposits.push(PendingBalanceDeposit { + index: new_validator_index as u64, + amount, + })?; + } + } + + Ok(()) +} + +pub fn process_execution_layer_withdrawal_requests( + state: &mut BeaconState, + requests: &[ExecutionLayerWithdrawalRequest], + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for request in requests { + let amount = request.amount; + let is_full_exit_request = amount == spec.full_exit_request_amount; + + // If partial withdrawal queue is full, only full exits are processed + if state.pending_partial_withdrawals()?.len() == E::pending_partial_withdrawals_limit() + && !is_full_exit_request + { + continue; + } + + // Verify pubkey exists + let index_opt = state.get_validator_index(&request.validator_pubkey)?; + let Some(index) = index_opt else { + continue; + }; + + let validator = state.get_validator(index)?; + + // Verify withdrawal credentials + let has_correct_credential = validator.has_execution_withdrawal_credential(spec); + let is_correct_source_address = validator + .get_execution_withdrawal_address(spec) + .map(|addr| addr == request.source_address) + .unwrap_or(false); + + if !(has_correct_credential && is_correct_source_address) { + continue; + } + + // Verify the validator is active + if !validator.is_active_at(state.current_epoch()) { + continue; + } + + // Verify exit has not been initiated + if validator.exit_epoch != spec.far_future_epoch { + continue; + } + + // Verify the validator has been active long enough + if state.current_epoch() + < validator + .activation_epoch + .safe_add(spec.shard_committee_period)? + { + continue; + } + + let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(index)?; + if is_full_exit_request { + // Only exit validator if it has no pending withdrawals in the queue + if pending_balance_to_withdraw == 0 { + initiate_validator_exit(state, index, spec)? + } + continue; + } + + let balance = state.get_balance(index)?; + let has_sufficient_effective_balance = + validator.effective_balance >= spec.min_activation_balance; + let has_excess_balance = balance + > spec + .min_activation_balance + .safe_add(pending_balance_to_withdraw)?; + + // Only allow partial withdrawals with compounding withdrawal credentials + if validator.has_compounding_withdrawal_credential(spec) + && has_sufficient_effective_balance + && has_excess_balance + { + let to_withdraw = std::cmp::min( + balance + .safe_sub(spec.min_activation_balance)? + .safe_sub(pending_balance_to_withdraw)?, + amount, + ); + let exit_queue_epoch = state.compute_exit_epoch_and_update_churn(to_withdraw, spec)?; + let withdrawable_epoch = + exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; + state + .pending_partial_withdrawals_mut()? + .push(PendingPartialWithdrawal { + index: index as u64, + amount: to_withdraw, + withdrawable_epoch, + })?; + } + } + Ok(()) +} + +pub fn process_deposit_requests( + state: &mut BeaconState, + receipts: &[DepositRequest], + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + for receipt in receipts { + // Set deposit receipt start index + if state.deposit_requests_start_index()? 
== spec.unset_deposit_requests_start_index { + *state.deposit_requests_start_index_mut()? = receipt.index + } + let deposit_data = DepositData { + pubkey: receipt.pubkey, + withdrawal_credentials: receipt.withdrawal_credentials, + amount: receipt.amount, + signature: receipt.signature.clone().into(), + }; + apply_deposit(state, deposit_data, None, false, spec)? + } + + Ok(()) +} + +pub fn process_consolidations( + state: &mut BeaconState, + consolidations: &[SignedConsolidation], + verify_signatures: VerifySignatures, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + if consolidations.is_empty() { + return Ok(()); + } + + // If the pending consolidations queue is full, no consolidations are allowed in the block + let pending_consolidations = state.pending_consolidations()?.len(); + let pending_consolidations_limit = E::pending_consolidations_limit(); + block_verify! { + pending_consolidations < pending_consolidations_limit, + BlockProcessingError::TooManyPendingConsolidations { + consolidations: pending_consolidations, + limit: pending_consolidations_limit + } + } + + // If there is too little available consolidation churn limit, no consolidations are allowed in the block + let churn_limit = state.get_consolidation_churn_limit(spec)?; + block_verify! { + churn_limit > spec.min_activation_balance, + BlockProcessingError::ConsolidationChurnLimitTooLow { + churn_limit, + minimum: spec.min_activation_balance + } + } + + for signed_consolidation in consolidations { + let consolidation = signed_consolidation.message.clone(); + + // Verify that source != target, so a consolidation cannot be used as an exit. + block_verify! { + consolidation.source_index != consolidation.target_index, + BlockProcessingError::MatchingSourceTargetConsolidation { + index: consolidation.source_index + } + } + + let source_validator = state.get_validator(consolidation.source_index as usize)?; + let target_validator = state.get_validator(consolidation.target_index as usize)?; + + // Verify the source and the target are active + let current_epoch = state.current_epoch(); + block_verify! { + source_validator.is_active_at(current_epoch), + BlockProcessingError::InactiveConsolidationSource{ + index: consolidation.source_index, + current_epoch + } + } + block_verify! { + target_validator.is_active_at(current_epoch), + BlockProcessingError::InactiveConsolidationTarget{ + index: consolidation.target_index, + current_epoch + } + } + + // Verify exits for source and target have not been initiated + block_verify! { + source_validator.exit_epoch == spec.far_future_epoch, + BlockProcessingError::SourceValidatorExiting{ + index: consolidation.source_index, + } + } + block_verify! { + target_validator.exit_epoch == spec.far_future_epoch, + BlockProcessingError::TargetValidatorExiting{ + index: consolidation.target_index, + } + } + + // Consolidations must specify an epoch when they become valid; they are not valid before then + block_verify! { + current_epoch >= consolidation.epoch, + BlockProcessingError::FutureConsolidationEpoch { + current_epoch, + consolidation_epoch: consolidation.epoch + } + } + + // Verify the source and the target have Execution layer withdrawal credentials + block_verify! { + source_validator.has_execution_withdrawal_credential(spec), + BlockProcessingError::NoSourceExecutionWithdrawalCredential { + index: consolidation.source_index, + } + } + block_verify! 
{ + target_validator.has_execution_withdrawal_credential(spec), + BlockProcessingError::NoTargetExecutionWithdrawalCredential { + index: consolidation.target_index, + } + } + + // Verify the same withdrawal address + let source_address = source_validator + .get_execution_withdrawal_address(spec) + .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?; + let target_address = target_validator + .get_execution_withdrawal_address(spec) + .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?; + block_verify! { + source_address == target_address, + BlockProcessingError::MismatchedWithdrawalCredentials { + source_address, + target_address + } + } + + if verify_signatures.is_true() { + let signature_set = consolidation_signature_set( + state, + |i| get_pubkey_from_state(state, i), + signed_consolidation, + spec, + )?; + block_verify! { + signature_set.verify(), + BlockProcessingError::InavlidConsolidationSignature + } + } + let exit_epoch = state.compute_consolidation_epoch_and_update_churn( + source_validator.effective_balance, + spec, + )?; + let source_validator = state.get_validator_mut(consolidation.source_index as usize)?; + // Initiate source validator exit and append pending consolidation + source_validator.exit_epoch = exit_epoch; + source_validator.withdrawable_epoch = source_validator + .exit_epoch + .safe_add(spec.min_validator_withdrawability_delay)?; + state + .pending_consolidations_mut()? + .push(PendingConsolidation { + source_index: consolidation.source_index, + target_index: consolidation.target_index, + })?; } Ok(()) diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 9468893f762..3c683766adb 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -7,12 +7,12 @@ use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, + AbstractExecPayload, AggregateSignature, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, - InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, - SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey, + PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, + SignedBlsToExecutionChange, SignedConsolidation, SignedContributionAndProof, SignedRoot, + SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -272,28 +272,28 @@ pub fn indexed_attestation_signature_set<'a, 'b, E, F>( state: &'a BeaconState, get_pubkey: F, signature: &'a AggregateSignature, - indexed_attestation: &'b IndexedAttestation, + indexed_attestation: IndexedAttestationRef<'b, E>, spec: &'a ChainSpec, ) -> Result> where E: EthSpec, F: Fn(usize) -> Option>, { - let mut pubkeys = Vec::with_capacity(indexed_attestation.attesting_indices.len()); - for &validator_idx in &indexed_attestation.attesting_indices { + let mut pubkeys = 
Vec::with_capacity(indexed_attestation.attesting_indices_len()); + for &validator_idx in indexed_attestation.attesting_indices_iter() { pubkeys.push( get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?, ); } let domain = spec.get_domain( - indexed_attestation.data.target.epoch, + indexed_attestation.data().target.epoch, Domain::BeaconAttester, &state.fork(), state.genesis_validators_root(), ); - let message = indexed_attestation.data.signing_root(domain); + let message = indexed_attestation.data().signing_root(domain); Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) } @@ -312,21 +312,21 @@ where E: EthSpec, F: Fn(usize) -> Option>, { - let mut pubkeys = Vec::with_capacity(indexed_attestation.attesting_indices.len()); - for &validator_idx in &indexed_attestation.attesting_indices { + let mut pubkeys = Vec::with_capacity(indexed_attestation.attesting_indices_len()); + for &validator_idx in indexed_attestation.attesting_indices_iter() { pubkeys.push( get_pubkey(validator_idx as usize).ok_or(Error::ValidatorUnknown(validator_idx))?, ); } let domain = spec.get_domain( - indexed_attestation.data.target.epoch, + indexed_attestation.data().target.epoch, Domain::BeaconAttester, fork, genesis_validators_root, ); - let message = indexed_attestation.data.signing_root(domain); + let message = indexed_attestation.data().signing_root(domain); Ok(SignatureSet::multiple_pubkeys(signature, pubkeys, message)) } @@ -335,7 +335,7 @@ where pub fn attester_slashing_signature_sets<'a, E, F>( state: &'a BeaconState, get_pubkey: F, - attester_slashing: &'a AttesterSlashing, + attester_slashing: AttesterSlashingRef<'a, E>, spec: &'a ChainSpec, ) -> Result<(SignatureSet<'a>, SignatureSet<'a>)> where @@ -346,15 +346,15 @@ where indexed_attestation_signature_set( state, get_pubkey.clone(), - &attester_slashing.attestation_1.signature, - &attester_slashing.attestation_1, + attester_slashing.attestation_1().signature(), + attester_slashing.attestation_1(), spec, )?, indexed_attestation_signature_set( state, get_pubkey, - &attester_slashing.attestation_2.signature, - &attester_slashing.attestation_2, + attester_slashing.attestation_2().signature(), + attester_slashing.attestation_2(), spec, )?, )) @@ -425,7 +425,7 @@ where E: EthSpec, F: Fn(usize) -> Option>, { - let slot = signed_aggregate_and_proof.message.aggregate.data.slot; + let slot = signed_aggregate_and_proof.message().aggregate().data().slot; let domain = spec.get_domain( slot.epoch(E::slots_per_epoch()), @@ -434,9 +434,8 @@ where genesis_validators_root, ); let message = slot.signing_root(domain); - let signature = &signed_aggregate_and_proof.message.selection_proof; - let validator_index = signed_aggregate_and_proof.message.aggregator_index; - + let signature = signed_aggregate_and_proof.message().selection_proof(); + let validator_index = signed_aggregate_and_proof.message().aggregator_index(); Ok(SignatureSet::single_pubkey( signature, get_pubkey(validator_index as usize).ok_or(Error::ValidatorUnknown(validator_index))?, @@ -456,9 +455,9 @@ where F: Fn(usize) -> Option>, { let target_epoch = signed_aggregate_and_proof - .message - .aggregate - .data + .message() + .aggregate() + .data() .target .epoch; @@ -468,9 +467,9 @@ where fork, genesis_validators_root, ); - let message = signed_aggregate_and_proof.message.signing_root(domain); - let signature = &signed_aggregate_and_proof.signature; - let validator_index = signed_aggregate_and_proof.message.aggregator_index; + let message = 
signed_aggregate_and_proof.message().signing_root(domain); + let signature = signed_aggregate_and_proof.signature(); + let validator_index = signed_aggregate_and_proof.message().aggregator_index(); Ok(SignatureSet::single_pubkey( signature, @@ -665,3 +664,37 @@ where message, ))) } + +/// Returns two signature sets, one for the source and one for the target validator +/// in the `SignedConsolidation`. +pub fn consolidation_signature_set<'a, E, F>( + state: &'a BeaconState, + get_pubkey: F, + consolidation: &'a SignedConsolidation, + spec: &'a ChainSpec, +) -> Result> +where + E: EthSpec, + F: Fn(usize) -> Option>, +{ + let source_index = consolidation.message.source_index as usize; + let target_index = consolidation.message.target_index as usize; + + let domain = spec.compute_domain( + Domain::Consolidation, + spec.genesis_fork_version, + state.genesis_validators_root(), + ); + + let message = consolidation.message.signing_root(domain); + let source_pubkey = + get_pubkey(source_index).ok_or(Error::ValidatorUnknown(source_index as u64))?; + let target_pubkey = + get_pubkey(target_index).ok_or(Error::ValidatorUnknown(target_index as u64))?; + + Ok(SignatureSet::multiple_pubkeys( + &consolidation.signature, + vec![source_pubkey, target_pubkey], + message, + )) +} diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index f0055fa80dd..2774dd3d87f 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -388,8 +388,13 @@ async fn invalid_attestation_no_committee_for_index() { .clone() .deconstruct() .0; - head_block.to_mut().body_mut().attestations_mut()[0] - .data + head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .data_mut() .index += 1; let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( @@ -423,11 +428,22 @@ async fn invalid_attestation_wrong_justified_checkpoint() { .clone() .deconstruct() .0; - let old_justified_checkpoint = head_block.body().attestations()[0].data.source; + let old_justified_checkpoint = head_block + .body() + .attestations() + .next() + .unwrap() + .data() + .source; let mut new_justified_checkpoint = old_justified_checkpoint; new_justified_checkpoint.epoch += Epoch::new(1); - head_block.to_mut().body_mut().attestations_mut()[0] - .data + head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .data_mut() .source = new_justified_checkpoint; let mut ctxt = ConsensusContext::new(state.slot()); @@ -467,8 +483,14 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { .clone() .deconstruct() .0; - head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = - Bitfield::with_capacity(spec.target_committee_size).unwrap(); + *head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .aggregation_bits_base_mut() + .unwrap() = Bitfield::with_capacity(spec.target_committee_size).unwrap(); let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( @@ -501,7 +523,13 @@ async fn invalid_attestation_bad_signature() { .clone() .deconstruct() .0; - head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); + *head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .signature_mut() = AggregateSignature::empty(); let mut ctxt = 
ConsensusContext::new(state.slot()); let result = process_operations::process_attestations( @@ -536,10 +564,15 @@ async fn invalid_attestation_included_too_early() { .clone() .deconstruct() .0; - let new_attesation_slot = head_block.body().attestations()[0].data.slot + let new_attesation_slot = head_block.body().attestations().next().unwrap().data().slot + Slot::new(MainnetEthSpec::slots_per_epoch()); - head_block.to_mut().body_mut().attestations_mut()[0] - .data + head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .data_mut() .slot = new_attesation_slot; let mut ctxt = ConsensusContext::new(state.slot()); @@ -579,10 +612,15 @@ async fn invalid_attestation_included_too_late() { .clone() .deconstruct() .0; - let new_attesation_slot = head_block.body().attestations()[0].data.slot + let new_attesation_slot = head_block.body().attestations().next().unwrap().data().slot - Slot::new(MainnetEthSpec::slots_per_epoch()); - head_block.to_mut().body_mut().attestations_mut()[0] - .data + head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .data_mut() .slot = new_attesation_slot; let mut ctxt = ConsensusContext::new(state.slot()); @@ -619,8 +657,13 @@ async fn invalid_attestation_target_epoch_slot_mismatch() { .clone() .deconstruct() .0; - head_block.to_mut().body_mut().attestations_mut()[0] - .data + head_block + .to_mut() + .body_mut() + .attestations_mut() + .next() + .unwrap() + .data_mut() .target .epoch += Epoch::new(1); @@ -655,7 +698,7 @@ async fn valid_insert_attester_slashing() { let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, - &[attester_slashing], + [attester_slashing.to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, &spec, @@ -671,13 +714,20 @@ async fn invalid_attester_slashing_not_slashable() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); - attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); + match &mut attester_slashing { + AttesterSlashing::Base(ref mut attester_slashing) => { + attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); + } + AttesterSlashing::Electra(ref mut attester_slashing) => { + attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); + } + } let mut state = harness.get_current_state(); let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, - &[attester_slashing], + [attester_slashing.to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, &spec, @@ -699,13 +749,20 @@ async fn invalid_attester_slashing_1_invalid() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); - attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + match &mut attester_slashing { + AttesterSlashing::Base(ref mut attester_slashing) => { + attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + } + AttesterSlashing::Electra(ref mut attester_slashing) => { + attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + } + } let mut state = harness.get_current_state(); let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, - &[attester_slashing], + [attester_slashing.to_ref()].into_iter(), 
VerifySignatures::True, &mut ctxt, &spec, @@ -730,13 +787,20 @@ async fn invalid_attester_slashing_2_invalid() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); - attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + match &mut attester_slashing { + AttesterSlashing::Base(ref mut attester_slashing) => { + attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + } + AttesterSlashing::Electra(ref mut attester_slashing) => { + attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + } + } let mut state = harness.get_current_state(); let mut ctxt = ConsensusContext::new(state.slot()); let result = process_operations::process_attester_slashings( &mut state, - &[attester_slashing], + [attester_slashing.to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, &spec, diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index c904ba55f0a..6bfb51d475b 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -17,12 +17,12 @@ fn error(reason: Invalid) -> BlockOperationError { /// Optionally verifies the aggregate signature, depending on `verify_signatures`. pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( state: &BeaconState, - attestation: &Attestation, + attestation: AttestationRef<'ctxt, E>, ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result<&'ctxt IndexedAttestation> { - let data = &attestation.data; +) -> Result> { + let data = attestation.data(); verify!( data.slot.safe_add(spec.min_attestation_inclusion_delay)? <= state.slot(), @@ -61,17 +61,27 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( /// Spec v0.12.1 pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( state: &BeaconState, - attestation: &Attestation, + attestation: AttestationRef<'ctxt, E>, ctxt: &'ctxt mut ConsensusContext, verify_signatures: VerifySignatures, spec: &ChainSpec, -) -> Result<&'ctxt IndexedAttestation> { - let data = &attestation.data; +) -> Result> { + let data = attestation.data(); - verify!( - data.index < state.get_committee_count_at_slot(data.slot)?, - Invalid::BadCommitteeIndex - ); + // TODO(electra) choosing a validation based on the attestation's fork + // rather than the state's fork makes this simple, but technically the spec + // defines this verification based on the state's fork. + match attestation { + AttestationRef::Base(_) => { + verify!( + data.index < state.get_committee_count_at_slot(data.slot)?, + Invalid::BadCommitteeIndex + ); + } + AttestationRef::Electra(_) => { + verify!(data.index == 0, Invalid::BadCommitteeIndex); + } + } // Verify the Casper FFG vote. 
verify_casper_ffg_vote(attestation, state)?; @@ -87,10 +97,10 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( /// /// Spec v0.12.1 fn verify_casper_ffg_vote( - attestation: &Attestation, + attestation: AttestationRef, state: &BeaconState, ) -> Result<()> { - let data = &attestation.data; + let data = attestation.data(); verify!( data.target.epoch == data.slot.epoch(E::slots_per_epoch()), Invalid::TargetEpochSlotMismatch { diff --git a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs index 0cb215fe93f..7fe4c8bc08b 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -18,12 +18,12 @@ fn error(reason: Invalid) -> BlockOperationError { /// invalidity. pub fn verify_attester_slashing( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: AttesterSlashingRef<'_, E>, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result> { - let attestation_1 = &attester_slashing.attestation_1; - let attestation_2 = &attester_slashing.attestation_2; + let attestation_1 = attester_slashing.attestation_1(); + let attestation_2 = attester_slashing.attestation_2(); // Spec: is_slashable_attestation_data verify!( @@ -45,7 +45,7 @@ pub fn verify_attester_slashing( /// Returns Ok(indices) if `indices.len() > 0` pub fn get_slashable_indices( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: AttesterSlashingRef<'_, E>, ) -> Result> { get_slashable_indices_modular(state, attester_slashing, |_, validator| { validator.is_slashable_at(state.current_epoch()) @@ -56,23 +56,22 @@ pub fn get_slashable_indices( /// for determining whether a given validator should be considered slashable. pub fn get_slashable_indices_modular( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: AttesterSlashingRef<'_, E>, is_slashable: F, ) -> Result> where F: Fn(u64, &Validator) -> bool, { - let attestation_1 = &attester_slashing.attestation_1; - let attestation_2 = &attester_slashing.attestation_2; + let attestation_1 = attester_slashing.attestation_1(); + let attestation_2 = attester_slashing.attestation_2(); let attesting_indices_1 = attestation_1 - .attesting_indices - .iter() + .attesting_indices_iter() .cloned() .collect::>(); + let attesting_indices_2 = attestation_2 - .attesting_indices - .iter() + .attesting_indices_iter() .cloned() .collect::>(); diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index a964f3b5740..c996e580a78 100644 --- a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -14,7 +14,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError { /// Verify `Deposit.pubkey` signed `Deposit.signature`. 
/// /// Spec v0.12.1 -pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> { +pub fn is_valid_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> { let (public_key, signature, msg) = deposit_pubkey_signature_message(deposit_data, spec) .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?; diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index fc258d38298..dea17dbc0c4 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -79,5 +79,16 @@ pub fn verify_exit( ); } + // [New in Electra:EIP7251] + // Only exit validator if it has no pending withdrawals in the queue + if let Ok(pending_balance_to_withdraw) = + state.get_pending_balance_to_withdraw(exit.validator_index as usize) + { + verify!( + pending_balance_to_withdraw == 0, + ExitInvalid::PendingWithdrawalInQueue(exit.validator_index) + ); + } + Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index 7e244058038..c5ec80b92a1 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -1,4 +1,4 @@ -use crate::common::get_attesting_indices; +use crate::common::attesting_indices_base::get_attesting_indices; use safe_arith::SafeArith; use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, PendingAttestation}; @@ -30,7 +30,7 @@ impl Default for InclusionInfo { /// Defaults to `delay` at its maximum value and `proposer_index` at zero. fn default() -> Self { Self { - delay: u64::max_value(), + delay: u64::MAX, proposer_index: 0, } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 6f48050e161..952ab3f6498 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -100,6 +100,10 @@ impl EpochProcessingSummary { &metrics::PARTICIPATION_PREV_EPOCH_SOURCE_ATTESTING_GWEI_TOTAL, self.previous_epoch_source_attesting_balance()? 
as i64, ); + metrics::set_gauge( + &metrics::PARTICIPATION_CURRENT_EPOCH_TOTAL_ACTIVE_GWEI_TOTAL, + self.current_epoch_total_active_balance() as i64, + ); Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index de481ec6767..b6c9dbea521 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -25,6 +25,9 @@ pub enum EpochProcessingError { InvalidFlagIndex(usize), MilhouseError(milhouse::Error), EpochCache(EpochCacheError), + SinglePassMissingActivationQueue, + MissingEarliestExitEpoch, + MissingExitBalanceToConsume, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 4b2f940e5f8..3d02d797366 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -19,19 +19,20 @@ pub fn process_registry_updates( validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance }; + let fork_name = state.fork_name_unchecked(); let indices_to_update: Vec<_> = state .validators() .iter() .enumerate() .filter(|(_, validator)| { - validator.is_eligible_for_activation_queue(spec) || is_ejectable(validator) + validator.is_eligible_for_activation_queue(spec, fork_name) || is_ejectable(validator) }) .map(|(idx, _)| idx) .collect(); for index in indices_to_update { let validator = state.get_validator_mut(index)?; - if validator.is_eligible_for_activation_queue(spec) { + if validator.is_eligible_for_activation_queue(spec, fork_name) { validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(validator) { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 7a95de3317e..514cf639360 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1,20 +1,24 @@ use crate::{ - common::update_progressive_balances_cache::initialize_progressive_balances_cache, + common::{ + decrease_balance, increase_balance, + update_progressive_balances_cache::initialize_progressive_balances_cache, + }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; -use std::collections::BTreeSet; +use std::collections::{BTreeSet, HashMap}; use types::{ consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExitCache, ForkName, - ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, + ExitCache, ForkName, List, ParticipationFlags, ProgressiveBalancesCache, RelativeEpoch, + Unsigned, Validator, }; pub struct SinglePassConfig { @@ -22,6 +26,8 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, + pub pending_balance_deposits: bool, + pub pending_consolidations: 
bool, pub effective_balance_updates: bool, } @@ -38,6 +44,8 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, + pending_balance_deposits: true, + pending_consolidations: true, effective_balance_updates: true, } } @@ -48,6 +56,8 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, + pending_balance_deposits: false, + pending_consolidations: false, effective_balance_updates: false, } } @@ -57,6 +67,7 @@ impl SinglePassConfig { struct StateContext { current_epoch: Epoch, next_epoch: Epoch, + finalized_checkpoint: Checkpoint, is_in_inactivity_leak: bool, total_active_balance: u64, churn_limit: u64, @@ -73,6 +84,15 @@ struct SlashingsContext { target_withdrawable_epoch: Epoch, } +struct PendingBalanceDepositsContext { + /// The value to set `next_deposit_index` to *after* processing completes. + next_deposit_index: usize, + /// The value to set `deposit_balance_to_consume` to *after* processing completes. + deposit_balance_to_consume: u64, + /// Total balance increases for each validator due to pending balance deposits. + validator_deposits_to_process: HashMap, +} + struct EffectiveBalancesContext { downward_threshold: u64, upward_threshold: u64, @@ -129,6 +149,7 @@ pub fn process_epoch_single_pass( let state_ctxt = &StateContext { current_epoch, next_epoch, + finalized_checkpoint, is_in_inactivity_leak, total_active_balance, churn_limit, @@ -139,6 +160,16 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; + let pending_balance_deposits_ctxt = + if fork_name.electra_enabled() && conf.pending_balance_deposits { + Some(PendingBalanceDepositsContext::new(state, spec)?) + } else { + None + }; + + let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); + let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); + // Split the state into several disjoint mutable borrows. let ( validators, @@ -165,12 +196,19 @@ pub fn process_epoch_single_pass( // Compute shared values required for different parts of epoch processing. let rewards_ctxt = &RewardsAndPenaltiesContext::new(progressive_balances, state_ctxt, spec)?; - let activation_queue = &epoch_cache - .activation_queue()? - .get_validators_eligible_for_activation( - finalized_checkpoint.epoch, - activation_churn_limit as usize, - ); + + let mut activation_queues = if !fork_name.electra_enabled() { + let activation_queue = epoch_cache + .activation_queue()? + .get_validators_eligible_for_activation( + finalized_checkpoint.epoch, + activation_churn_limit as usize, + ); + let next_epoch_activation_queue = ActivationQueue::default(); + Some((activation_queue, next_epoch_activation_queue)) + } else { + None + }; let effective_balances_ctxt = &EffectiveBalancesContext::new(spec)?; // Iterate over the validators and related fields in one pass. @@ -178,10 +216,6 @@ pub fn process_epoch_single_pass( let mut balances_iter = balances.iter_cow(); let mut inactivity_scores_iter = inactivity_scores.iter_cow(); - // Values computed for the next epoch transition. 
- let mut next_epoch_total_active_balance = 0; - let mut next_epoch_activation_queue = ActivationQueue::default(); - for (index, &previous_epoch_participation, &current_epoch_participation) in izip!( 0..num_validators, previous_epoch_participation.iter(), @@ -246,13 +280,17 @@ // `process_registry_updates` if conf.registry_updates { + let activation_queue_refs = activation_queues + .as_mut() + .map(|(current_queue, next_queue)| (&*current_queue, next_queue)); process_single_registry_update( &mut validator, validator_info, exit_cache, - activation_queue, - &mut next_epoch_activation_queue, + activation_queue_refs, state_ctxt, + earliest_exit_epoch.as_mut(), + exit_balance_to_consume.as_mut(), spec, )?; } @@ -262,13 +300,22 @@ process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } + // `process_pending_balance_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { + process_pending_balance_deposits_for_validator( + &mut balance, + validator_info, + pending_balance_deposits_ctxt, + )?; + } + // `process_effective_balance_updates` if conf.effective_balance_updates { process_single_effective_balance_update( + validator_info.index, *balance, &mut validator, - validator_info, - &mut next_epoch_total_active_balance, + validator_info.current_epoch_participation, &mut next_epoch_cache, progressive_balances, effective_balances_ctxt, @@ -278,15 +325,57 @@ } } - if conf.effective_balance_updates { - state.set_total_active_balance(next_epoch, next_epoch_total_active_balance, spec); - *state.epoch_cache_mut() = next_epoch_cache.into_epoch_cache( - next_epoch_total_active_balance, - next_epoch_activation_queue, + if conf.registry_updates && fork_name.electra_enabled() { + if let Ok(earliest_exit_epoch_state) = state.earliest_exit_epoch_mut() { + *earliest_exit_epoch_state = + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?; + } + if let Ok(exit_balance_to_consume_state) = state.exit_balance_to_consume_mut() { + *exit_balance_to_consume_state = + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?; + } + } + + // Finish processing pending balance deposits if relevant. + // + // This *could* be reordered after `process_pending_consolidations` which pushes only to the end + // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // by the spec and do this first. + if let Some(ctxt) = pending_balance_deposits_ctxt { + let new_pending_balance_deposits = List::try_from_iter( + state + .pending_balance_deposits()? + .iter_from(ctxt.next_deposit_index)? + .cloned(), + )?; + *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + } + + // Process consolidations outside the single-pass loop, as they depend on balances for multiple + // validators and cannot be computed accurately inside the loop. + if fork_name.electra_enabled() && conf.pending_consolidations { + process_pending_consolidations( + state, + &mut next_epoch_cache, + effective_balances_ctxt, + conf.effective_balance_updates, + state_ctxt, spec, )?; } + // Finally, finish updating effective balance caches. We need this to happen *after* processing + // of pending consolidations, which recomputes some effective balances. 
+ if conf.effective_balance_updates { + let next_epoch_total_active_balance = next_epoch_cache.get_total_active_balance(); + state.set_total_active_balance(next_epoch, next_epoch_total_active_balance, spec); + let next_epoch_activation_queue = + activation_queues.map_or_else(ActivationQueue::default, |(_, queue)| queue); + *state.epoch_cache_mut() = + next_epoch_cache.into_epoch_cache(next_epoch_activation_queue, spec)?; + } + Ok(summary) } @@ -455,7 +544,42 @@ impl RewardsAndPenaltiesContext { } } +#[allow(clippy::too_many_arguments)] fn process_single_registry_update( + validator: &mut Cow, + validator_info: &ValidatorInfo, + exit_cache: &mut ExitCache, + activation_queues: Option<(&BTreeSet, &mut ActivationQueue)>, + state_ctxt: &StateContext, + earliest_exit_epoch: Option<&mut Epoch>, + exit_balance_to_consume: Option<&mut u64>, + spec: &ChainSpec, +) -> Result<(), Error> { + if !state_ctxt.fork_name.electra_enabled() { + let (activation_queue, next_epoch_activation_queue) = + activation_queues.ok_or(Error::SinglePassMissingActivationQueue)?; + process_single_registry_update_pre_electra( + validator, + validator_info, + exit_cache, + activation_queue, + next_epoch_activation_queue, + state_ctxt, + spec, + ) + } else { + process_single_registry_update_post_electra( + validator, + exit_cache, + state_ctxt, + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?, + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?, + spec, + ) + } +} + +fn process_single_registry_update_pre_electra( validator: &mut Cow, validator_info: &ValidatorInfo, exit_cache: &mut ExitCache, @@ -466,13 +590,13 @@ fn process_single_registry_update( ) -> Result<(), Error> { let current_epoch = state_ctxt.current_epoch; - if validator.is_eligible_for_activation_queue(spec) { + if validator.is_eligible_for_activation_queue(spec, state_ctxt.fork_name) { validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance { - initiate_validator_exit(validator, exit_cache, state_ctxt, spec)?; + initiate_validator_exit(validator, exit_cache, state_ctxt, None, None, spec)?; } if activation_queue.contains(&validator_info.index) { @@ -491,10 +615,49 @@ fn process_single_registry_update( Ok(()) } +fn process_single_registry_update_post_electra( + validator: &mut Cow, + exit_cache: &mut ExitCache, + state_ctxt: &StateContext, + earliest_exit_epoch: &mut Epoch, + exit_balance_to_consume: &mut u64, + spec: &ChainSpec, +) -> Result<(), Error> { + let current_epoch = state_ctxt.current_epoch; + + if validator.is_eligible_for_activation_queue(spec, state_ctxt.fork_name) { + validator.make_mut()?.activation_eligibility_epoch = current_epoch.safe_add(1)?; + } + + if validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance + { + initiate_validator_exit( + validator, + exit_cache, + state_ctxt, + Some(earliest_exit_epoch), + Some(exit_balance_to_consume), + spec, + )?; + } + + if validator.is_eligible_for_activation_with_finalized_checkpoint( + &state_ctxt.finalized_checkpoint, + spec, + ) { + validator.make_mut()?.activation_epoch = + spec.compute_activation_exit_epoch(current_epoch)?; + } + + Ok(()) +} + fn initiate_validator_exit( validator: &mut Cow, exit_cache: &mut ExitCache, state_ctxt: &StateContext, + earliest_exit_epoch: Option<&mut Epoch>, + exit_balance_to_consume: Option<&mut u64>, spec: &ChainSpec, ) -> Result<(), Error> { // Return if the validator already 
initiated exit @@ -502,16 +665,27 @@ fn initiate_validator_exit( return Ok(()); } - // Compute exit queue epoch - let delayed_epoch = spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?; - let mut exit_queue_epoch = exit_cache - .max_epoch()? - .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); - let exit_queue_churn = exit_cache.get_churn_at(exit_queue_epoch)?; - - if exit_queue_churn >= state_ctxt.churn_limit { - exit_queue_epoch.safe_add_assign(1)?; - } + let exit_queue_epoch = if state_ctxt.fork_name.electra_enabled() { + compute_exit_epoch_and_update_churn( + validator, + state_ctxt, + earliest_exit_epoch.ok_or(Error::MissingEarliestExitEpoch)?, + exit_balance_to_consume.ok_or(Error::MissingExitBalanceToConsume)?, + spec, + )? + } else { + // Compute exit queue epoch + let delayed_epoch = spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?; + let mut exit_queue_epoch = exit_cache + .max_epoch()? + .map_or(delayed_epoch, |epoch| max(epoch, delayed_epoch)); + let exit_queue_churn = exit_cache.get_churn_at(exit_queue_epoch)?; + + if exit_queue_churn >= state_ctxt.churn_limit { + exit_queue_epoch.safe_add_assign(1)?; + } + exit_queue_epoch + }; let validator = validator.make_mut()?; validator.exit_epoch = exit_queue_epoch; @@ -522,6 +696,64 @@ fn initiate_validator_exit( Ok(()) } +fn compute_exit_epoch_and_update_churn( + validator: &mut Cow, + state_ctxt: &StateContext, + earliest_exit_epoch_state: &mut Epoch, + exit_balance_to_consume_state: &mut u64, + spec: &ChainSpec, +) -> Result { + let exit_balance = validator.effective_balance; + let mut earliest_exit_epoch = std::cmp::max( + *earliest_exit_epoch_state, + spec.compute_activation_exit_epoch(state_ctxt.current_epoch)?, + ); + + let per_epoch_churn = get_activation_exit_churn_limit(state_ctxt, spec)?; + // New epoch for exits + let mut exit_balance_to_consume = if *earliest_exit_epoch_state < earliest_exit_epoch { + per_epoch_churn + } else { + *exit_balance_to_consume_state + }; + + // Exit doesn't fit in the current earliest epoch + if exit_balance > exit_balance_to_consume { + let balance_to_process = exit_balance.safe_sub(exit_balance_to_consume)?; + let additional_epochs = balance_to_process + .safe_sub(1)? + .safe_div(per_epoch_churn)? + .safe_add(1)?; + earliest_exit_epoch.safe_add_assign(additional_epochs)?; + exit_balance_to_consume.safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?; + } + // Consume the balance and update state variables + *exit_balance_to_consume_state = exit_balance_to_consume.safe_sub(exit_balance)?; + *earliest_exit_epoch_state = earliest_exit_epoch; + + Ok(earliest_exit_epoch) +} + +fn get_activation_exit_churn_limit( + state_ctxt: &StateContext, + spec: &ChainSpec, +) -> Result { + Ok(std::cmp::min( + spec.max_per_epoch_activation_exit_churn_limit, + get_balance_churn_limit(state_ctxt, spec)?, + )) +} + +fn get_balance_churn_limit(state_ctxt: &StateContext, spec: &ChainSpec) -> Result { + let total_active_balance = state_ctxt.total_active_balance; + let churn = std::cmp::max( + spec.min_per_epoch_churn_limit_electra, + total_active_balance.safe_div(spec.churn_limit_quotient)?, + ); + + Ok(churn.safe_sub(churn.safe_rem(spec.effective_balance_increment)?)?) +} + impl SlashingsContext { fn new( state: &BeaconState, @@ -568,6 +800,152 @@ fn process_single_slashing( Ok(()) } +impl PendingBalanceDepositsContext { + fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + let available_for_processing = state + .deposit_balance_to_consume()? 
+ .safe_add(state.get_activation_exit_churn_limit(spec)?)?; + let mut processed_amount = 0; + let mut next_deposit_index = 0; + let mut validator_deposits_to_process = HashMap::new(); + + let pending_balance_deposits = state.pending_balance_deposits()?; + + for deposit in pending_balance_deposits.iter() { + if processed_amount.safe_add(deposit.amount)? > available_for_processing { + break; + } + validator_deposits_to_process + .entry(deposit.index as usize) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + + processed_amount.safe_add_assign(deposit.amount)?; + next_deposit_index.safe_add_assign(1)?; + } + + let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() { + 0 + } else { + available_for_processing.safe_sub(processed_amount)? + }; + + Ok(Self { + next_deposit_index, + deposit_balance_to_consume, + validator_deposits_to_process, + }) + } +} + +fn process_pending_balance_deposits_for_validator( + balance: &mut Cow, + validator_info: &ValidatorInfo, + pending_balance_deposits_ctxt: &PendingBalanceDepositsContext, +) -> Result<(), Error> { + if let Some(deposit_amount) = pending_balance_deposits_ctxt + .validator_deposits_to_process + .get(&validator_info.index) + { + balance.make_mut()?.safe_add_assign(*deposit_amount)?; + } + Ok(()) +} + +/// We process pending consolidations after all of single-pass epoch processing, and then patch up +/// the effective balances for affected validators. +/// +/// This is safe because processing consolidations does not depend on the `effective_balance`. +fn process_pending_consolidations( + state: &mut BeaconState, + next_epoch_cache: &mut PreEpochCache, + effective_balances_ctxt: &EffectiveBalancesContext, + perform_effective_balance_updates: bool, + state_ctxt: &StateContext, + spec: &ChainSpec, +) -> Result<(), Error> { + let mut next_pending_consolidation: usize = 0; + let current_epoch = state.current_epoch(); + let pending_consolidations = state.pending_consolidations()?.clone(); + + let mut affected_validators = BTreeSet::new(); + + for pending_consolidation in &pending_consolidations { + let source_index = pending_consolidation.source_index as usize; + let target_index = pending_consolidation.target_index as usize; + let source_validator = state.get_validator(source_index)?; + if source_validator.slashed { + next_pending_consolidation.safe_add_assign(1)?; + continue; + } + if source_validator.withdrawable_epoch > current_epoch { + break; + } + + // Calculate the active balance while we have the source validator loaded. This is a safe + // reordering. + let source_balance = *state + .balances() + .get(source_index) + .ok_or(BeaconStateError::UnknownValidator(source_index))?; + let active_balance = + source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name); + + // Churn any target excess active balance of target and raise its max. + state.switch_to_compounding_validator(target_index, spec)?; + + // Move active balance to target. Excess balance is withdrawable. + decrease_balance(state, source_index, active_balance)?; + increase_balance(state, target_index, active_balance)?; + + affected_validators.insert(source_index); + affected_validators.insert(target_index); + + next_pending_consolidation.safe_add_assign(1)?; + } + + let new_pending_consolidations = List::try_from_iter( + state + .pending_consolidations()? + .iter_from(next_pending_consolidation)? + .cloned(), + )?; + *state.pending_consolidations_mut()? 
= new_pending_consolidations; + + // the spec tests require we don't perform effective balance updates when testing pending_consolidations + if !perform_effective_balance_updates { + return Ok(()); + } + + // Re-process effective balance updates for validators affected by consolidations. + let (validators, balances, _, current_epoch_participation, _, progressive_balances, _, _) = + state.mutable_validator_fields()?; + for validator_index in affected_validators { + let balance = *balances + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + let mut validator = validators + .get_cow(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + let validator_current_epoch_participation = *current_epoch_participation + .get(validator_index) + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; + + process_single_effective_balance_update( + validator_index, + balance, + &mut validator, + validator_current_epoch_participation, + next_epoch_cache, + progressive_balances, + effective_balances_ctxt, + state_ctxt, + spec, + )?; + } + Ok(()) +} + impl EffectiveBalancesContext { fn new(spec: &ChainSpec) -> Result { let hysteresis_increment = spec @@ -584,18 +962,24 @@ impl EffectiveBalancesContext { } } +/// This function abstracts over phase0 and Electra effective balance processing. #[allow(clippy::too_many_arguments)] fn process_single_effective_balance_update( + validator_index: usize, balance: u64, validator: &mut Cow, - validator_info: &ValidatorInfo, - next_epoch_total_active_balance: &mut u64, + validator_current_epoch_participation: ParticipationFlags, next_epoch_cache: &mut PreEpochCache, progressive_balances: &mut ProgressiveBalancesCache, eb_ctxt: &EffectiveBalancesContext, state_ctxt: &StateContext, spec: &ChainSpec, ) -> Result<(), Error> { + // Use the higher effective balance limit if post-Electra and compounding withdrawal credentials + // are set. + let effective_balance_limit = + validator.get_validator_max_effective_balance(spec, state_ctxt.fork_name); + let old_effective_balance = validator.effective_balance; let new_effective_balance = if balance.safe_add(eb_ctxt.downward_threshold)? < validator.effective_balance @@ -606,15 +990,13 @@ fn process_single_effective_balance_update( { min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, + effective_balance_limit, ) } else { validator.effective_balance }; - if validator.is_active_at(state_ctxt.next_epoch) { - next_epoch_total_active_balance.safe_add_assign(new_effective_balance)?; - } + let is_active_next_epoch = validator.is_active_at(state_ctxt.next_epoch); if new_effective_balance != old_effective_balance { validator.make_mut()?.effective_balance = new_effective_balance; @@ -623,14 +1005,18 @@ fn process_single_effective_balance_update( // previous epoch once the epoch transition completes. progressive_balances.on_effective_balance_change( validator.slashed, - validator_info.current_epoch_participation, + validator_current_epoch_participation, old_effective_balance, new_effective_balance, )?; } - // Caching: update next epoch effective balances. - next_epoch_cache.push_effective_balance(new_effective_balance); + // Caching: update next epoch effective balances and total active balance. 
+ next_epoch_cache.update_effective_balance( + validator_index, + new_effective_balance, + is_active_next_epoch, + )?; Ok(()) } diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 872560db3df..3006da25ae7 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -1,5 +1,7 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache; -use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; +use crate::common::{ + attesting_indices_base::get_attesting_indices, get_attestation_participation_flag_indices, +}; use std::mem; use std::sync::Arc; use types::{ diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e60bf488db..1e532d9f107 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,8 +1,8 @@ use safe_arith::SafeArith; use std::mem; use types::{ - BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, + BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, Epoch, EpochCache, + EthSpec, Fork, }; /// Transform a `Deneb` state into an `Electra` state. @@ -26,13 +26,66 @@ pub fn upgrade_to_electra( pre_state.build_total_active_balance_cache(spec)?; let earliest_consolidation_epoch = spec.compute_activation_exit_epoch(epoch)?; + let mut post = upgrade_state_to_electra( + pre_state, + earliest_exit_epoch, + earliest_consolidation_epoch, + spec, + )?; + + *post.exit_balance_to_consume_mut()? = post.get_activation_exit_churn_limit(spec)?; + *post.consolidation_balance_to_consume_mut()? = post.get_consolidation_churn_limit(spec)?; + + // Add validators that are not yet active to pending balance deposits + let validators = post.validators().clone(); + let mut pre_activation = validators + .iter() + .enumerate() + .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .collect::>(); + + // Sort the indices by activation_eligibility_epoch and then by index + pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { + if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { + index_a.cmp(index_b) + } else { + val_a + .activation_eligibility_epoch + .cmp(&val_b.activation_eligibility_epoch) + } + }); + + // Process validators to queue entire balance and reset them + for (index, _) in pre_activation { + post.queue_entire_balance_and_reset_validator(index, spec)?; + } + + // Ensure early adopters of compounding credentials go through the activation churn + for (index, validator) in validators.iter().enumerate() { + if validator.has_compounding_withdrawal_credential(spec) { + post.queue_excess_active_balance(index, spec)?; + } + } + + *pre_state = post; + + Ok(()) +} + +pub fn upgrade_state_to_electra( + pre_state: &mut BeaconState, + earliest_exit_epoch: Epoch, + earliest_consolidation_epoch: Epoch, + spec: &ChainSpec, +) -> Result, Error> { + let epoch = pre_state.current_epoch(); let pre = pre_state.as_deneb_mut()?; // Where possible, use something like `mem::take` to move fields from behind the &mut // reference. For other fields that don't have a good default value, use `clone`. // // Fixed size vectors get cloned because replacing them would require the same size // allocation as cloning. 
- let mut post = BeaconState::Electra(BeaconStateElectra { + let post = BeaconState::Electra(BeaconStateElectra { // Versioning genesis_time: pre.genesis_time, genesis_validators_root: pre.genesis_validators_root, @@ -78,7 +131,7 @@ pub fn upgrade_to_electra( next_withdrawal_validator_index: pre.next_withdrawal_validator_index, historical_summaries: pre.historical_summaries.clone(), // Electra - deposit_receipts_start_index: spec.unset_deposit_receipts_start_index, + deposit_requests_start_index: spec.unset_deposit_requests_start_index, deposit_balance_to_consume: 0, exit_balance_to_consume: 0, earliest_exit_epoch, @@ -96,41 +149,5 @@ pub fn upgrade_to_electra( slashings_cache: mem::take(&mut pre.slashings_cache), epoch_cache: EpochCache::default(), }); - *post.exit_balance_to_consume_mut()? = post.get_activation_exit_churn_limit(spec)?; - *post.consolidation_balance_to_consume_mut()? = post.get_consolidation_churn_limit(spec)?; - - // Add validators that are not yet active to pending balance deposits - let validators = post.validators().clone(); - let mut pre_activation = validators - .iter() - .enumerate() - .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) - .collect::>(); - - // Sort the indices by activation_eligibility_epoch and then by index - pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { - if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { - index_a.cmp(index_b) - } else { - val_a - .activation_eligibility_epoch - .cmp(&val_b.activation_eligibility_epoch) - } - }); - - // Process validators to queue entire balance and reset them - for (index, _) in pre_activation { - post.queue_entire_balance_and_reset_validator(index, spec)?; - } - - // Ensure early adopters of compounding credentials go through the activation churn - for (index, validator) in validators.iter().enumerate() { - if validator.has_compounding_withdrawal_credential(spec) { - post.queue_excess_active_balance(index, spec)?; - } - } - - *pre_state = post; - - Ok(()) + Ok(post) } diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index b3924cd9732..3b20c67b4d9 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -7,35 +7,115 @@ use crate::per_block_processing::{ verify_proposer_slashing, }; use crate::VerifySignatures; +use arbitrary::Arbitrary; use derivative::Derivative; use smallvec::{smallvec, SmallVec}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; +use test_random_derive::TestRandom; use types::{ - AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, - SignedBlsToExecutionChange, SignedVoluntaryExit, + test_utils::TestRandom, AttesterSlashing, AttesterSlashingBase, AttesterSlashingOnDisk, + AttesterSlashingRefOnDisk, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, + ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, }; const MAX_FORKS_VERIFIED_AGAINST: usize = 2; +pub trait TransformPersist { + type Persistable: Encode + Decode; + type PersistableRef<'a>: Encode + where + Self: 'a; + + /// Returns a reference to the object in a form that implements `Encode` + fn as_persistable_ref(&self) -> Self::PersistableRef<'_>; + + /// Converts the object back into its original form. 
+ fn from_persistable(persistable: Self::Persistable) -> Self; +} + /// Wrapper around an operation type that acts as proof that its signature has been checked. /// /// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. -#[derive(Derivative, Debug, Clone, Encode, Decode)] +#[derive(Derivative, Debug, Clone, Arbitrary)] #[derivative( PartialEq, Eq, - Hash(bound = "T: Encode + Decode + std::hash::Hash, E: EthSpec") + Hash(bound = "T: TransformPersist + std::hash::Hash, E: EthSpec") )] -pub struct SigVerifiedOp { +#[arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec")] +pub struct SigVerifiedOp { op: T, verified_against: VerifiedAgainst, - #[ssz(skip_serializing, skip_deserializing)] _phantom: PhantomData, } +impl Encode for SigVerifiedOp { + fn is_ssz_fixed_len() -> bool { + as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Encode>::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + let persistable_ref = self.op.as_persistable_ref(); + SigVerifiedOpEncode { + op: persistable_ref, + verified_against: &self.verified_against, + } + .ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + let persistable_ref = self.op.as_persistable_ref(); + SigVerifiedOpEncode { + op: persistable_ref, + verified_against: &self.verified_against, + } + .ssz_bytes_len() + } +} + +impl Decode for SigVerifiedOp { + fn is_ssz_fixed_len() -> bool { + as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let on_disk = SigVerifiedOpDecode::::from_ssz_bytes(bytes)?; + Ok(SigVerifiedOp { + op: T::from_persistable(on_disk.op), + verified_against: on_disk.verified_against, + _phantom: PhantomData, + }) + } +} + +/// On-disk variant of `SigVerifiedOp` that implements `Encode`. +/// +/// We use separate types for Encode and Decode so we can efficiently handle references: the Encode +/// type contains references, while the Decode type does not. +#[derive(Debug, Encode)] +struct SigVerifiedOpEncode<'a, P: Encode> { + op: P, + verified_against: &'a VerifiedAgainst, +} + +/// On-disk variant of `SigVerifiedOp` that implements `Encode`. +#[derive(Debug, Decode)] +struct SigVerifiedOpDecode { + op: P, + verified_against: VerifiedAgainst, +} + /// Information about the fork versions that this message was verified against. /// /// In general it is not safe to assume that a `SigVerifiedOp` constructed at some point in the past @@ -53,7 +133,7 @@ pub struct SigVerifiedOp { /// /// We need to store multiple `ForkVersion`s because attester slashings contain two indexed /// attestations which may be signed using different versions. -#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode, TestRandom, Arbitrary)] pub struct VerifiedAgainst { fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, } @@ -109,7 +189,7 @@ where } /// Trait for operations that can be verified and transformed into a `SigVerifiedOp`. 
-pub trait VerifyOperation: Encode + Decode + Sized { +pub trait VerifyOperation: TransformPersist + Sized { type Error; fn validate( @@ -152,15 +232,15 @@ impl VerifyOperation for AttesterSlashing { state: &BeaconState, spec: &ChainSpec, ) -> Result, Self::Error> { - verify_attester_slashing(state, &self, VerifySignatures::True, spec)?; + verify_attester_slashing(state, self.to_ref(), VerifySignatures::True, spec)?; Ok(SigVerifiedOp::new(self, state)) } #[allow(clippy::arithmetic_side_effects)] fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { smallvec![ - self.attestation_1.data.target.epoch, - self.attestation_2.data.target.epoch + self.attestation_1().data().target.epoch, + self.attestation_2().data().target.epoch ] } } @@ -237,3 +317,151 @@ impl VerifyOperationAt for SignedVoluntaryExit { Ok(SigVerifiedOp::new(self, state)) } } + +impl TransformPersist for SignedVoluntaryExit { + type Persistable = Self; + type PersistableRef<'a> = &'a Self; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable + } +} + +impl TransformPersist for AttesterSlashing { + type Persistable = AttesterSlashingOnDisk; + type PersistableRef<'a> = AttesterSlashingRefOnDisk<'a, E>; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self.to_ref().into() + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable.into() + } +} + +// TODO: Remove this once we no longer support DB schema version 17 +impl TransformPersist for types::AttesterSlashingBase { + type Persistable = Self; + type PersistableRef<'a> = &'a Self; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable + } +} +// TODO: Remove this once we no longer support DB schema version 17 +impl From, E>> + for SigVerifiedOp, E> +{ + fn from(base: SigVerifiedOp, E>) -> Self { + SigVerifiedOp { + op: AttesterSlashing::Base(base.op), + verified_against: base.verified_against, + _phantom: PhantomData, + } + } +} +// TODO: Remove this once we no longer support DB schema version 17 +impl TryFrom, E>> + for SigVerifiedOp, E> +{ + type Error = String; + + fn try_from(slashing: SigVerifiedOp, E>) -> Result { + match slashing.op { + AttesterSlashing::Base(base) => Ok(SigVerifiedOp { + op: base, + verified_against: slashing.verified_against, + _phantom: PhantomData, + }), + AttesterSlashing::Electra(_) => Err("non-base attester slashing".to_string()), + } + } +} + +impl TransformPersist for ProposerSlashing { + type Persistable = Self; + type PersistableRef<'a> = &'a Self; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable + } +} + +impl TransformPersist for SignedBlsToExecutionChange { + type Persistable = Self; + type PersistableRef<'a> = &'a Self; + + fn as_persistable_ref(&self) -> Self::PersistableRef<'_> { + self + } + + fn from_persistable(persistable: Self::Persistable) -> Self { + persistable + } +} + +#[cfg(all(test, not(debug_assertions)))] +mod test { + use super::*; + use types::{ + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + MainnetEthSpec, + }; + + type E = MainnetEthSpec; + + fn roundtrip_test() { + let runs = 10; + let mut rng = XorShiftRng::seed_from_u64(0xff0af5a356af1123); + + for _ in 0..runs { + let op = T::random_for_test(&mut rng); + let verified_against = 
VerifiedAgainst::random_for_test(&mut rng); + + let verified_op = SigVerifiedOp { + op, + verified_against, + _phantom: PhantomData::, + }; + + let serialized = verified_op.as_ssz_bytes(); + let deserialized = SigVerifiedOp::from_ssz_bytes(&serialized).unwrap(); + let reserialized = deserialized.as_ssz_bytes(); + assert_eq!(verified_op, deserialized); + assert_eq!(serialized, reserialized); + } + } + + #[test] + fn sig_verified_op_exit_roundtrip() { + roundtrip_test::(); + } + + #[test] + fn proposer_slashing_roundtrip() { + roundtrip_test::(); + } + + #[test] + fn attester_slashing_roundtrip() { + roundtrip_test::>(); + } + + #[test] + fn bls_to_execution_roundtrip() { + roundtrip_test::(); + } +} diff --git a/consensus/swap_or_not_shuffle/benches/benches.rs b/consensus/swap_or_not_shuffle/benches/benches.rs index d5f64f0b6b5..2909ff1ac69 100644 --- a/consensus/swap_or_not_shuffle/benches/benches.rs +++ b/consensus/swap_or_not_shuffle/benches/benches.rs @@ -1,7 +1,4 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list as fast_shuffle}; const SHUFFLE_ROUND_COUNT: u8 = 90; @@ -25,70 +22,32 @@ fn shuffles(c: &mut Criterion) { b.iter(|| black_box(shuffle_list(&seed, 8))) }); - c.bench( - "whole list shuffle", - Benchmark::new("8 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 8))) - }), - ); - - c.bench( - "whole list shuffle", - Benchmark::new("16 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 16))) - }), - ); - - c.bench( - "whole list shuffle", - Benchmark::new("512 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 512))) - }) - .sample_size(10), - ); - - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("512 elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..512).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); - - c.bench( - "whole list shuffle", - Benchmark::new("16384 elements", move |b| { - let seed = vec![42; 32]; - b.iter(|| black_box(shuffle_list(&seed, 16_384))) - }) - .sample_size(10), - ); - - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("16384 elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..16384).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); + for size in [8, 16, 512, 16_384] { + c.bench_with_input( + BenchmarkId::new("whole list shuffle", format!("{size} elements")), + &size, + move |b, &n| { + let seed = vec![42; 32]; + b.iter(|| black_box(shuffle_list(&seed, n))) + }, + ); + } - c.bench( - "_fast_ whole list shuffle", - Benchmark::new("4m elements", move |b| { - let seed = vec![42; 32]; - let list: Vec = (0..4_000_000).collect(); - b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, true))) - }) - .sample_size(10), - ); + let mut group = c.benchmark_group("fast"); + group.sample_size(10); + for size in [512, 16_384, 4_000_000] { + group.bench_with_input( + BenchmarkId::new("whole list shuffle", format!("{size} elements")), + &size, + move |b, &n| { + let seed = vec![42; 32]; + let list: Vec = (0..n).collect(); + b.iter(|| black_box(fast_shuffle(list.clone(), SHUFFLE_ROUND_COUNT, &seed, 
true))) + }, + ); + } + group.finish(); } -criterion_group!(benches, shuffles,); +criterion_group!(benches, shuffles); criterion_main!(benches); diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index e71f3ca18e7..5f25c517b0e 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -17,7 +17,7 @@ use std::cmp::max; /// - `list_size == 0` /// - `index >= list_size` /// - `list_size > 2**24` -/// - `list_size > usize::max_value() / 2` +/// - `list_size > usize::MAX / 2` pub fn compute_shuffled_index( index: usize, list_size: usize, @@ -26,7 +26,7 @@ pub fn compute_shuffled_index( ) -> Option { if list_size == 0 || index >= list_size - || list_size > usize::max_value() / 2 + || list_size > usize::MAX / 2 || list_size > 2_usize.pow(24) { return None; @@ -140,7 +140,7 @@ mod tests { fn returns_none_for_too_large_list() { assert_eq!( None, - compute_shuffled_index(100, usize::max_value() / 2, &[42, 42], 90) + compute_shuffled_index(100, usize::MAX / 2, &[42, 42], 90) ); } } diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index 2b9a2565547..b49a26cc373 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -75,7 +75,7 @@ impl Buf { /// Returns `None` under any of the following conditions: /// - `list_size == 0` /// - `list_size > 2**24` -/// - `list_size > usize::max_value() / 2` +/// - `list_size > usize::MAX / 2` pub fn shuffle_list( mut input: Vec, rounds: u8, @@ -84,10 +84,7 @@ pub fn shuffle_list( ) -> Option> { let list_size = input.len(); - if input.is_empty() - || list_size > usize::max_value() / 2 - || list_size > 2_usize.pow(24) - || rounds == 0 + if input.is_empty() || list_size > usize::MAX / 2 || list_size > 2_usize.pow(24) || rounds == 0 { return None; } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 4b7d9f2b98d..28207f828a5 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -33,7 +33,6 @@ test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { workspace = true, features = ["arbitrary"] } tree_hash_derive = { workspace = true } rand_xorshift = "0.3.0" -cached_tree_hash = { workspace = true } serde_yaml = { workspace = true } tempfile = { workspace = true } derivative = { workspace = true } @@ -51,7 +50,6 @@ metastruct = "0.1.0" serde_json = { workspace = true } smallvec = { workspace = true } maplit = { workspace = true } -strum = { workspace = true } milhouse = { workspace = true } rpds = { workspace = true } diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 5c1036a4c5a..56c48e6cb1c 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,7 +1,4 @@ -#![allow(deprecated)] - -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use criterion::{black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; use milhouse::List; use rayon::prelude::*; use ssz::Encode; @@ -39,8 +36,8 @@ fn get_state(validator_count: usize) -> BeaconState { slashed: false, activation_eligibility_epoch: Epoch::new(0), activation_epoch: Epoch::new(0), - exit_epoch: Epoch::from(u64::max_value()), - withdrawable_epoch: Epoch::from(u64::max_value()), + exit_epoch: Epoch::from(u64::MAX), + 
withdrawable_epoch: Epoch::from(u64::MAX), }) .collect(), ) @@ -53,75 +50,82 @@ fn all_benches(c: &mut Criterion) { let validator_count = 16_384; let spec = Arc::new(MainnetEthSpec::default_spec()); + let mut g = c.benchmark_group("types"); + g.sample_size(10); + let mut state = get_state::(validator_count); state.build_caches(&spec).expect("should build caches"); let state_bytes = state.as_ssz_bytes(); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("encode/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("encode/beacon_state", validator_count), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| black_box(state.as_ssz_bytes()), - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("decode/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("decode/beacon_state", validator_count), + &(state_bytes.clone(), spec.clone()), + |b, (bytes, spec)| { b.iter_batched_ref( - || (state_bytes.clone(), spec.clone()), + || (bytes.clone(), spec.clone()), |(bytes, spec)| { let state: BeaconState = BeaconState::from_ssz_bytes(&bytes, &spec).expect("should decode"); black_box(state) }, - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("clone/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new("clone/beacon_state", validator_count), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| black_box(state.clone()), - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new( + g.bench_with_input( + BenchmarkId::new( "initialized_cached_tree_hash_without_changes/beacon_state", - move |b| { - b.iter_batched_ref( - || inner_state.clone(), - |state| black_box(state.update_tree_hash_cache()), - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), + validator_count, + ), + &inner_state, + |b, state| { + b.iter_batched_ref( + || state.clone(), + |state| black_box(state.update_tree_hash_cache()), + BatchSize::SmallInput, + ) + }, ); let mut inner_state = state.clone(); inner_state.drop_all_caches().unwrap(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("non_initialized_cached_tree_hash/beacon_state", move |b| { + g.bench_with_input( + BenchmarkId::new( + "non_initialized_cached_tree_hash/beacon_state", + validator_count, + ), + &inner_state, + |b, state| { b.iter_batched_ref( - || inner_state.clone(), + || state.clone(), |state| { black_box( state @@ -129,41 +133,40 @@ fn all_benches(c: &mut Criterion) { .expect("should update tree hash"), ) }, - criterion::BatchSize::SmallInput, + BatchSize::SmallInput, ) - }) - .sample_size(10), + }, ); let inner_state = state.clone(); - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new( + g.bench_with_input( + BenchmarkId::new( "initialized_cached_tree_hash_with_new_validators/beacon_state", - move |b| { - b.iter_batched_ref( - || { - let mut state = inner_state.clone(); - for _ in 0..16 { - state - .validators_mut() - .push(Validator::default()) - .expect("should push validatorj"); - state - .balances_mut() 
- .push(32_000_000_000) - .expect("should push balance"); - } + validator_count, + ), + &inner_state, + |b, state| { + b.iter_batched_ref( + || { + let mut state = state.clone(); + for _ in 0..16 { + state + .validators_mut() + .push(Validator::default()) + .expect("should push validator"); state - }, - |state| black_box(state.update_tree_hash_cache()), - criterion::BatchSize::SmallInput, - ) - }, - ) - .sample_size(10), + .balances_mut() + .push(32_000_000_000) + .expect("should push balance"); + } + state + }, + |state| black_box(state.update_tree_hash_cache()), + BatchSize::SmallInput, + ) + }, ); } -criterion_group!(benches, all_benches,); +criterion_group!(benches, all_benches); criterion_main!(benches); diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 72c626ded2f..38f6960bac8 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -35,7 +35,7 @@ MAX_CONSOLIDATIONS: 1 # Execution # --------------------------------------------------------------- # 2**13 (= 8192) receipts -MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192 +MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 72c626ded2f..38f6960bac8 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -35,7 +35,7 @@ MAX_CONSOLIDATIONS: 1 # Execution # --------------------------------------------------------------- # 2**13 (= 8192) receipts -MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192 +MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index 11aa5e1f50e..cf726e004b1 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -35,7 +35,7 @@ MAX_CONSOLIDATIONS: 1 # Execution # --------------------------------------------------------------- # [customized] -MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 4 +MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 4 # [customized] 2**1 (= 2) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index bfbf4d97afd..223b12e7684 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -1,41 +1,79 @@ +use super::{AttestationBase, AttestationElectra, AttestationRef}; use super::{ - Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, SelectionProof, - Signature, SignedRoot, + ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, SelectionProof, Signature, + SignedRoot, }; use crate::test_utils::TestRandom; +use crate::Attestation; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// A Validators aggregate attestation and selection proof. 
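The two benchmark files above move from Criterion's deprecated `Benchmark` API to `benchmark_group` with `BenchmarkId`. A minimal, self-contained sketch of that pattern, using a hypothetical `square` benchmark rather than the shuffle or state benchmarks:

use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};

// Hypothetical "square" benchmark showing the group + BenchmarkId pattern.
fn square_benches(c: &mut Criterion) {
    let mut group = c.benchmark_group("square");
    group.sample_size(10); // applies to every benchmark registered on this group
    for size in [8_u64, 512, 16_384] {
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &n| {
            b.iter(|| black_box(n.wrapping_mul(n)))
        });
    }
    group.finish();
}

criterion_group!(benches, square_benches);
criterion_main!(benches);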
-/// -/// Spec v0.12.1 +#[superstruct( + variants(Base, Electra), + variant_attributes( + derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + ), + serde(bound = "E: EthSpec"), + arbitrary(bound = "E: EthSpec"), + ), + ref_attributes( + derive(Debug, PartialEq, TreeHash, Serialize,), + serde(untagged, bound = "E: EthSpec"), + tree_hash(enum_behaviour = "transparent") + ), + map_ref_into(AttestationRef) +)] #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, + arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash, )] -#[serde(bound = "E: EthSpec")] +#[serde(untagged)] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] #[arbitrary(bound = "E: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. #[serde(with = "serde_utils::quoted_u64")] + #[superstruct(getter(copy))] pub aggregator_index: u64, /// The aggregate attestation. + #[superstruct(flatten)] pub aggregate: Attestation, /// A proof provided by the validator that permits them to publish on the /// `beacon_aggregate_and_proof` gossipsub topic. pub selection_proof: Signature, } +impl<'a, E: EthSpec> AggregateAndProofRef<'a, E> { + /// Returns `true` if `validator_pubkey` signed over `self.aggregate.data.slot`. + pub fn aggregate(self) -> AttestationRef<'a, E> { + map_aggregate_and_proof_ref_into_attestation_ref!(&'a _, self, |inner, cons| { + cons(&inner.aggregate) + }) + } +} +impl AggregateAndProof { + /// Returns `true` if `validator_pubkey` signed over `self.aggregate.data.slot`. + pub fn aggregate<'a>(&'a self) -> AttestationRef<'a, E> { + map_aggregate_and_proof_ref_into_attestation_ref!(&'a _, self.to_ref(), |inner, cons| { + cons(&inner.aggregate) + }) + } +} + impl AggregateAndProof { /// Produces a new `AggregateAndProof` with a `selection_proof` generated by signing /// `aggregate.data.slot` with `secret_key`. @@ -43,29 +81,47 @@ impl AggregateAndProof { /// If `selection_proof.is_none()` it will be computed locally. 
pub fn from_aggregate( aggregator_index: u64, - aggregate: Attestation, + aggregate: AttestationRef<'_, E>, selection_proof: Option, secret_key: &SecretKey, fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, ) -> Self { - let selection_proof = selection_proof - .unwrap_or_else(|| { - SelectionProof::new::( - aggregate.data.slot, - secret_key, - fork, - genesis_validators_root, - spec, - ) - }) - .into(); + let selection_proof = selection_proof.unwrap_or_else(|| { + SelectionProof::new::( + aggregate.data().slot, + secret_key, + fork, + genesis_validators_root, + spec, + ) + }); - Self { + Self::from_attestation( aggregator_index, - aggregate, + aggregate.clone_as_attestation(), selection_proof, + ) + } + + /// Produces a new `AggregateAndProof` given a `selection_proof` + pub fn from_attestation( + aggregator_index: u64, + aggregate: Attestation, + selection_proof: SelectionProof, + ) -> Self { + match aggregate { + Attestation::Base(aggregate) => Self::Base(AggregateAndProofBase { + aggregator_index, + aggregate, + selection_proof: selection_proof.into(), + }), + Attestation::Electra(aggregate) => Self::Electra(AggregateAndProofElectra { + aggregator_index, + aggregate, + selection_proof: selection_proof.into(), + }), } } @@ -77,16 +133,17 @@ impl AggregateAndProof { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> bool { - let target_epoch = self.aggregate.data.slot.epoch(E::slots_per_epoch()); + let target_epoch = self.aggregate().data().slot.epoch(E::slots_per_epoch()); let domain = spec.get_domain( target_epoch, Domain::SelectionProof, fork, genesis_validators_root, ); - let message = self.aggregate.data.slot.signing_root(domain); - self.selection_proof.verify(validator_pubkey, message) + let message = self.aggregate().data().slot.signing_root(domain); + self.selection_proof().verify(validator_pubkey, message) } } impl SignedRoot for AggregateAndProof {} +impl<'a, E: EthSpec> SignedRoot for AggregateAndProofRef<'a, E> {} diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index e43077d0591..7b53a98caa1 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,13 +1,16 @@ +use crate::slot_data::SlotData; +use crate::{test_utils::TestRandom, Hash256, Slot}; +use crate::{Checkpoint, ForkVersionDeserialize}; use derivative::Derivative; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; +use std::hash::{Hash, Hasher}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -use crate::slot_data::SlotData; -use crate::{test_utils::TestRandom, Hash256, Slot}; - use super::{ AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, Signature, SignedRoot, @@ -18,38 +21,278 @@ pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), SubnetCountIsZero(ArithError), + IncorrectStateVariant, + InvalidCommitteeLength, + InvalidCommitteeIndex, } -/// Details an attestation that can be slashable. 
-/// -/// Spec v0.12.1 +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszTypesError(e) + } +} + +#[superstruct( + variants(Base, Electra), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Decode, + Encode, + TestRandom, + Derivative, + arbitrary::Arbitrary, + TreeHash, + ), + derivative(PartialEq, Hash(bound = "E: EthSpec")), + serde(bound = "E: EthSpec", deny_unknown_fields), + arbitrary(bound = "E: EthSpec"), + ), + ref_attributes(derive(TreeHash), tree_hash(enum_behaviour = "transparent")), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") +)] #[derive( - arbitrary::Arbitrary, Debug, Clone, Serialize, - Deserialize, - Encode, - Decode, TreeHash, - TestRandom, + Encode, Derivative, + Deserialize, + arbitrary::Arbitrary, + PartialEq, )] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] -#[serde(bound = "E: EthSpec")] +#[serde(untagged)] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] #[arbitrary(bound = "E: EthSpec")] pub struct Attestation { + #[superstruct(only(Base), partial_getter(rename = "aggregation_bits_base"))] pub aggregation_bits: BitList, + #[superstruct(only(Electra), partial_getter(rename = "aggregation_bits_electra"))] + pub aggregation_bits: BitList, pub data: AttestationData, + #[superstruct(only(Electra))] + pub committee_bits: BitVector, pub signature: AggregateSignature, } +impl Hash for Attestation { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + match self { + Attestation::Base(att) => att.hash(state), + Attestation::Electra(att) => att.hash(state), + } + } +} + impl Attestation { - /// Are the aggregation bitfields of these attestations disjoint? - pub fn signers_disjoint_from(&self, other: &Self) -> bool { - self.aggregation_bits - .intersection(&other.aggregation_bits) - .is_zero() + /// Produces an attestation with empty signature. + pub fn empty_for_signing( + committee_index: u64, + committee_length: usize, + slot: Slot, + beacon_block_root: Hash256, + source: Checkpoint, + target: Checkpoint, + spec: &ChainSpec, + ) -> Result { + if spec.fork_name_at_slot::(slot).electra_enabled() { + let mut committee_bits: BitVector = BitVector::default(); + committee_bits + .set(committee_index as usize, true) + .map_err(|_| Error::InvalidCommitteeIndex)?; + Ok(Attestation::Electra(AttestationElectra { + aggregation_bits: BitList::with_capacity(committee_length) + .map_err(|_| Error::InvalidCommitteeLength)?, + data: AttestationData { + slot, + index: 0u64, + beacon_block_root, + source, + target, + }, + committee_bits, + signature: AggregateSignature::infinity(), + })) + } else { + Ok(Attestation::Base(AttestationBase { + aggregation_bits: BitList::with_capacity(committee_length) + .map_err(|_| Error::InvalidCommitteeLength)?, + data: AttestationData { + slot, + index: committee_index, + beacon_block_root, + source, + target, + }, + signature: AggregateSignature::infinity(), + })) + } + } + + /// Aggregate another Attestation into this one. + /// + /// The aggregation bitfields must be disjoint, and the data must be the same. 
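`Attestation` above is a `superstruct`: the macro expands the annotated struct into per-fork structs (`AttestationBase`, `AttestationElectra`) plus a top-level enum, which is why the methods in this file dispatch with a `match` over the two variants. A reduced sketch of that shape, using a hypothetical `Widget` type:

use superstruct::superstruct;

// Hypothetical type; the per-variant structs and the top-level enum are generated.
#[superstruct(variants(Base, Electra), variant_attributes(derive(Debug, Clone)))]
#[derive(Debug, Clone)]
pub struct Widget {
    pub slot: u64,
    #[superstruct(only(Electra))]
    pub committee_bits: u8,
}

// Callers dispatch over the generated enum, as the Attestation accessors here do.
fn committee_bits_or_zero(widget: &Widget) -> u8 {
    match widget {
        Widget::Base(_) => 0,
        Widget::Electra(inner) => inner.committee_bits,
    }
}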
+ pub fn aggregate(&mut self, other: AttestationRef) { + match self { + Attestation::Base(att) => match other { + AttestationRef::Base(oth) => { + att.aggregate(oth); + } + AttestationRef::Electra(_) => { + debug_assert!(false, "Cannot aggregate base and electra attestations"); + } + }, + Attestation::Electra(att) => match other { + AttestationRef::Base(_) => { + debug_assert!(false, "Cannot aggregate base and electra attestations"); + } + AttestationRef::Electra(oth) => { + att.aggregate(oth); + } + }, + } + } + + /// Signs `self`, setting the `committee_position`'th bit of `aggregation_bits` to `true`. + /// + /// Returns an `AlreadySigned` error if the `committee_position`'th bit is already `true`. + pub fn sign( + &mut self, + secret_key: &SecretKey, + committee_position: usize, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + match self { + Attestation::Base(att) => att.sign( + secret_key, + committee_position, + fork, + genesis_validators_root, + spec, + ), + Attestation::Electra(att) => att.sign( + secret_key, + committee_position, + fork, + genesis_validators_root, + spec, + ), + } + } + + /// Returns an `AlreadySigned` error if the `committee_position`'th bit is already `true`. + pub fn add_signature( + &mut self, + signature: &Signature, + committee_position: usize, + ) -> Result<(), Error> { + match self { + Attestation::Base(att) => att.add_signature(signature, committee_position), + Attestation::Electra(att) => att.add_signature(signature, committee_position), + } + } + + pub fn committee_index(&self) -> Option { + match self { + Attestation::Base(att) => Some(att.data.index), + Attestation::Electra(att) => att.committee_index(), + } + } + + pub fn is_aggregation_bits_zero(&self) -> bool { + match self { + Attestation::Base(att) => att.aggregation_bits.is_zero(), + Attestation::Electra(att) => att.aggregation_bits.is_zero(), + } + } + + pub fn num_set_aggregation_bits(&self) -> usize { + match self { + Attestation::Base(att) => att.aggregation_bits.num_set_bits(), + Attestation::Electra(att) => att.aggregation_bits.num_set_bits(), + } + } + + pub fn get_aggregation_bit(&self, index: usize) -> Result { + match self { + Attestation::Base(att) => att.aggregation_bits.get(index), + Attestation::Electra(att) => att.aggregation_bits.get(index), + } + } +} + +impl<'a, E: EthSpec> AttestationRef<'a, E> { + pub fn clone_as_attestation(self) -> Attestation { + match self { + Self::Base(att) => Attestation::Base(att.clone()), + Self::Electra(att) => Attestation::Electra(att.clone()), + } + } + + pub fn is_aggregation_bits_zero(self) -> bool { + match self { + Self::Base(att) => att.aggregation_bits.is_zero(), + Self::Electra(att) => att.aggregation_bits.is_zero(), + } + } + + pub fn num_set_aggregation_bits(&self) -> usize { + match self { + Self::Base(att) => att.aggregation_bits.num_set_bits(), + Self::Electra(att) => att.aggregation_bits.num_set_bits(), + } + } + + pub fn committee_index(&self) -> Option { + match self { + AttestationRef::Base(att) => Some(att.data.index), + AttestationRef::Electra(att) => att.committee_index(), + } + } + + pub fn set_aggregation_bits(&self) -> Vec { + match self { + Self::Base(att) => att + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(), + Self::Electra(att) => att + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(), + } + } +} + +impl AttestationElectra { + pub fn 
committee_index(&self) -> Option { + self.get_committee_indices().first().cloned() + } + + pub fn get_committee_indices(&self) -> Vec { + self.committee_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect() } /// Aggregate another Attestation into this one. @@ -57,8 +300,64 @@ impl Attestation { /// The aggregation bitfields must be disjoint, and the data must be the same. pub fn aggregate(&mut self, other: &Self) { debug_assert_eq!(self.data, other.data); - debug_assert!(self.signers_disjoint_from(other)); + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.signature.add_assign_aggregate(&other.signature); + } + + /// Signs `self`, setting the `committee_position`'th bit of `aggregation_bits` to `true`. + /// + /// Returns an `AlreadySigned` error if the `committee_position`'th bit is already `true`. + pub fn sign( + &mut self, + secret_key: &SecretKey, + committee_position: usize, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let domain = spec.get_domain( + self.data.target.epoch, + Domain::BeaconAttester, + fork, + genesis_validators_root, + ); + let message = self.data.signing_root(domain); + + self.add_signature(&secret_key.sign(message), committee_position) + } + + /// Adds `signature` to `self` and sets the `committee_position`'th bit of `aggregation_bits` to `true`. + /// + /// Returns an `AlreadySigned` error if the `committee_position`'th bit is already `true`. + pub fn add_signature( + &mut self, + signature: &Signature, + committee_position: usize, + ) -> Result<(), Error> { + if self + .aggregation_bits + .get(committee_position) + .map_err(Error::SszTypesError)? + { + Err(Error::AlreadySigned(committee_position)) + } else { + self.aggregation_bits + .set(committee_position, true) + .map_err(Error::SszTypesError)?; + self.signature.add_assign(signature); + + Ok(()) + } + } +} + +impl AttestationBase { + /// Aggregate another Attestation into this one. + /// + /// The aggregation bitfields must be disjoint, and the data must be the same. 
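`get_committee_indices` above recovers committee numbers from the Electra `committee_bits` field by collecting the indices of set bits. The same scan, written against a standalone `ssz_types::BitVector` (the `U64` length is illustrative, not the spec constant `MaxCommitteesPerSlot`):

use ssz_types::{typenum::U64, BitVector};

// Collect the indices of all set bits, mirroring the committee_bits scan above.
fn set_bit_indices(bits: &BitVector<U64>) -> Vec<u64> {
    bits.iter()
        .enumerate()
        .filter_map(|(i, bit)| if bit { Some(i as u64) } else { None })
        .collect()
}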
+ pub fn aggregate(&mut self, other: &Self) { + debug_assert_eq!(self.data, other.data); self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); self.signature.add_assign_aggregate(&other.signature); } @@ -109,11 +408,128 @@ impl Attestation { Ok(()) } } + + pub fn extend_aggregation_bits( + &self, + ) -> Result, ssz_types::Error> { + let mut extended_aggregation_bits: BitList = + BitList::with_capacity(self.aggregation_bits.len())?; + + for (i, bit) in self.aggregation_bits.iter().enumerate() { + extended_aggregation_bits.set(i, bit)?; + } + Ok(extended_aggregation_bits) + } } impl SlotData for Attestation { fn get_slot(&self) -> Slot { - self.data.slot + self.data().slot + } +} + +impl<'a, E: EthSpec> SlotData for AttestationRef<'a, E> { + fn get_slot(&self) -> Slot { + self.data().slot + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +#[ssz(enum_behaviour = "union")] +pub enum AttestationOnDisk { + Base(AttestationBase), + Electra(AttestationElectra), +} + +impl AttestationOnDisk { + pub fn to_ref(&self) -> AttestationRefOnDisk { + match self { + AttestationOnDisk::Base(att) => AttestationRefOnDisk::Base(att), + AttestationOnDisk::Electra(att) => AttestationRefOnDisk::Electra(att), + } + } +} + +#[derive(Debug, Clone, Encode)] +#[ssz(enum_behaviour = "union")] +pub enum AttestationRefOnDisk<'a, E: EthSpec> { + Base(&'a AttestationBase), + Electra(&'a AttestationElectra), +} + +impl From> for AttestationOnDisk { + fn from(attestation: Attestation) -> Self { + match attestation { + Attestation::Base(attestation) => Self::Base(attestation), + Attestation::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl From> for Attestation { + fn from(attestation: AttestationOnDisk) -> Self { + match attestation { + AttestationOnDisk::Base(attestation) => Self::Base(attestation), + AttestationOnDisk::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl<'a, E: EthSpec> From> for AttestationRefOnDisk<'a, E> { + fn from(attestation: AttestationRef<'a, E>) -> Self { + match attestation { + AttestationRef::Base(attestation) => Self::Base(attestation), + AttestationRef::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl<'a, E: EthSpec> From> for AttestationRef<'a, E> { + fn from(attestation: AttestationRefOnDisk<'a, E>) -> Self { + match attestation { + AttestationRefOnDisk::Base(attestation) => Self::Base(attestation), + AttestationRefOnDisk::Electra(attestation) => Self::Electra(attestation), + } + } +} + +impl ForkVersionDeserialize for Attestation { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::Value, + fork_name: crate::ForkName, + ) -> Result { + if fork_name.electra_enabled() { + let attestation: AttestationElectra = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(Attestation::Electra(attestation)) + } else { + let attestation: AttestationBase = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(Attestation::Base(attestation)) + } + } +} + +impl ForkVersionDeserialize for Vec> { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::Value, + fork_name: crate::ForkName, + ) -> Result { + if fork_name.electra_enabled() { + let attestations: Vec> = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(attestations + .into_iter() + .map(Attestation::Electra) + .collect::>()) + } else { + let attestations: Vec> = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + 
Ok(attestations + .into_iter() + .map(Attestation::Base) + .collect::>()) + } } } @@ -128,7 +544,7 @@ mod tests { // This test will only pass with `blst`, if we run these tests with another // BLS library in future we will have to make it generic. #[test] - fn size_of() { + fn size_of_base() { use std::mem::size_of; let aggregation_bits = @@ -143,10 +559,41 @@ mod tests { let attestation_expected = aggregation_bits + attestation_data + signature; assert_eq!(attestation_expected, 488); assert_eq!( - size_of::>(), + size_of::>(), attestation_expected ); } - ssz_and_tree_hash_tests!(Attestation); + #[test] + fn size_of_electra() { + use std::mem::size_of; + + let aggregation_bits = + size_of::::MaxValidatorsPerSlot>>(); + let attestation_data = size_of::(); + let committee_bits = + size_of::::MaxCommitteesPerSlot>>(); + let signature = size_of::(); + + assert_eq!(aggregation_bits, 56); + assert_eq!(committee_bits, 56); + assert_eq!(attestation_data, 128); + assert_eq!(signature, 288 + 16); + + let attestation_expected = aggregation_bits + committee_bits + attestation_data + signature; + assert_eq!(attestation_expected, 544); + assert_eq!( + size_of::>(), + attestation_expected + ); + } + + mod base { + use super::*; + ssz_and_tree_hash_tests!(AttestationBase); + } + mod electra { + use super::*; + ssz_and_tree_hash_tests!(AttestationElectra); + } } diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/attester_slashing.rs index 5ad5297d0ce..f6aa654d445 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/attester_slashing.rs @@ -1,38 +1,209 @@ -use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation}; - +use crate::indexed_attestation::{ + IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; +use crate::{test_utils::TestRandom, EthSpec}; use derivative::Derivative; +use rand::{Rng, RngCore}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -/// Two conflicting attestations. -/// -/// Spec v0.12.1 +#[superstruct( + variants(Base, Electra), + variant_attributes( + derive( + Derivative, + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + arbitrary::Arbitrary + ), + derivative(PartialEq, Eq, Hash(bound = "E: EthSpec")), + serde(bound = "E: EthSpec"), + arbitrary(bound = "E: EthSpec") + ), + ref_attributes(derive(Debug)) +)] #[derive( - Derivative, - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - arbitrary::Arbitrary, + Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] -#[serde(bound = "E: EthSpec")] +#[serde(bound = "E: EthSpec", untagged)] #[arbitrary(bound = "E: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +#[tree_hash(enum_behaviour = "transparent")] pub struct AttesterSlashing { + #[superstruct(flatten)] pub attestation_1: IndexedAttestation, + #[superstruct(flatten)] pub attestation_2: IndexedAttestation, } +/// This is a copy of the `AttesterSlashing` enum but with `Encode` and `Decode` derived +/// using the `union` behavior for the purposes of persistence on disk. We use a separate +/// type so that we don't accidentally use this non-spec encoding in consensus objects. 
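The comment above captures the split: consensus types keep the spec's byte layout with `transparent` enum encoding, while the `*OnDisk` copies derive `Decode` by prepending a selector byte with `union`, which is only suitable for the database. A toy sketch of the two `ssz_derive` behaviours (the `Toy` types are hypothetical, not Lighthouse types):

use ssz::Encode;
use ssz_derive::{Decode, Encode};

// Spec-facing layout: the bytes are exactly those of the inner value, no tag.
#[derive(Encode)]
#[ssz(enum_behaviour = "transparent")]
pub enum Toy {
    Base(u64),
    Electra(u64),
}

// Persistence layout: a one-byte selector is prepended, so Decode can be derived.
#[derive(Encode, Decode)]
#[ssz(enum_behaviour = "union")]
pub enum ToyOnDisk {
    Base(u64),
    Electra(u64),
}

fn encoded_lengths() -> (usize, usize) {
    let transparent = Toy::Base(1).as_ssz_bytes().len(); // 8 bytes, just the u64
    let union = ToyOnDisk::Base(1).as_ssz_bytes().len(); // 9 bytes: selector + u64
    (transparent, union)
}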
+#[derive(Debug, Clone, Encode, Decode, Derivative)] +#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[ssz(enum_behaviour = "union")] +pub enum AttesterSlashingOnDisk { + Base(AttesterSlashingBase), + Electra(AttesterSlashingElectra), +} + +#[derive(Debug, Clone, Encode)] +#[ssz(enum_behaviour = "union")] +pub enum AttesterSlashingRefOnDisk<'a, E: EthSpec> { + Base(&'a AttesterSlashingBase), + Electra(&'a AttesterSlashingElectra), +} + +impl From> for AttesterSlashingOnDisk { + fn from(attester_slashing: AttesterSlashing) -> Self { + match attester_slashing { + AttesterSlashing::Base(attester_slashing) => Self::Base(attester_slashing), + AttesterSlashing::Electra(attester_slashing) => Self::Electra(attester_slashing), + } + } +} + +impl From> for AttesterSlashing { + fn from(attester_slashing: AttesterSlashingOnDisk) -> Self { + match attester_slashing { + AttesterSlashingOnDisk::Base(attester_slashing) => Self::Base(attester_slashing), + AttesterSlashingOnDisk::Electra(attester_slashing) => Self::Electra(attester_slashing), + } + } +} + +impl<'a, E: EthSpec> From> for AttesterSlashingRef<'a, E> { + fn from(attester_slashing: AttesterSlashingRefOnDisk<'a, E>) -> Self { + match attester_slashing { + AttesterSlashingRefOnDisk::Base(attester_slashing) => Self::Base(attester_slashing), + AttesterSlashingRefOnDisk::Electra(attester_slashing) => { + Self::Electra(attester_slashing) + } + } + } +} + +impl<'a, E: EthSpec> From> for AttesterSlashingRefOnDisk<'a, E> { + fn from(attester_slashing: AttesterSlashingRef<'a, E>) -> Self { + match attester_slashing { + AttesterSlashingRef::Base(attester_slashing) => Self::Base(attester_slashing), + AttesterSlashingRef::Electra(attester_slashing) => Self::Electra(attester_slashing), + } + } +} + +impl<'a, E: EthSpec> AttesterSlashingRef<'a, E> { + pub fn clone_as_attester_slashing(self) -> AttesterSlashing { + match self { + AttesterSlashingRef::Base(attester_slashing) => { + AttesterSlashing::Base(attester_slashing.clone()) + } + AttesterSlashingRef::Electra(attester_slashing) => { + AttesterSlashing::Electra(attester_slashing.clone()) + } + } + } + + pub fn attestation_1(&self) -> IndexedAttestationRef<'a, E> { + match self { + AttesterSlashingRef::Base(attester_slashing) => { + IndexedAttestationRef::Base(&attester_slashing.attestation_1) + } + AttesterSlashingRef::Electra(attester_slashing) => { + IndexedAttestationRef::Electra(&attester_slashing.attestation_1) + } + } + } + + pub fn attestation_2(&self) -> IndexedAttestationRef<'a, E> { + match self { + AttesterSlashingRef::Base(attester_slashing) => { + IndexedAttestationRef::Base(&attester_slashing.attestation_2) + } + AttesterSlashingRef::Electra(attester_slashing) => { + IndexedAttestationRef::Electra(&attester_slashing.attestation_2) + } + } + } +} + +impl AttesterSlashing { + pub fn attestation_1(&self) -> IndexedAttestationRef { + match self { + AttesterSlashing::Base(attester_slashing) => { + IndexedAttestationRef::Base(&attester_slashing.attestation_1) + } + AttesterSlashing::Electra(attester_slashing) => { + IndexedAttestationRef::Electra(&attester_slashing.attestation_1) + } + } + } + + pub fn attestation_2(&self) -> IndexedAttestationRef { + match self { + AttesterSlashing::Base(attester_slashing) => { + IndexedAttestationRef::Base(&attester_slashing.attestation_2) + } + AttesterSlashing::Electra(attester_slashing) => { + IndexedAttestationRef::Electra(&attester_slashing.attestation_2) + } + } + } +} + +impl TestRandom for AttesterSlashing { + fn random_for_test(rng: &mut impl 
RngCore) -> Self { + if rng.gen_bool(0.5) { + AttesterSlashing::Base(AttesterSlashingBase::random_for_test(rng)) + } else { + AttesterSlashing::Electra(AttesterSlashingElectra::random_for_test(rng)) + } + } +} + +impl crate::ForkVersionDeserialize for Vec> { + fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( + value: serde_json::Value, + fork_name: crate::ForkName, + ) -> Result { + if fork_name.electra_enabled() { + let slashings: Vec> = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(slashings + .into_iter() + .map(AttesterSlashing::Electra) + .collect::>()) + } else { + let slashings: Vec> = + serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(slashings + .into_iter() + .map(AttesterSlashing::Base) + .collect::>()) + } + } +} + #[cfg(test)] mod tests { use super::*; use crate::*; - - ssz_and_tree_hash_tests!(AttesterSlashing); + mod base { + use super::*; + ssz_and_tree_hash_tests!(AttesterSlashingBase); + } + mod electra { + use super::*; + ssz_and_tree_hash_tests!(AttesterSlashingElectra); + } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 81491d65056..f67a965955c 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,15 +1,19 @@ +use crate::attestation::AttestationBase; use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; +use std::fmt; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use self::indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}; + /// A block of the `BeaconChain`. #[superstruct( variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), @@ -324,7 +328,7 @@ impl> BeaconBlockBase { message: header, signature: Signature::empty(), }; - let indexed_attestation: IndexedAttestation = IndexedAttestation { + let indexed_attestation = IndexedAttestationBase { attesting_indices: VariableList::new(vec![ 0_u64; E::MaxValidatorsPerCommittee::to_usize() @@ -345,12 +349,12 @@ impl> BeaconBlockBase { signed_header_2: signed_header, }; - let attester_slashing = AttesterSlashing { + let attester_slashing = AttesterSlashingBase { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - let attestation: Attestation = Attestation { + let attestation = AttestationBase { aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerCommittee::to_usize()) .unwrap(), data: AttestationData::default(), @@ -603,6 +607,31 @@ impl> BeaconBlockElectra /// Return a Electra block where the block has maximum size. 
pub fn full(spec: &ChainSpec) -> Self { let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { + attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + }; + let attester_slashings = vec![ + AttesterSlashingElectra { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + E::max_attester_slashings_electra() + ] + .into(); + let attestation = AttestationElectra { + aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + committee_bits: BitVector::new(), + }; + let mut attestations_electra = vec![]; + for _ in 0..E::MaxAttestationsElectra::to_usize() { + attestations_electra.push(attestation.clone()); + } + let bls_to_execution_changes = vec![ SignedBlsToExecutionChange { message: BlsToExecutionChange { @@ -626,8 +655,8 @@ impl> BeaconBlockElectra state_root: Hash256::zero(), body: BeaconBlockBodyElectra { proposer_slashings: base_block.body.proposer_slashings, - attester_slashings: base_block.body.attester_slashings, - attestations: base_block.body.attestations, + attester_slashings, + attestations: attestations_electra.into(), deposits: base_block.body.deposits, voluntary_exits: base_block.body.voluntary_exits, bls_to_execution_changes, @@ -641,6 +670,7 @@ impl> BeaconBlockElectra graffiti: Graffiti::default(), execution_payload: Payload::Electra::default(), blob_kzg_commitments: VariableList::empty(), + consolidations: VariableList::empty(), }, } } @@ -671,6 +701,7 @@ impl> EmptyBlock for BeaconBlockElec execution_payload: Payload::Electra::default(), bls_to_execution_changes: VariableList::empty(), blob_kzg_commitments: VariableList::empty(), + consolidations: VariableList::empty(), }, } } @@ -836,6 +867,23 @@ impl> ForkVersionDeserialize )) } } +pub enum BlockImportSource { + Gossip, + Lookup, + RangeSync, + HttpApi, +} + +impl fmt::Display for BlockImportSource { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + BlockImportSource::Gossip => write!(f, "gossip"), + BlockImportSource::Lookup => write!(f, "lookup"), + BlockImportSource::RangeSync => write!(f, "range_sync"), + BlockImportSource::HttpApi => write!(f, "http_api"), + } + } +} #[cfg(test)] mod tests { diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index c3077c4ab68..363ba08f7d5 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -2,6 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; use merkle_proof::{MerkleTree, MerkleTreeError}; +use metastruct::metastruct; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; @@ -50,6 +51,14 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; ), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload"), ), + specific_variant_attributes( + Base(metastruct(mappings(beacon_block_body_base_fields(groups(fields))))), + Altair(metastruct(mappings(beacon_block_body_altair_fields(groups(fields))))), + Bellatrix(metastruct(mappings(beacon_block_body_bellatrix_fields(groups(fields))))), + Capella(metastruct(mappings(beacon_block_body_capella_fields(groups(fields))))), + 
Deneb(metastruct(mappings(beacon_block_body_deneb_fields(groups(fields))))), + Electra(metastruct(mappings(beacon_block_body_electra_fields(groups(fields))))), + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] @@ -63,8 +72,21 @@ pub struct BeaconBlockBody = FullPay pub eth1_data: Eth1Data, pub graffiti: Graffiti, pub proposer_slashings: VariableList, - pub attester_slashings: VariableList, E::MaxAttesterSlashings>, - pub attestations: VariableList, E::MaxAttestations>, + #[superstruct( + only(Base, Altair, Bellatrix, Capella, Deneb), + partial_getter(rename = "attester_slashings_base") + )] + pub attester_slashings: VariableList, E::MaxAttesterSlashings>, + #[superstruct(only(Electra), partial_getter(rename = "attester_slashings_electra"))] + pub attester_slashings: + VariableList, E::MaxAttesterSlashingsElectra>, + #[superstruct( + only(Base, Altair, Bellatrix, Capella, Deneb), + partial_getter(rename = "attestations_base") + )] + pub attestations: VariableList, E::MaxAttestations>, + #[superstruct(only(Electra), partial_getter(rename = "attestations_electra"))] + pub attestations: VariableList, E::MaxAttestationsElectra>, pub deposits: VariableList, pub voluntary_exits: VariableList, #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] @@ -92,7 +114,10 @@ pub struct BeaconBlockBody = FullPay VariableList, #[superstruct(only(Deneb, Electra))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra))] + pub consolidations: VariableList, #[superstruct(only(Base, Altair))] + #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[serde(skip)] @@ -117,138 +142,101 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, } } - /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` - /// at `index`. - pub fn kzg_commitment_merkle_proof( - &self, - index: usize, - ) -> Result, Error> { + fn body_merkle_leaves(&self) -> Vec { + let mut leaves = vec![]; match self { - Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { - Err(Error::IncorrectStateVariant) + Self::Base(body) => { + beacon_block_body_base_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); + } + Self::Altair(body) => { + beacon_block_body_altair_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); + } + Self::Bellatrix(body) => { + beacon_block_body_bellatrix_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); + } + Self::Capella(body) => { + beacon_block_body_capella_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); } Self::Deneb(body) => { - // We compute the branches by generating 2 merkle trees: - // 1. Merkle tree for the `blob_kzg_commitments` List object - // 2. Merkle tree for the `BeaconBlockBody` container - // We then merge the branches for both the trees all the way up to the root. 
- - // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) - // - // Branches for `blob_kzg_commitments` without length mix-in - let depth = E::max_blob_commitments_per_block() - .next_power_of_two() - .ilog2(); - let leaves: Vec<_> = body - .blob_kzg_commitments - .iter() - .map(|commitment| commitment.tree_hash_root()) - .collect(); - let tree = MerkleTree::create(&leaves, depth as usize); - let (_, mut proof) = tree - .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; - - // Add the branch corresponding to the length mix-in. - let length = body.blob_kzg_commitments.len(); - let usize_len = std::mem::size_of::(); - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes - .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? - .copy_from_slice(&length.to_le_bytes()); - let length_root = Hash256::from_slice(length_bytes.as_slice()); - proof.push(length_root); - - // Part 2 - // Branches for `BeaconBlockBody` container - let leaves = [ - body.randao_reveal.tree_hash_root(), - body.eth1_data.tree_hash_root(), - body.graffiti.tree_hash_root(), - body.proposer_slashings.tree_hash_root(), - body.attester_slashings.tree_hash_root(), - body.attestations.tree_hash_root(), - body.deposits.tree_hash_root(), - body.voluntary_exits.tree_hash_root(), - body.sync_aggregate.tree_hash_root(), - body.execution_payload.tree_hash_root(), - body.bls_to_execution_changes.tree_hash_root(), - body.blob_kzg_commitments.tree_hash_root(), - ]; - let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; - let tree = MerkleTree::create(&leaves, beacon_block_body_depth); - let (_, mut proof_body) = tree - .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; - // Join the proofs for the subtree and the main tree - proof.append(&mut proof_body); - - debug_assert_eq!(proof.len(), E::kzg_proof_inclusion_proof_depth()); - Ok(proof.into()) + beacon_block_body_deneb_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); } - // TODO(electra): De-duplicate proof computation. Self::Electra(body) => { - // We compute the branches by generating 2 merkle trees: - // 1. Merkle tree for the `blob_kzg_commitments` List object - // 2. Merkle tree for the `BeaconBlockBody` container - // We then merge the branches for both the trees all the way up to the root. - - // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) - // - // Branches for `blob_kzg_commitments` without length mix-in - let depth = E::max_blob_commitments_per_block() - .next_power_of_two() - .ilog2(); - let leaves: Vec<_> = body - .blob_kzg_commitments - .iter() - .map(|commitment| commitment.tree_hash_root()) - .collect(); - let tree = MerkleTree::create(&leaves, depth as usize); - let (_, mut proof) = tree - .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; - - // Add the branch corresponding to the length mix-in. - let length = body.blob_kzg_commitments.len(); - let usize_len = std::mem::size_of::(); - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes - .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? 
- .copy_from_slice(&length.to_le_bytes()); - let length_root = Hash256::from_slice(length_bytes.as_slice()); - proof.push(length_root); - - // Part 2 - // Branches for `BeaconBlockBody` container - let leaves = [ - body.randao_reveal.tree_hash_root(), - body.eth1_data.tree_hash_root(), - body.graffiti.tree_hash_root(), - body.proposer_slashings.tree_hash_root(), - body.attester_slashings.tree_hash_root(), - body.attestations.tree_hash_root(), - body.deposits.tree_hash_root(), - body.voluntary_exits.tree_hash_root(), - body.sync_aggregate.tree_hash_root(), - body.execution_payload.tree_hash_root(), - body.bls_to_execution_changes.tree_hash_root(), - body.blob_kzg_commitments.tree_hash_root(), - ]; - let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; - let tree = MerkleTree::create(&leaves, beacon_block_body_depth); - let (_, mut proof_body) = tree - .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; - // Join the proofs for the subtree and the main tree - proof.append(&mut proof_body); - - debug_assert_eq!(proof.len(), E::kzg_proof_inclusion_proof_depth()); - Ok(proof.into()) + beacon_block_body_electra_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); } } + leaves + } + + /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` + /// at `index`. + pub fn kzg_commitment_merkle_proof( + &self, + index: usize, + ) -> Result, Error> { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. + + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let blob_leaves = self + .blob_kzg_commitments()? + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect::>(); + let depth = E::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let tree = MerkleTree::create(&blob_leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(Error::MerkleTreeError)?; + + // Add the branch corresponding to the length mix-in. + let length = blob_leaves.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); + + // Part 2 + // Branches for `BeaconBlockBody` container + let body_leaves = self.body_merkle_leaves(); + let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; + let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); + let (_, mut proof_body) = tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + // Join the proofs for the subtree and the main tree + proof.append(&mut proof_body); + debug_assert_eq!(proof.len(), E::kzg_proof_inclusion_proof_depth()); + + Ok(proof.into()) + } + + /// Produces the proof of inclusion for `self.blob_kzg_commitments`. 
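With the per-field roots factored into `body_merkle_leaves`, both helpers reduce to the usual build-then-prove flow over `merkle_proof::MerkleTree`. A hedged sketch of how a consumer might verify the `blob_kzg_commitments` field proof against the recomputed body root, assuming `verify_merkle_proof` from the `merkle_proof` crate and the `types::Hash256` alias:

use merkle_proof::{verify_merkle_proof, MerkleTree};
use types::Hash256;

const BLOB_KZG_COMMITMENTS_INDEX: usize = 11;

// Given the per-field roots of a block body, prove the commitments leaf and
// verify it against the recomputed body root.
fn check_commitments_field_proof(body_leaves: &[Hash256]) -> bool {
    let depth = body_leaves.len().next_power_of_two().ilog2() as usize;
    let tree = MerkleTree::create(body_leaves, depth);
    let Ok((leaf, proof)) = tree.generate_proof(BLOB_KZG_COMMITMENTS_INDEX, depth) else {
        return false;
    };
    verify_merkle_proof(leaf, &proof, depth, BLOB_KZG_COMMITMENTS_INDEX, tree.hash())
}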
+ pub fn kzg_commitments_merkle_proof( + &self, + ) -> Result, Error> { + let body_leaves = self.body_merkle_leaves(); + let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; + let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); + let (_, proof) = tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + Ok(proof.into()) } /// Return `true` if this block body has a non-zero number of blobs. @@ -256,6 +244,99 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, self.blob_kzg_commitments() .map_or(false, |blobs| !blobs.is_empty()) } + + pub fn attestations_len(&self) -> usize { + match self { + Self::Base(body) => body.attestations.len(), + Self::Altair(body) => body.attestations.len(), + Self::Bellatrix(body) => body.attestations.len(), + Self::Capella(body) => body.attestations.len(), + Self::Deneb(body) => body.attestations.len(), + Self::Electra(body) => body.attestations.len(), + } + } + + pub fn attester_slashings_len(&self) -> usize { + match self { + Self::Base(body) => body.attester_slashings.len(), + Self::Altair(body) => body.attester_slashings.len(), + Self::Bellatrix(body) => body.attester_slashings.len(), + Self::Capella(body) => body.attester_slashings.len(), + Self::Deneb(body) => body.attester_slashings.len(), + Self::Electra(body) => body.attester_slashings.len(), + } + } + + pub fn attestations(&self) -> Box> + 'a> { + match self { + Self::Base(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), + Self::Altair(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), + Self::Bellatrix(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), + Self::Capella(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), + Self::Deneb(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), + Self::Electra(body) => Box::new(body.attestations.iter().map(AttestationRef::Electra)), + } + } + + pub fn attester_slashings(&self) -> Box> + 'a> { + match self { + Self::Base(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Base), + ), + Self::Altair(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Base), + ), + Self::Bellatrix(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Base), + ), + Self::Capella(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Base), + ), + Self::Deneb(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Base), + ), + Self::Electra(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Electra), + ), + } + } +} + +impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRefMut<'a, E, Payload> { + pub fn attestations_mut( + &'a mut self, + ) -> Box> + 'a> { + match self { + Self::Base(body) => Box::new(body.attestations.iter_mut().map(AttestationRefMut::Base)), + Self::Altair(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Base)) + } + Self::Bellatrix(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Base)) + } + Self::Capella(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Base)) + } + Self::Deneb(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Base)) + } + Self::Electra(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Electra)) + } + } + 
} } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { @@ -556,6 +637,7 @@ impl From>> execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + consolidations, } = body; ( @@ -574,6 +656,7 @@ impl From>> }, bls_to_execution_changes, blob_kzg_commitments: blob_kzg_commitments.clone(), + consolidations, }, Some(execution_payload), ) @@ -712,6 +795,7 @@ impl BeaconBlockBodyElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + consolidations, } = self; BeaconBlockBodyElectra { @@ -729,6 +813,7 @@ impl BeaconBlockBodyElectra> { }, bls_to_execution_changes: bls_to_execution_changes.clone(), blob_kzg_commitments: blob_kzg_commitments.clone(), + consolidations: consolidations.clone(), } } } @@ -748,6 +833,11 @@ impl From>> } impl BeaconBlockBody { + /// Returns the name of the fork pertaining to `self`. + pub fn fork_name(&self) -> ForkName { + self.to_ref().fork_name() + } + pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { let field_index = match generalized_index { light_client_update::EXECUTION_PAYLOAD_INDEX => { @@ -762,13 +852,25 @@ impl BeaconBlockBody { _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let attestations_root = if self.fork_name() > ForkName::Electra { + self.attestations_electra()?.tree_hash_root() + } else { + self.attestations_base()?.tree_hash_root() + }; + + let attester_slashings_root = if self.fork_name() > ForkName::Electra { + self.attester_slashings_electra()?.tree_hash_root() + } else { + self.attester_slashings_base()?.tree_hash_root() + }; + let mut leaves = vec![ self.randao_reveal().tree_hash_root(), self.eth1_data().tree_hash_root(), self.graffiti().tree_hash_root(), self.proposer_slashings().tree_hash_root(), - self.attester_slashings().tree_hash_root(), - self.attestations().tree_hash_root(), + attester_slashings_root, + attestations_root, self.deposits().tree_hash_root(), self.voluntary_exits().tree_hash_root(), ]; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 577f282a556..054e5dbe271 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -121,7 +121,6 @@ pub enum Error { state: Slot, }, TreeHashError(tree_hash::Error), - CachedTreeHashError(cached_tree_hash::Error), InvalidValidatorPubkey(ssz::DecodeError), ValidatorRegistryShrunk, TreeHashCacheInconsistent, @@ -159,6 +158,16 @@ pub enum Error { IndexNotSupported(usize), InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), + PartialWithdrawalCountInvalid(usize), + NonExecutionAddresWithdrawalCredential, + NoCommitteeFound(CommitteeIndex), + InvalidCommitteeIndex(CommitteeIndex), + InvalidSelectionProof { + aggregator_index: u64, + }, + AggregatorNotInCommittee { + aggregator_index: u64, + }, } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -205,6 +214,13 @@ impl From for Hash256 { } /// The state of the `BeaconChain` at some slot. +/// +/// Note: `BeaconState` does not implement `TreeHash` on the top-level type in order to +/// encourage use of the `canonical_root`/`update_tree_hash_cache` methods which flush pending +/// updates to the underlying persistent data structures. This is the safest option for now until +/// we add internal mutability to `milhouse::{List, Vector}`. 
See: +/// +/// https://github.com/sigp/milhouse/issues/43 #[superstruct( variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), variant_attributes( @@ -315,13 +331,10 @@ impl From for Hash256 { partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), map_ref_mut_into(BeaconStateRef) )] -#[derive( - Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary, -)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, arbitrary::Arbitrary)] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[arbitrary(bound = "E: EthSpec")] -#[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] pub struct BeaconState where @@ -367,6 +380,7 @@ where pub eth1_deposit_index: u64, // Registry + #[compare_fields(as_iter)] #[test_random(default)] pub validators: List, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] @@ -392,8 +406,10 @@ where pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) + #[compare_fields(as_iter)] #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[test_random(default)] + #[compare_fields(as_iter)] pub previous_epoch_participation: List, #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] #[test_random(default)] @@ -470,8 +486,12 @@ where // Electra #[superstruct(only(Electra), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] - #[serde(with = "serde_utils::quoted_u64")] - pub deposit_receipts_start_index: u64, + #[serde( + with = "serde_utils::quoted_u64", + //TODO(electra) remove alias when ef tests are updated + alias = "deposit_receipts_start_index" + )] + pub deposit_requests_start_index: u64, #[superstruct(only(Electra), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] @@ -490,13 +510,16 @@ where #[superstruct(only(Electra), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] pub earliest_consolidation_epoch: Epoch, + #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] pub pending_balance_deposits: List, + #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] pub pending_partial_withdrawals: List, + #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] pub pending_consolidations: List, @@ -643,10 +666,8 @@ impl BeaconState { } /// Returns the `tree_hash_root` of the state. - /// - /// Spec v0.12.1 - pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.tree_hash_root()[..]) + pub fn canonical_root(&mut self) -> Result { + self.update_tree_hash_cache() } pub fn historical_batch(&mut self) -> Result, Error> { @@ -1467,6 +1488,14 @@ impl BeaconState { } } + /// Get the balance of a single validator. + pub fn get_balance(&self, validator_index: usize) -> Result { + self.balances() + .get(validator_index) + .ok_or(Error::BalancesOutOfBounds(validator_index)) + .copied() + } + /// Get a mutable reference to the balance of a single validator. pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { self.balances_mut() @@ -1999,9 +2028,13 @@ impl BeaconState { /// Compute the tree hash root of the state using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. 
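Since the top-level `BeaconState` no longer derives `TreeHash`, callers go through the cache-flushing methods, which now take `&mut self` and are fallible. A small sketch of the resulting call-site shape, with the signature reconstructed from the `canonical_root` change above:

use types::{BeaconState, BeaconStateError, EthSpec, Hash256};

// Flush pending milhouse mutations and return the state root.
fn state_root<E: EthSpec>(state: &mut BeaconState<E>) -> Result<Hash256, BeaconStateError> {
    state.canonical_root()
}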
- pub fn update_tree_hash_cache(&mut self) -> Result { + pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { self.apply_pending_mutations()?; - Ok(self.tree_hash_root()) + map_beacon_state_ref!(&'a _, self.to_ref(), |inner, cons| { + let root = inner.tree_hash_root(); + cons(inner); + Ok(root) + }) } /// Compute the tree hash root of the validators using the tree hash cache. @@ -2097,11 +2130,12 @@ impl BeaconState { &self, validator_index: usize, spec: &ChainSpec, + current_fork: ForkName, ) -> Result { let max_effective_balance = self .validators() .get(validator_index) - .map(|validator| validator.get_validator_max_effective_balance(spec)) + .map(|validator| validator.get_validator_max_effective_balance(spec, current_fork)) .ok_or(Error::UnknownValidator(validator_index))?; Ok(std::cmp::min( *self @@ -2541,12 +2575,6 @@ impl From for Error { } } -impl From for Error { - fn from(e: cached_tree_hash::Error) -> Error { - Error::CachedTreeHashError(e) - } -} - impl From for Error { fn from(e: tree_hash::Error) -> Error { Error::TreeHashError(e) diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 7913df8e00e..161f8541573 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -86,7 +86,7 @@ impl CommitteeCache { } // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. - if state.validators().len() == usize::max_value() { + if state.validators().len() == usize::MAX { return Err(Error::TooManyValidators); } @@ -183,6 +183,8 @@ impl CommitteeCache { } /// Get all the Beacon committees at a given `slot`. + /// + /// Committees are sorted by ascending index order 0..committees_per_slot pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result, Error> { if self.initialized_epoch.is_none() { return Err(Error::CommitteeCacheUninitialized(None)); diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index a2274765691..4dc06feab38 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -2,6 +2,7 @@ use crate::test_utils::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_chain::types::*; +use lazy_static::lazy_static; use swap_or_not_shuffle::shuffle_list; pub const VALIDATOR_COUNT: usize = 16; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 38a76e44c50..16c7ff152fe 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::types::{ ChainSpec, Domain, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector, }; +use lazy_static::lazy_static; use ssz::Encode; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index e54bc2f4f97..1f60f429db5 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -6,13 +6,10 @@ use crate::{ use crate::{KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; -use kzg::{ - Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, - FIELD_ELEMENTS_PER_BLOB, -}; +use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, 
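// Editor's sketch (illustrative, not part of the diff): `canonical_root` now takes `&mut self`
// and returns a `Result`, because it flushes pending milhouse mutations via
// `update_tree_hash_cache` before hashing. Call sites change roughly like this; the wrapper
// name is hypothetical and the error type is assumed to be `BeaconStateError`.
fn state_root_for_storage<E: EthSpec>(
    state: &mut BeaconState<E>,
) -> Result<Hash256, BeaconStateError> {
    // Previously this was an infallible call on an immutable reference.
    state.canonical_root()
}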
BYTES_PER_FIELD_ELEMENT}; use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError}; use rand::Rng; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -190,33 +187,30 @@ impl BlobSidecar { } /// Verifies the kzg commitment inclusion merkle proof. - pub fn verify_blob_sidecar_inclusion_proof(&self) -> Result { - // Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody` - // is equal to depth of the ssz List max size + 1 for the length mixin - let kzg_commitments_tree_depth = (E::max_blob_commitments_per_block() - .next_power_of_two() - .ilog2() - .safe_add(1))? as usize; + pub fn verify_blob_sidecar_inclusion_proof(&self) -> bool { + let kzg_commitments_tree_depth = E::kzg_commitments_tree_depth(); + + // EthSpec asserts that kzg_commitments_tree_depth is less than KzgCommitmentInclusionProofDepth + let (kzg_commitment_subtree_proof, kzg_commitments_proof) = self + .kzg_commitment_inclusion_proof + .split_at(kzg_commitments_tree_depth); + // Compute the `tree_hash_root` of the `blob_kzg_commitments` subtree using the // inclusion proof branches let blob_kzg_commitments_root = merkle_root_from_branch( self.kzg_commitment.tree_hash_root(), - self.kzg_commitment_inclusion_proof - .get(0..kzg_commitments_tree_depth) - .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + kzg_commitment_subtree_proof, kzg_commitments_tree_depth, self.index as usize, ); // The remaining inclusion proof branches are for the top level `BeaconBlockBody` tree - Ok(verify_merkle_proof( + verify_merkle_proof( blob_kzg_commitments_root, - self.kzg_commitment_inclusion_proof - .get(kzg_commitments_tree_depth..E::kzg_proof_inclusion_proof_depth()) - .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, - E::kzg_proof_inclusion_proof_depth().safe_sub(kzg_commitments_tree_depth)?, + kzg_commitments_proof, + E::block_body_tree_depth(), BLOB_KZG_COMMITMENTS_INDEX, self.signed_block_header.message.body_root, - )) + ) } pub fn random_valid(rng: &mut R, kzg: &Kzg) -> Result { @@ -224,13 +218,7 @@ impl BlobSidecar { rng.fill_bytes(&mut blob_bytes); // Ensure that the blob is canonical by ensuring that // each field element contained in the blob is < BLS_MODULUS - for i in 0..FIELD_ELEMENTS_PER_BLOB { - let Some(byte) = blob_bytes.get_mut( - i.checked_mul(BYTES_PER_FIELD_ELEMENT) - .ok_or("overflow".to_string())?, - ) else { - return Err(format!("blob byte index out of bounds: {:?}", i)); - }; + for byte in blob_bytes.iter_mut().step_by(BYTES_PER_FIELD_ELEMENT) { *byte = 0; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b0346a14ef8..9cc864f9641 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -180,7 +180,7 @@ pub struct ChainSpec { pub electra_fork_version: [u8; 4], /// The Electra fork epoch is optional, with `None` representing "Electra never happens". 
pub electra_fork_epoch: Option, - pub unset_deposit_receipts_start_index: u64, + pub unset_deposit_requests_start_index: u64, pub full_exit_request_amount: u64, pub min_activation_balance: u64, pub max_effective_balance_electra: u64, @@ -190,6 +190,11 @@ pub struct ChainSpec { pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + /* + * DAS params + */ + pub number_of_columns: usize, + /* * Networking */ @@ -376,7 +381,7 @@ impl ChainSpec { state: &BeaconState, ) -> u64 { let fork_name = state.fork_name_unchecked(); - if fork_name >= ForkName::Electra { + if fork_name.electra_enabled() { self.min_slashing_penalty_quotient_electra } else if fork_name >= ForkName::Bellatrix { self.min_slashing_penalty_quotient_bellatrix @@ -387,6 +392,27 @@ impl ChainSpec { } } + /// For a given `BeaconState`, return the whistleblower reward quotient associated with its variant. + pub fn whistleblower_reward_quotient_for_state( + &self, + state: &BeaconState, + ) -> u64 { + let fork_name = state.fork_name_unchecked(); + if fork_name.electra_enabled() { + self.whistleblower_reward_quotient_electra + } else { + self.whistleblower_reward_quotient + } + } + + pub fn max_effective_balance_for_fork(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.max_effective_balance_electra + } else { + self.max_effective_balance + } + } + /// Returns a full `Fork` struct for a given epoch. pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); @@ -726,7 +752,7 @@ impl ChainSpec { */ electra_fork_version: [0x05, 00, 00, 00], electra_fork_epoch: None, - unset_deposit_receipts_start_index: u64::MAX, + unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) @@ -751,6 +777,8 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + number_of_columns: 128, + /* * Network specific */ @@ -852,6 +880,14 @@ impl ChainSpec { electra_fork_epoch: None, max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 0) .expect("pow does not overflow"), + min_per_epoch_churn_limit_electra: option_wrapper(|| { + u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), + max_per_epoch_activation_exit_churn_limit: option_wrapper(|| { + u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) + }) + .expect("calculation does not overflow"), // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1028,7 +1064,7 @@ impl ChainSpec { */ electra_fork_version: [0x05, 0x00, 0x00, 0x64], electra_fork_epoch: None, - unset_deposit_receipts_start_index: u64::MAX, + unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { u64::checked_pow(2, 5)?.checked_mul(u64::checked_pow(10, 9)?) 
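// Editor's sketch (illustrative, not part of the diff): the new fork-gated `ChainSpec` helpers
// replace ad-hoc `fork >= ForkName::Electra` comparisons at call sites. For example, resolving
// the effective-balance cap for a state (function name hypothetical):
fn effective_balance_cap<E: EthSpec>(spec: &ChainSpec, state: &BeaconState<E>) -> u64 {
    // `fork_name_unchecked` reads the fork from the state variant; `max_effective_balance_for_fork`
    // returns `max_effective_balance_electra` once `electra_enabled()` is true, and the base
    // `max_effective_balance` otherwise.
    spec.max_effective_balance_for_fork(state.fork_name_unchecked())
}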
@@ -1053,6 +1089,8 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + number_of_columns: 128, + /* * Network specific */ diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 6fc6e0642ea..110392d4b77 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -124,7 +124,7 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), // Electra "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), - "unset_deposit_receipts_start_index".to_uppercase() => spec.unset_deposit_receipts_start_index.to_string().into(), + "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), "full_exit_request_amount".to_uppercase() => spec.full_exit_request_amount.to_string().into(), "domain_consolidation".to_uppercase()=> u32_hex(spec.domain_consolidation), } diff --git a/consensus/types/src/consolidation.rs b/consensus/types/src/consolidation.rs index 09a2d4bb0c3..6cc4aa90f27 100644 --- a/consensus/types/src/consolidation.rs +++ b/consensus/types/src/consolidation.rs @@ -1,5 +1,5 @@ -use crate::test_utils::TestRandom; use crate::Epoch; +use crate::{test_utils::TestRandom, SignedRoot}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -27,6 +27,8 @@ pub struct Consolidation { pub epoch: Epoch, } +impl SignedRoot for Consolidation {} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs new file mode 100644 index 00000000000..a0e3ca6cce3 --- /dev/null +++ b/consensus/types/src/data_column_sidecar.rs @@ -0,0 +1,394 @@ +use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; +use crate::test_utils::TestRandom; +use crate::{ + BeaconBlockHeader, ChainSpec, EthSpec, Hash256, KzgProofs, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, +}; +use crate::{BeaconStateError, BlobsList}; +use bls::Signature; +use derivative::Derivative; +use kzg::Kzg; +use kzg::{Blob as KzgBlob, Cell as KzgCell, Error as KzgError}; +use kzg::{KzgCommitment, KzgProof}; +use merkle_proof::verify_merkle_proof; +use rayon::prelude::*; +use safe_arith::ArithError; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use ssz_types::typenum::Unsigned; +use ssz_types::Error as SszError; +use ssz_types::{FixedVector, VariableList}; +use std::hash::Hash; +use std::sync::Arc; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; + +pub type ColumnIndex = u64; +pub type Cell = FixedVector::BytesPerCell>; +pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; + +/// Container of the data that identifies an individual data column. 
+#[derive( + Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, +)] +pub struct DataColumnIdentifier { + pub block_root: Hash256, + pub index: ColumnIndex, +} + +pub type DataColumnSidecarList = Vec>>; + +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + Derivative, + arbitrary::Arbitrary, +)] +#[serde(bound = "E: EthSpec")] +#[arbitrary(bound = "E: EthSpec")] +#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +pub struct DataColumnSidecar { + #[serde(with = "serde_utils::quoted_u64")] + pub index: ColumnIndex, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub column: DataColumn, + /// All of the KZG commitments and proofs associated with the block, used for verifying sample cells. + pub kzg_commitments: KzgCommitments, + pub kzg_proofs: KzgProofs, + pub signed_block_header: SignedBeaconBlockHeader, + /// An inclusion proof, proving the inclusion of `blob_kzg_commitments` in `BeaconBlockBody`. + pub kzg_commitments_inclusion_proof: FixedVector, +} + +impl DataColumnSidecar { + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + + pub fn block_root(&self) -> Hash256 { + self.signed_block_header.message.tree_hash_root() + } + + pub fn block_parent_root(&self) -> Hash256 { + self.signed_block_header.message.parent_root + } + + pub fn block_proposer_index(&self) -> u64 { + self.signed_block_header.message.proposer_index + } + + /// Verifies the kzg commitment inclusion merkle proof. + pub fn verify_inclusion_proof(&self) -> bool { + let blob_kzg_commitments_root = self.kzg_commitments.tree_hash_root(); + + verify_merkle_proof( + blob_kzg_commitments_root, + &self.kzg_commitments_inclusion_proof, + E::kzg_commitments_inclusion_proof_depth(), + BLOB_KZG_COMMITMENTS_INDEX, + self.signed_block_header.message.body_root, + ) + } + + pub fn build_sidecars( + blobs: &BlobsList, + block: &SignedBeaconBlock, + kzg: &Kzg, + spec: &ChainSpec, + ) -> Result, DataColumnSidecarError> { + let number_of_columns = spec.number_of_columns; + if blobs.is_empty() { + return Ok(vec![]); + } + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_err| DataColumnSidecarError::PreDeneb)?; + let kzg_commitments_inclusion_proof = + block.message().body().kzg_commitments_merkle_proof()?; + let signed_block_header = block.signed_block_header(); + + let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let mut column_kzg_proofs = + vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + + // NOTE: assumes blob sidecars are ordered by index + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = KzgBlob::from_bytes(blob).map_err(KzgError::from)?; + kzg.compute_cells_and_proofs(&blob) + }) + .collect::, KzgError>>()?; + + for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). 
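// Editor's sketch (illustrative, not part of the diff): the cell-distribution loop in
// `build_sidecars` (and again in `reconstruct`) fills a blobs-by-columns matrix column by
// column: column `j` receives cell `j` from every blob. The same transpose, written with
// plain `Vec`s for clarity; `CellBytes` is a stand-in for the SSZ `Cell<E>` type.
fn transpose_to_columns<CellBytes: Clone>(
    per_blob_cells: &[Vec<CellBytes>], // one row of `number_of_columns` cells per blob
    number_of_columns: usize,
) -> Vec<Vec<CellBytes>> {
    let mut columns = vec![Vec::with_capacity(per_blob_cells.len()); number_of_columns];
    for row in per_blob_cells {
        // Push this blob's cell `j` onto column `j`, for every column index `j`.
        for (column_index, cell) in row.iter().take(number_of_columns).enumerate() {
            columns[column_index].push(cell.clone());
        }
    }
    columns
}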
+ for col in 0..number_of_columns { + let cell = + blob_cells + .get(col) + .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( + "Missing blob cell at index {col}" + )))?; + let cell: Vec = cell.into_inner().into_iter().collect(); + let cell = Cell::::from(cell); + + let proof = blob_cell_proofs.get(col).ok_or( + DataColumnSidecarError::InconsistentArrayLength(format!( + "Missing blob cell KZG proof at index {col}" + )), + )?; + + let column = + columns + .get_mut(col) + .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( + "Missing data column at index {col}" + )))?; + let column_proofs = column_kzg_proofs.get_mut(col).ok_or( + DataColumnSidecarError::InconsistentArrayLength(format!( + "Missing data column proofs at index {col}" + )), + )?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + let sidecars: Vec>> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::from(col), + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: KzgProofs::::from(proofs), + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + }) + }) + .collect(); + + Ok(sidecars) + } + + pub fn reconstruct( + kzg: &Kzg, + data_columns: &[Arc], + spec: &ChainSpec, + ) -> Result>, KzgError> { + let number_of_columns = spec.number_of_columns; + let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let mut column_kzg_proofs = + vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + + let first_data_column = data_columns + .first() + .ok_or(KzgError::InconsistentArrayLength( + "data_columns should have at least one element".to_string(), + ))?; + let num_of_blobs = first_data_column.kzg_commitments.len(); + + let blob_cells_and_proofs_vec = (0..num_of_blobs) + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column.column.get(row_index).ok_or( + KzgError::InconsistentArrayLength(format!( + "Missing data column at index {row_index}" + )), + )?; + + cells.push(ssz_cell_to_crypto_cell::(cell)?); + cell_ids.push(data_column.index); + } + // recover_all_cells does not expect sorted + let all_cells = kzg.recover_all_cells(&cell_ids, &cells)?; + let blob = kzg.cells_to_blob(&all_cells)?; + + // Note: This function computes all cells and proofs. According to Justin this is okay, + // computing a partial set may be more expensive and requires code paths that don't exist. + // Computing the blobs cells is technically unnecessary but very cheap. It's done here again + // for simplicity. + kzg.compute_cells_and_proofs(&blob) + }) + .collect::, KzgError>>()?; + + for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). 
+ for col in 0..number_of_columns { + let cell = blob_cells + .get(col) + .ok_or(KzgError::InconsistentArrayLength(format!( + "Missing blob cell at index {col}" + )))?; + let cell: Vec = cell.into_inner().into_iter().collect(); + let cell = Cell::::from(cell); + + let proof = blob_cell_proofs + .get(col) + .ok_or(KzgError::InconsistentArrayLength(format!( + "Missing blob cell KZG proof at index {col}" + )))?; + + let column = columns + .get_mut(col) + .ok_or(KzgError::InconsistentArrayLength(format!( + "Missing data column at index {col}" + )))?; + let column_proofs = + column_kzg_proofs + .get_mut(col) + .ok_or(KzgError::InconsistentArrayLength(format!( + "Missing data column proofs at index {col}" + )))?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + // Clone sidecar elements from existing data column, no need to re-compute + let kzg_commitments = &first_data_column.kzg_commitments; + let signed_block_header = &first_data_column.signed_block_header; + let kzg_commitments_inclusion_proof = &first_data_column.kzg_commitments_inclusion_proof; + + let sidecars: Vec>> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::from(col), + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: KzgProofs::::from(proofs), + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + }) + }) + .collect(); + Ok(sidecars) + } + + pub fn min_size() -> usize { + // min size is one cell + Self { + index: 0, + column: VariableList::new(vec![Cell::::default()]).unwrap(), + kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: Default::default(), + } + .as_ssz_bytes() + .len() + } + + pub fn max_size() -> usize { + Self { + index: 0, + column: VariableList::new(vec![Cell::::default(); E::MaxBlobsPerBlock::to_usize()]) + .unwrap(), + kzg_commitments: VariableList::new(vec![ + KzgCommitment::empty_for_testing(); + E::MaxBlobsPerBlock::to_usize() + ]) + .unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty(); E::MaxBlobsPerBlock::to_usize()]) + .unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: Default::default(), + } + .as_ssz_bytes() + .len() + } + + pub fn empty() -> Self { + Self { + index: 0, + column: DataColumn::::default(), + kzg_commitments: VariableList::default(), + kzg_proofs: VariableList::default(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: Default::default(), + } + } + + pub fn id(&self) -> DataColumnIdentifier { + DataColumnIdentifier { + block_root: self.block_root(), + index: self.index, + } + } +} + +#[derive(Debug)] +pub enum DataColumnSidecarError { + ArithError(ArithError), + BeaconStateError(BeaconStateError), + DataColumnIndexOutOfBounds, + KzgCommitmentInclusionProofOutOfBounds, + KzgError(KzgError), + MissingBlobSidecars, + PreDeneb, + SszError(SszError), + InconsistentArrayLength(String), +} + +impl From for DataColumnSidecarError { + fn from(e: ArithError) -> Self { + 
Self::ArithError(e) + } +} + +impl From for DataColumnSidecarError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconStateError(e) + } +} + +impl From for DataColumnSidecarError { + fn from(e: KzgError) -> Self { + Self::KzgError(e) + } +} + +impl From for DataColumnSidecarError { + fn from(e: SszError) -> Self { + Self::SszError(e) + } +} + +/// Converts a cell ssz List object to an array to be used with the kzg +/// crypto library. +fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result { + KzgCell::from_bytes(cell.as_ref()).map_err(Into::into) +} diff --git a/consensus/types/src/deposit_receipt.rs b/consensus/types/src/deposit_request.rs similarity index 90% rename from consensus/types/src/deposit_receipt.rs rename to consensus/types/src/deposit_request.rs index 6a08f717f3d..f6ddf8b63a8 100644 --- a/consensus/types/src/deposit_receipt.rs +++ b/consensus/types/src/deposit_request.rs @@ -19,7 +19,7 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct DepositReceipt { +pub struct DepositRequest { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] @@ -33,5 +33,5 @@ pub struct DepositReceipt { mod tests { use super::*; - ssz_and_tree_hash_tests!(DepositReceipt); + ssz_and_tree_hash_tests!(DepositRequest); } diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 62f7f1b8698..15084cb14c4 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -63,6 +63,8 @@ pub trait EthSpec: * Misc */ type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq; + type MaxValidatorsPerSlot: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq; + type MaxCommitteesPerSlot: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq; /* * Time parameters */ @@ -112,6 +114,12 @@ pub trait EthSpec: type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; type KzgCommitmentInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in PeerDAS + */ + type FieldElementsPerCell: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type FieldElementsPerExtBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type KzgCommitmentsInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -135,6 +143,11 @@ pub trait EthSpec: /// Must be set to `BytesPerFieldElement * FieldElementsPerBlob`. type BytesPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The total length of a data column in bytes. + /// + /// Must be set to `BytesPerFieldElement * FieldElementsPerCell`. 
+ type BytesPerCell: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* * New in Electra */ @@ -142,7 +155,7 @@ pub trait EthSpec: type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxConsolidations: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type MaxDepositReceiptsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxDepositRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -282,6 +295,16 @@ pub trait EthSpec: Self::FieldElementsPerBlob::to_usize() } + /// Returns the `FIELD_ELEMENTS_PER_EXT_BLOB` constant for this specification. + fn field_elements_per_ext_blob() -> usize { + Self::FieldElementsPerExtBlob::to_usize() + } + + /// Returns the `FIELD_ELEMENTS_PER_CELL` constant for this specification. + fn field_elements_per_cell() -> usize { + Self::FieldElementsPerCell::to_usize() + } + /// Returns the `BYTES_PER_BLOB` constant for this specification. fn bytes_per_blob() -> usize { Self::BytesPerBlob::to_usize() @@ -292,6 +315,22 @@ pub trait EthSpec: Self::KzgCommitmentInclusionProofDepth::to_usize() } + fn kzg_commitments_tree_depth() -> usize { + // Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody` + // is equal to depth of the ssz List max size + 1 for the length mixin + Self::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2() + .safe_add(1) + .expect("The log of max_blob_commitments_per_block can not overflow") as usize + } + + fn block_body_tree_depth() -> usize { + Self::kzg_proof_inclusion_proof_depth() + .safe_sub(Self::kzg_commitments_tree_depth()) + .expect("Preset values are not configurable and never result in non-positive block body depth") + } + /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. fn pending_balance_deposits_limit() -> usize { Self::PendingBalanceDepositsLimit::to_usize() @@ -312,9 +351,9 @@ pub trait EthSpec: Self::MaxConsolidations::to_usize() } - /// Returns the `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` constant for this specification. - fn max_deposit_receipts_per_payload() -> usize { - Self::MaxDepositReceiptsPerPayload::to_usize() + /// Returns the `MAX_DEPOSIT_REQUESTS_PER_PAYLOAD` constant for this specification. + fn max_deposit_requests_per_payload() -> usize { + Self::MaxDepositRequestsPerPayload::to_usize() } /// Returns the `MAX_ATTESTER_SLASHINGS_ELECTRA` constant for this specification. @@ -331,6 +370,10 @@ pub trait EthSpec: fn max_withdrawal_requests_per_payload() -> usize { Self::MaxWithdrawalRequestsPerPayload::to_usize() } + + fn kzg_commitments_inclusion_proof_depth() -> usize { + Self::KzgCommitmentsInclusionProofDepth::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
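// Editor's sketch (illustrative, not part of the diff): with the MainnetEthSpec preset values
// assigned further down in this file (MaxBlobCommitmentsPerBlock = 4096,
// KzgCommitmentInclusionProofDepth = 17, FieldElementsPerCell = 64, BytesPerFieldElement = 32),
// the derived helpers work out as below. Assumes the same imports as the `test` module added
// at the bottom of this file.
#[test]
fn mainnet_kzg_depths_and_cell_size() {
    // 4096 commitments -> a 12-level subtree, plus 1 for the SSZ length mixin.
    assert_eq!(MainnetEthSpec::kzg_commitments_tree_depth(), 13);
    // The 17-element blob-sidecar proof splits into 13 subtree branches + 4 body branches,
    // matching `KzgCommitmentsInclusionProofDepth = U4` used by data column sidecars.
    assert_eq!(MainnetEthSpec::block_body_tree_depth(), 4);
    assert_eq!(MainnetEthSpec::kzg_commitments_inclusion_proof_depth(), 4);
    // One cell is 64 field elements of 32 bytes each, matching `type BytesPerCell = U2048`.
    assert_eq!(MainnetEthSpec::field_elements_per_cell() * 32, 2048);
}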
@@ -349,6 +392,8 @@ impl EthSpec for MainnetEthSpec { type JustificationBitsLength = U4; type SubnetBitfieldLength = U64; type MaxValidatorsPerCommittee = U2048; + type MaxCommitteesPerSlot = U64; + type MaxValidatorsPerSlot = U131072; type GenesisEpoch = U0; type SlotsPerEpoch = U32; type EpochsPerEth1VotingPeriod = U64; @@ -374,8 +419,12 @@ impl EthSpec for MainnetEthSpec { type MaxBlobCommitmentsPerBlock = U4096; type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; + type FieldElementsPerCell = U64; + type FieldElementsPerExtBlob = U8192; type BytesPerBlob = U131072; + type BytesPerCell = U2048; type KzgCommitmentInclusionProofDepth = U17; + type KzgCommitmentsInclusionProofDepth = U4; // inclusion of the whole list of commitments type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -385,7 +434,7 @@ impl EthSpec for MainnetEthSpec { type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidations = U1; - type MaxDepositReceiptsPerPayload = U8192; + type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; @@ -404,6 +453,8 @@ impl EthSpec for MainnetEthSpec { pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { + type MaxCommitteesPerSlot = U4; + type MaxValidatorsPerSlot = U8192; type SlotsPerEpoch = U8; type EpochsPerEth1VotingPeriod = U4; type SlotsPerHistoricalRoot = U64; @@ -420,8 +471,12 @@ impl EthSpec for MinimalEthSpec { type KzgCommitmentInclusionProofDepth = U9; type PendingPartialWithdrawalsLimit = U64; type PendingConsolidationsLimit = U64; - type MaxDepositReceiptsPerPayload = U4; + type MaxDepositRequestsPerPayload = U4; type MaxWithdrawalRequestsPerPayload = U2; + type FieldElementsPerCell = U64; + type FieldElementsPerExtBlob = U8192; + type BytesPerCell = U2048; + type KzgCommitmentsInclusionProofDepth = U4; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -468,6 +523,8 @@ impl EthSpec for GnosisEthSpec { type JustificationBitsLength = U4; type SubnetBitfieldLength = U64; type MaxValidatorsPerCommittee = U2048; + type MaxCommitteesPerSlot = U64; + type MaxValidatorsPerSlot = U131072; type GenesisEpoch = U0; type SlotsPerEpoch = U16; type EpochsPerEth1VotingPeriod = U64; @@ -504,10 +561,14 @@ impl EthSpec for GnosisEthSpec { type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidations = U1; - type MaxDepositReceiptsPerPayload = U8192; + type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type FieldElementsPerCell = U64; + type FieldElementsPerExtBlob = U8192; + type BytesPerCell = U2048; + type KzgCommitmentsInclusionProofDepth = U4; fn default_spec() -> ChainSpec { ChainSpec::gnosis() @@ -517,3 +578,28 @@ impl EthSpec for GnosisEthSpec { EthSpecId::Gnosis } } + +#[cfg(test)] +mod test { + use crate::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; + use ssz_types::typenum::Unsigned; + + fn assert_valid_spec() { + E::kzg_commitments_tree_depth(); + E::block_body_tree_depth(); + assert!(E::MaxValidatorsPerSlot::to_i32() >= E::MaxValidatorsPerCommittee::to_i32()); + } + + #[test] + fn mainnet_spec() { + 
assert_valid_spec::(); + } + #[test] + fn minimal_spec() { + assert_valid_spec::(); + } + #[test] + fn gnosis_spec() { + assert_valid_spec::(); + } +} diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 0946b9ecffa..02300cc1927 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -13,6 +13,10 @@ pub type Transactions = VariableList< >; pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; +pub type DepositRequests = + VariableList::MaxDepositRequestsPerPayload>; +pub type WithdrawalRequests = + VariableList::MaxWithdrawalRequestsPerPayload>; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra), @@ -90,7 +94,9 @@ pub struct ExecutionPayload { #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, #[superstruct(only(Electra))] - pub deposit_receipts: VariableList, + //TODO(electra) remove alias once EF tests are updates with correct name + #[serde(alias = "deposit_receipts")] + pub deposit_requests: VariableList, #[superstruct(only(Electra))] pub withdrawal_requests: VariableList, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 324d7b97472..149cc286ae9 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -77,19 +77,18 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella, Deneb, Electra))] - #[superstruct(getter(copy))] + #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] pub withdrawals_root: Hash256, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] - #[superstruct(getter(copy))] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] - #[superstruct(getter(copy))] pub excess_blob_gas: u64, #[superstruct(only(Electra), partial_getter(copy))] - pub deposit_receipts_root: Hash256, + //TODO(electra) remove alias once EF tests are updates with correct name + #[serde(alias = "deposit_receipts_root")] + pub deposit_requests_root: Hash256, #[superstruct(only(Electra), partial_getter(copy))] pub withdrawal_requests_root: Hash256, } @@ -120,14 +119,9 @@ impl ExecutionPayloadHeader { #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { // Matching here in case variable fields are added in future forks. 
- // TODO(electra): review electra changes match fork_name { - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { + ForkName::Base | ForkName::Altair => 0, + ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { // Max size of variable length `extra_data` field E::max_extra_data_bytes() * ::ssz_fixed_len() } @@ -210,7 +204,7 @@ impl ExecutionPayloadHeaderDeneb { withdrawals_root: self.withdrawals_root, blob_gas_used: self.blob_gas_used, excess_blob_gas: self.excess_blob_gas, - deposit_receipts_root: Hash256::zero(), + deposit_requests_root: Hash256::zero(), withdrawal_requests_root: Hash256::zero(), } } @@ -303,7 +297,7 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe withdrawals_root: payload.withdrawals.tree_hash_root(), blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - deposit_receipts_root: payload.deposit_receipts.tree_hash_root(), + deposit_requests_root: payload.deposit_requests.tree_hash_root(), withdrawal_requests_root: payload.withdrawal_requests.tree_hash_root(), } } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 5cc66214733..51a5b3813ba 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -31,6 +31,16 @@ impl ForkName { ] } + pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option)> { + vec![ + (ForkName::Altair, spec.altair_fork_epoch), + (ForkName::Bellatrix, spec.bellatrix_fork_epoch), + (ForkName::Capella, spec.capella_fork_epoch), + (ForkName::Deneb, spec.deneb_fork_epoch), + (ForkName::Electra, spec.electra_fork_epoch), + ] + } + pub fn latest() -> ForkName { // This unwrap is safe as long as we have 1+ forks. It is tested below. *ForkName::list_all().last().unwrap() @@ -119,6 +129,26 @@ impl ForkName { ForkName::Electra => None, } } + + pub fn altair_enabled(self) -> bool { + self >= ForkName::Altair + } + + pub fn bellatrix_enabled(self) -> bool { + self >= ForkName::Bellatrix + } + + pub fn capella_enabled(self) -> bool { + self >= ForkName::Capella + } + + pub fn deneb_enabled(self) -> bool { + self >= ForkName::Deneb + } + + pub fn electra_enabled(self) -> bool { + self >= ForkName::Electra + } } /// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. 
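// Editor's sketch (illustrative, not part of the diff): the `*_enabled` helpers are plain
// ordering comparisons on `ForkName`, so every later fork also reports earlier forks as enabled:
#[test]
fn fork_enabled_helpers_are_ordered() {
    assert!(ForkName::Electra.electra_enabled());
    assert!(ForkName::Electra.deneb_enabled()); // later forks include earlier ones
    assert!(!ForkName::Deneb.electra_enabled()); // but not the other way around
    assert!(!ForkName::Base.altair_enabled());
}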
diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index c7a0081c9f7..3d8f411cafb 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -37,9 +37,9 @@ impl From<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { } } -impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { - fn into(self) -> [u8; GRAFFITI_BYTES_LEN] { - self.0 +impl From for [u8; GRAFFITI_BYTES_LEN] { + fn from(from: Graffiti) -> [u8; GRAFFITI_BYTES_LEN] { + from.0 } } @@ -77,9 +77,9 @@ impl<'de> Deserialize<'de> for GraffitiString { } } -impl Into for GraffitiString { - fn into(self) -> Graffiti { - let graffiti_bytes = self.0.as_bytes(); +impl From for Graffiti { + fn from(from: GraffitiString) -> Graffiti { + let graffiti_bytes = from.0.as_bytes(); let mut graffiti = [0; GRAFFITI_BYTES_LEN]; let graffiti_len = std::cmp::min(graffiti_bytes.len(), GRAFFITI_BYTES_LEN); diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs index 95d015a0f73..76bb111ea2f 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/historical_summary.rs @@ -1,14 +1,10 @@ use crate::test_utils::TestRandom; -use crate::Unsigned; use crate::{BeaconState, EthSpec, Hash256}; -use cached_tree_hash::Error; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; use compare_fields_derive::CompareFields; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; use test_random_derive::TestRandom; -use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK}; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; /// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch` @@ -44,46 +40,3 @@ impl HistoricalSummary { } } } - -/// Wrapper type allowing the implementation of `CachedTreeHash`. -#[derive(Debug)] -pub struct HistoricalSummaryCache<'a, N: Unsigned> { - pub inner: &'a VariableList, -} - -impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> { - pub fn new(inner: &'a VariableList) -> Self { - Self { inner } - } - - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.inner.len() - } -} - -impl<'a, N: Unsigned> CachedTreeHash for HistoricalSummaryCache<'a, N> { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new(arena, int_log(N::to_usize()), self.len()) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, - self.len(), - )) - } -} - -pub fn leaf_iter( - values: &[HistoricalSummary], -) -> impl ExactSizeIterator + '_ { - values - .iter() - .map(|value| value.tree_hash_root()) - .map(Hash256::to_fixed_bytes) -} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index d80b49d55a7..9274600ed2c 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -1,9 +1,11 @@ use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; +use core::slice::Iter; use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::hash::{Hash, Hasher}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -12,25 +14,50 @@ use tree_hash_derive::TreeHash; /// To be included in an `AttesterSlashing`. 
/// /// Spec v0.12.1 +#[superstruct( + variants(Base, Electra), + variant_attributes( + derive( + Debug, + Clone, + Serialize, + Deserialize, + Decode, + Encode, + TestRandom, + Derivative, + arbitrary::Arbitrary, + TreeHash, + ), + derivative(PartialEq, Hash(bound = "E: EthSpec")), + serde(bound = "E: EthSpec", deny_unknown_fields), + arbitrary(bound = "E: EthSpec"), + ) +)] #[derive( - Derivative, Debug, Clone, Serialize, - Deserialize, - Encode, - Decode, TreeHash, - TestRandom, + Encode, + Derivative, + Deserialize, arbitrary::Arbitrary, + PartialEq, )] -#[derivative(PartialEq, Eq)] // to satisfy Clippy's lint about `Hash` -#[serde(bound = "E: EthSpec")] +#[serde(untagged)] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] #[arbitrary(bound = "E: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. - #[serde(with = "quoted_variable_list_u64")] + #[superstruct(only(Base), partial_getter(rename = "attesting_indices_base"))] + #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] pub attesting_indices: VariableList, + #[superstruct(only(Electra), partial_getter(rename = "attesting_indices_electra"))] + #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] + pub attesting_indices: VariableList, pub data: AttestationData, pub signature: AggregateSignature, } @@ -40,15 +67,123 @@ impl IndexedAttestation { /// /// Spec v0.12.1 pub fn is_double_vote(&self, other: &Self) -> bool { - self.data.target.epoch == other.data.target.epoch && self.data != other.data + // reuse the ref implementation to ensure logic is the same + self.to_ref().is_double_vote(other.to_ref()) } /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. 
/// /// Spec v0.12.1 pub fn is_surround_vote(&self, other: &Self) -> bool { - self.data.source.epoch < other.data.source.epoch - && other.data.target.epoch < self.data.target.epoch + // reuse the ref implementation to ensure logic is the same + self.to_ref().is_surround_vote(other.to_ref()) + } + + pub fn attesting_indices_len(&self) -> usize { + match self { + IndexedAttestation::Base(att) => att.attesting_indices.len(), + IndexedAttestation::Electra(att) => att.attesting_indices.len(), + } + } + + pub fn attesting_indices_to_vec(&self) -> Vec { + match self { + IndexedAttestation::Base(att) => att.attesting_indices.to_vec(), + IndexedAttestation::Electra(att) => att.attesting_indices.to_vec(), + } + } + + pub fn attesting_indices_is_empty(&self) -> bool { + match self { + IndexedAttestation::Base(att) => att.attesting_indices.is_empty(), + IndexedAttestation::Electra(att) => att.attesting_indices.is_empty(), + } + } + + pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { + match self { + IndexedAttestation::Base(att) => att.attesting_indices.iter(), + IndexedAttestation::Electra(att) => att.attesting_indices.iter(), + } + } + + pub fn attesting_indices_first(&self) -> Option<&u64> { + match self { + IndexedAttestation::Base(att) => att.attesting_indices.first(), + IndexedAttestation::Electra(att) => att.attesting_indices.first(), + } + } + + pub fn to_electra(self) -> IndexedAttestationElectra { + match self { + Self::Base(att) => { + let extended_attesting_indices: VariableList = + VariableList::new(att.attesting_indices.to_vec()) + .expect("MaxValidatorsPerSlot must be >= MaxValidatorsPerCommittee"); + // Note a unit test in consensus/types/src/eth_spec.rs asserts this invariant for + // all known specs + + IndexedAttestationElectra { + attesting_indices: extended_attesting_indices, + data: att.data, + signature: att.signature, + } + } + Self::Electra(att) => att, + } + } +} + +impl<'a, E: EthSpec> IndexedAttestationRef<'a, E> { + pub fn is_double_vote(&self, other: Self) -> bool { + self.data().target.epoch == other.data().target.epoch && self.data() != other.data() + } + + pub fn is_surround_vote(&self, other: Self) -> bool { + self.data().source.epoch < other.data().source.epoch + && other.data().target.epoch < self.data().target.epoch + } + + pub fn attesting_indices_len(&self) -> usize { + match self { + IndexedAttestationRef::Base(att) => att.attesting_indices.len(), + IndexedAttestationRef::Electra(att) => att.attesting_indices.len(), + } + } + + pub fn attesting_indices_to_vec(&self) -> Vec { + match self { + IndexedAttestationRef::Base(att) => att.attesting_indices.to_vec(), + IndexedAttestationRef::Electra(att) => att.attesting_indices.to_vec(), + } + } + + pub fn attesting_indices_is_empty(&self) -> bool { + match self { + IndexedAttestationRef::Base(att) => att.attesting_indices.is_empty(), + IndexedAttestationRef::Electra(att) => att.attesting_indices.is_empty(), + } + } + + pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { + match self { + IndexedAttestationRef::Base(att) => att.attesting_indices.iter(), + IndexedAttestationRef::Electra(att) => att.attesting_indices.iter(), + } + } + + pub fn attesting_indices_first(&self) -> Option<&u64> { + match self { + IndexedAttestationRef::Base(att) => att.attesting_indices.first(), + IndexedAttestationRef::Electra(att) => att.attesting_indices.first(), + } + } + + pub fn clone_as_indexed_attestation(self) -> IndexedAttestation { + match self { + IndexedAttestationRef::Base(att) => IndexedAttestation::Base(att.clone()), + 
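// Editor's sketch (illustrative, not part of the diff): the variant-agnostic accessors above
// let callers stay generic over Base/Electra attestations instead of matching on the
// superstruct themselves. Function name hypothetical:
fn first_attester_index<E: EthSpec>(att: &IndexedAttestation<E>) -> Option<u64> {
    // Works for both `IndexedAttestation::Base` and `IndexedAttestation::Electra`.
    att.attesting_indices_first().copied()
}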
IndexedAttestationRef::Electra(att) => IndexedAttestation::Electra(att.clone()), + } } } @@ -59,46 +194,12 @@ impl IndexedAttestation { /// Used in the operation pool. impl Hash for IndexedAttestation { fn hash(&self, state: &mut H) { - self.attesting_indices.hash(state); - self.data.hash(state); - self.signature.as_ssz_bytes().hash(state); - } -} - -/// Serialize a variable list of `u64` such that each int is quoted. Deserialize a variable -/// list supporting both quoted and un-quoted ints. -/// -/// E.g.,`["0", "1", "2"]` -mod quoted_variable_list_u64 { - use super::*; - use crate::Unsigned; - use serde::ser::SerializeSeq; - use serde::{Deserializer, Serializer}; - use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; - - pub fn serialize(value: &VariableList, serializer: S) -> Result - where - S: Serializer, - T: Unsigned, - { - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value.iter() { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() - } - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - T: Unsigned, - { - deserializer - .deserialize_any(QuotedIntVecVisitor) - .and_then(|vec| { - VariableList::new(vec) - .map_err(|e| serde::de::Error::custom(format!("invalid length: {:?}", e))) - }) + match self { + IndexedAttestation::Base(att) => att.attesting_indices.hash(state), + IndexedAttestation::Electra(att) => att.attesting_indices.hash(state), + }; + self.data().hash(state); + self.signature().as_ssz_bytes().hash(state); } } @@ -157,17 +258,25 @@ mod tests { assert!(!indexed_vote_first.is_surround_vote(&indexed_vote_second)); } - ssz_and_tree_hash_tests!(IndexedAttestation); + mod base { + use super::*; + ssz_and_tree_hash_tests!(IndexedAttestationBase); + } + mod electra { + use super::*; + ssz_and_tree_hash_tests!(IndexedAttestationElectra); + } fn create_indexed_attestation( target_epoch: u64, source_epoch: u64, ) -> IndexedAttestation { let mut rng = XorShiftRng::from_seed([42; 16]); - let mut indexed_vote = IndexedAttestation::random_for_test(&mut rng); + let mut indexed_vote = + IndexedAttestation::Base(IndexedAttestationBase::random_for_test(&mut rng)); - indexed_vote.data.source.epoch = Epoch::new(source_epoch); - indexed_vote.data.target.epoch = Epoch::new(target_epoch); + indexed_vote.data_mut().source.epoch = Epoch::new(source_epoch); + indexed_vote.data_mut().target.epoch = Epoch::new(target_epoch); indexed_vote } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 5c521d98af9..b5c500f0b22 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -9,8 +9,6 @@ ) )] -#[macro_use] -extern crate lazy_static; #[macro_use] pub mod test_utils; @@ -35,7 +33,7 @@ pub mod contribution_and_proof; pub mod deposit; pub mod deposit_data; pub mod deposit_message; -pub mod deposit_receipt; +pub mod deposit_request; pub mod deposit_tree_snapshot; pub mod enr_fork_id; pub mod eth1_data; @@ -86,7 +84,6 @@ pub mod config_and_preset; pub mod execution_block_header; pub mod fork_context; pub mod participation_flags; -pub mod participation_list; pub mod payload; pub mod preset; pub mod slot_epoch; @@ -107,6 +104,7 @@ pub mod slot_data; pub mod sqlite; pub mod blob_sidecar; +pub mod data_column_sidecar; pub mod light_client_header; pub mod non_zero_usize; pub mod runtime_var_list; @@ -114,15 +112,23 @@ pub mod runtime_var_list; use ethereum_types::{H160, H256}; pub use crate::activation_queue::ActivationQueue; -pub use 
crate::aggregate_and_proof::AggregateAndProof; -pub use crate::attestation::{Attestation, Error as AttestationError}; +pub use crate::aggregate_and_proof::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +pub use crate::attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, + Error as AttestationError, +}; pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; -pub use crate::attester_slashing::AttesterSlashing; +pub use crate::attester_slashing::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, + AttesterSlashingRef, AttesterSlashingRefOnDisk, +}; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, - EmptyBlock, + BlockImportSource, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, @@ -144,7 +150,7 @@ pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::deposit_message::DepositMessage; -pub use crate::deposit_receipt::DepositReceipt; +pub use crate::deposit_request::DepositRequest; pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; pub use crate::enr_fork_id::EnrForkId; pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; @@ -169,28 +175,31 @@ pub use crate::fork_name::{ForkName, InconsistentFork}; pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; -pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::indexed_attestation::{ + IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; pub use crate::light_client_bootstrap::{ LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, + LightClientBootstrapDeneb, LightClientBootstrapElectra, }; pub use crate::light_client_finality_update::{ LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, - LightClientFinalityUpdateDeneb, + LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, }; pub use crate::light_client_header::{ LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, }; pub use crate::light_client_optimistic_update::{ LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, + LightClientOptimisticUpdateElectra, }; pub use crate::light_client_update::{ Error as LightClientError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, }; pub use crate::participation_flags::ParticipationFlags; -pub use crate::participation_list::ParticipationList; pub use crate::payload::{ AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadRef, BlockType, ExecPayload, @@ -210,7 +219,9 @@ pub use 
crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; -pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; +pub use crate::signed_aggregate_and_proof::{ + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, +}; pub use crate::signed_beacon_block::{ ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 61da0e1b117..e3a85744ded 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,7 +1,8 @@ use crate::{ light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, Slot, SyncCommittee, + LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, SignedBeaconBlock, + Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -16,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( - variants(Altair, Capella, Deneb), + variants(Altair, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -51,6 +52,8 @@ pub struct LightClientBootstrap { pub header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] pub header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "header_electra"))] + pub header: LightClientHeaderElectra, /// The `SyncCommittee` used in the requested period. pub current_sync_committee: Arc>, /// Merkle proof for sync committee @@ -66,6 +69,7 @@ impl LightClientBootstrap { Self::Altair(_) => func(ForkName::Altair), Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), } } @@ -82,9 +86,8 @@ impl LightClientBootstrap { Self::Altair(LightClientBootstrapAltair::from_ssz_bytes(bytes)?) } ForkName::Capella => Self::Capella(LightClientBootstrapCapella::from_ssz_bytes(bytes)?), - ForkName::Deneb | ForkName::Electra => { - Self::Deneb(LightClientBootstrapDeneb::from_ssz_bytes(bytes)?) 
- } + ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb::from_ssz_bytes(bytes)?), + ForkName::Electra => Self::Electra(LightClientBootstrapElectra::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientBootstrap decoding for {fork_name} not implemented" @@ -97,18 +100,16 @@ impl LightClientBootstrap { #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { - // TODO(electra): review electra changes - match fork_name { + let fixed_len = match fork_name { ForkName::Base => 0, - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { + ForkName::Altair | ForkName::Bellatrix => { as Encode>::ssz_fixed_len() - + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } - } + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } pub fn from_beacon_state( @@ -138,11 +139,16 @@ impl LightClientBootstrap { current_sync_committee, current_sync_committee_branch, }), - ForkName::Deneb | ForkName::Electra => Self::Deneb(LightClientBootstrapDeneb { + ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, current_sync_committee_branch, }), + ForkName::Electra => Self::Electra(LightClientBootstrapElectra { + header: LightClientHeaderElectra::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch, + }), }; Ok(light_client_bootstrap) diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 29c526e2916..a9e24e03db1 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -2,7 +2,8 @@ use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregat use crate::ChainSpec; use crate::{ light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, - LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, SignedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -15,7 +16,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb), + variants(Altair, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -50,6 +51,8 @@ pub struct LightClientFinalityUpdate { pub attested_header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "attested_header_deneb"))] pub attested_header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] + pub attested_header: LightClientHeaderElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). 
#[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -57,6 +60,8 @@ pub struct LightClientFinalityUpdate { pub finalized_header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "finalized_header_deneb"))] pub finalized_header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] + pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. #[test_random(default)] pub finality_branch: FixedVector, @@ -80,7 +85,7 @@ impl LightClientFinalityUpdate { .map_err(|_| Error::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { - let finality_update = LightClientFinalityUpdateAltair { + Self::Altair(LightClientFinalityUpdateAltair { attested_header: LightClientHeaderAltair::block_to_light_client_header( attested_block, )?, @@ -90,37 +95,42 @@ impl LightClientFinalityUpdate { finality_branch, sync_aggregate, signature_slot, - }; - Self::Altair(finality_update) - } - ForkName::Capella => { - let finality_update = LightClientFinalityUpdateCapella { - attested_header: LightClientHeaderCapella::block_to_light_client_header( - attested_block, - )?, - finalized_header: LightClientHeaderCapella::block_to_light_client_header( - finalized_block, - )?, - finality_branch, - sync_aggregate, - signature_slot, - }; - Self::Capella(finality_update) - } - ForkName::Deneb | ForkName::Electra => { - let finality_update = LightClientFinalityUpdateDeneb { - attested_header: LightClientHeaderDeneb::block_to_light_client_header( - attested_block, - )?, - finalized_header: LightClientHeaderDeneb::block_to_light_client_header( - finalized_block, - )?, - finality_branch, - sync_aggregate, - signature_slot, - }; - Self::Deneb(finality_update) + }) } + ForkName::Capella => Self::Capella(LightClientFinalityUpdateCapella { + attested_header: LightClientHeaderCapella::block_to_light_client_header( + attested_block, + )?, + finalized_header: LightClientHeaderCapella::block_to_light_client_header( + finalized_block, + )?, + finality_branch, + sync_aggregate, + signature_slot, + }), + ForkName::Deneb => Self::Deneb(LightClientFinalityUpdateDeneb { + attested_header: LightClientHeaderDeneb::block_to_light_client_header( + attested_block, + )?, + finalized_header: LightClientHeaderDeneb::block_to_light_client_header( + finalized_block, + )?, + finality_branch, + sync_aggregate, + signature_slot, + }), + ForkName::Electra => Self::Electra(LightClientFinalityUpdateElectra { + attested_header: LightClientHeaderElectra::block_to_light_client_header( + attested_block, + )?, + finalized_header: LightClientHeaderElectra::block_to_light_client_header( + finalized_block, + )?, + finality_branch, + sync_aggregate, + signature_slot, + }), + ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -135,6 +145,7 @@ impl LightClientFinalityUpdate { Self::Altair(_) => func(ForkName::Altair), Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), } } @@ -153,8 +164,9 @@ impl LightClientFinalityUpdate { ForkName::Capella => { Self::Capella(LightClientFinalityUpdateCapella::from_ssz_bytes(bytes)?) } - ForkName::Deneb | ForkName::Electra => { - Self::Deneb(LightClientFinalityUpdateDeneb::from_ssz_bytes(bytes)?) 
+ ForkName::Deneb => Self::Deneb(LightClientFinalityUpdateDeneb::from_ssz_bytes(bytes)?), + ForkName::Electra => { + Self::Electra(LightClientFinalityUpdateElectra::from_ssz_bytes(bytes)?) } ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( @@ -168,18 +180,17 @@ impl LightClientFinalityUpdate { #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { - // TODO(electra): review electra changes - match fork_name { + let fixed_size = match fork_name { ForkName::Base => 0, - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { + ForkName::Altair | ForkName::Bellatrix => { as Encode>::ssz_fixed_len() - + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } - } + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + // `2 *` because there are two headers in the update + fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 213ec90f955..1d6432ed6f3 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -4,7 +4,7 @@ use crate::ForkVersionDeserialize; use crate::{light_client_update::*, BeaconBlockBody}; use crate::{ test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - FixedVector, Hash256, SignedBeaconBlock, + ExecutionPayloadHeaderElectra, FixedVector, Hash256, SignedBeaconBlock, }; use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; use derivative::Derivative; @@ -17,7 +17,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb), + variants(Altair, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -54,8 +54,13 @@ pub struct LightClientHeader { pub execution: ExecutionPayloadHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "execution_payload_header_deneb"))] pub execution: ExecutionPayloadHeaderDeneb, + #[superstruct( + only(Electra), + partial_getter(rename = "execution_payload_header_electra") + )] + pub execution: ExecutionPayloadHeaderElectra, - #[superstruct(only(Capella, Deneb))] + #[superstruct(only(Capella, Deneb, Electra))] pub execution_branch: FixedVector, #[ssz(skip_serializing, skip_deserializing)] @@ -81,9 +86,12 @@ impl LightClientHeader { ForkName::Capella => LightClientHeader::Capella( LightClientHeaderCapella::block_to_light_client_header(block)?, ), - ForkName::Deneb | ForkName::Electra => LightClientHeader::Deneb( + ForkName::Deneb => LightClientHeader::Deneb( LightClientHeaderDeneb::block_to_light_client_header(block)?, ), + ForkName::Electra => LightClientHeader::Electra( + LightClientHeaderElectra::block_to_light_client_header(block)?, + ), }; Ok(header) } @@ -96,9 +104,12 @@ impl LightClientHeader { ForkName::Capella => { LightClientHeader::Capella(LightClientHeaderCapella::from_ssz_bytes(bytes)?) } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb => { LightClientHeader::Deneb(LightClientHeaderDeneb::from_ssz_bytes(bytes)?) } + ForkName::Electra => { + LightClientHeader::Electra(LightClientHeaderElectra::from_ssz_bytes(bytes)?) 
+ } ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientHeader decoding for {fork_name} not implemented" @@ -192,6 +203,34 @@ impl LightClientHeaderDeneb { } } +impl LightClientHeaderElectra { + pub fn block_to_light_client_header(block: &SignedBeaconBlock) -> Result { + let payload = block + .message() + .execution_payload()? + .execution_payload_electra()?; + + let header = ExecutionPayloadHeaderElectra::from(payload); + let beacon_block_body = BeaconBlockBody::from( + block + .message() + .body_electra() + .map_err(|_| Error::BeaconBlockBodyError)? + .to_owned(), + ); + + let execution_branch = + beacon_block_body.block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + + Ok(LightClientHeaderElectra { + beacon: block.message().block_header(), + execution: header, + execution_branch: FixedVector::new(execution_branch)?, + _phantom_data: PhantomData, + }) + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, @@ -204,9 +243,12 @@ impl ForkVersionDeserialize for LightClientHeader { ForkName::Capella => serde_json::from_value(value) .map(|light_client_header| Self::Capella(light_client_header)) .map_err(serde::de::Error::custom), - ForkName::Deneb | ForkName::Electra => serde_json::from_value(value) + ForkName::Deneb => serde_json::from_value(value) .map(|light_client_header| Self::Deneb(light_client_header)) .map_err(serde::de::Error::custom), + ForkName::Electra => serde_json::from_value(value) + .map(|light_client_header| Self::Electra(light_client_header)) + .map_err(serde::de::Error::custom), ForkName::Base => Err(serde::de::Error::custom(format!( "LightClientHeader deserialization for {fork_name} not implemented" ))), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 4727673f6c0..708f24e7701 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,7 @@ use super::{EthSpec, ForkName, ForkVersionDeserialize, LightClientHeader, Slot, use crate::test_utils::TestRandom; use crate::{ light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, SignedBeaconBlock, + LightClientHeaderDeneb, LightClientHeaderElectra, SignedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -18,7 +18,7 @@ use tree_hash_derive::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. 
#[superstruct( - variants(Altair, Capella, Deneb), + variants(Altair, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -53,6 +53,8 @@ pub struct LightClientOptimisticUpdate { pub attested_header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "attested_header_deneb"))] pub attested_header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] + pub attested_header: LightClientHeaderElectra, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -86,13 +88,20 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), - ForkName::Deneb | ForkName::Electra => Self::Deneb(LightClientOptimisticUpdateDeneb { + ForkName::Deneb => Self::Deneb(LightClientOptimisticUpdateDeneb { attested_header: LightClientHeaderDeneb::block_to_light_client_header( attested_block, )?, sync_aggregate, signature_slot, }), + ForkName::Electra => Self::Electra(LightClientOptimisticUpdateElectra { + attested_header: LightClientHeaderElectra::block_to_light_client_header( + attested_block, + )?, + sync_aggregate, + signature_slot, + }), ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -107,6 +116,7 @@ impl LightClientOptimisticUpdate { Self::Altair(_) => func(ForkName::Altair), Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), } } @@ -139,9 +149,12 @@ impl LightClientOptimisticUpdate { ForkName::Capella => { Self::Capella(LightClientOptimisticUpdateCapella::from_ssz_bytes(bytes)?) } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb => { Self::Deneb(LightClientOptimisticUpdateDeneb::from_ssz_bytes(bytes)?) } + ForkName::Electra => { + Self::Electra(LightClientOptimisticUpdateElectra::from_ssz_bytes(bytes)?) 
+ } ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientOptimisticUpdate decoding for {fork_name} not implemented" @@ -154,18 +167,16 @@ impl LightClientOptimisticUpdate { #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { - // TODO(electra): review electra changes - match fork_name { + let fixed_len = match fork_name { ForkName::Base => 0, - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { + ForkName::Altair | ForkName::Bellatrix => { as Encode>::ssz_fixed_len() - + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } - } + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 002fbea2d37..210fa0eeeb3 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,4 +1,5 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use crate::light_client_header::LightClientHeaderElectra; use crate::{ beacon_state, test_utils::TestRandom, BeaconBlock, BeaconBlockHeader, BeaconState, ChainSpec, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, @@ -76,7 +77,7 @@ impl From for Error { /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. #[superstruct( - variants(Altair, Capella, Deneb), + variants(Altair, Capella, Deneb, Electra), variant_attributes( derive( Debug, @@ -111,6 +112,8 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "attested_header_deneb"))] pub attested_header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] + pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, /// Merkle proof for next sync committee @@ -122,6 +125,8 @@ pub struct LightClientUpdate { pub finalized_header: LightClientHeaderCapella, #[superstruct(only(Deneb), partial_getter(rename = "finalized_header_deneb"))] pub finalized_header: LightClientHeaderDeneb, + #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] + pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. pub finality_branch: FixedVector, /// current sync aggreggate @@ -163,7 +168,7 @@ impl LightClientUpdate { let signature_period = block.epoch().sync_committee_period(chain_spec)?; // Compute and validate attested header. 
let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.tree_hash_root(); + attested_header.state_root = attested_state.update_tree_hash_cache()?; let attested_period = attested_header .slot .epoch(E::slots_per_epoch()) @@ -221,7 +226,7 @@ impl LightClientUpdate { signature_slot: block.slot(), }) } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; let finalized_header = @@ -236,6 +241,23 @@ impl LightClientUpdate { signature_slot: block.slot(), }) } + ForkName::Electra => { + let attested_header = + LightClientHeaderElectra::block_to_light_client_header(attested_block)?; + let finalized_header = + LightClientHeaderElectra::block_to_light_client_header(finalized_block)?; + Self::Electra(LightClientUpdateElectra { + attested_header, + next_sync_committee: attested_state.next_sync_committee()?.clone(), + next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + finalized_header, + finality_branch: FixedVector::new(finality_branch)?, + sync_aggregate: sync_aggregate.clone(), + signature_slot: block.slot(), + }) + } // To add a new fork, just append the new fork variant on the latest fork. Forks that + // have a distinct execution header will need a new LightClientUpdate variant only + // if you need to test or support light client usages }; Ok(light_client_update) @@ -247,9 +269,8 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair::from_ssz_bytes(bytes)?) } ForkName::Capella => Self::Capella(LightClientUpdateCapella::from_ssz_bytes(bytes)?), - ForkName::Deneb | ForkName::Electra => { - Self::Deneb(LightClientUpdateDeneb::from_ssz_bytes(bytes)?) - } + ForkName::Deneb => Self::Deneb(LightClientUpdateDeneb::from_ssz_bytes(bytes)?), + ForkName::Electra => Self::Electra(LightClientUpdateElectra::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientUpdate decoding for {fork_name} not implemented" diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs deleted file mode 100644 index 6e3d916dee5..00000000000 --- a/consensus/types/src/participation_list.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -use crate::{Hash256, ParticipationFlags, Unsigned, VariableList}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; -use tree_hash::{mix_in_length, BYTES_PER_CHUNK}; - -/// Wrapper type allowing the implementation of `CachedTreeHash`.
-#[derive(Debug)] -pub struct ParticipationList<'a, N: Unsigned> { - pub inner: &'a VariableList, -} - -impl<'a, N: Unsigned> ParticipationList<'a, N> { - pub fn new(inner: &'a VariableList) -> Self { - Self { inner } - } -} - -impl<'a, N: Unsigned> CachedTreeHash for ParticipationList<'a, N> { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new( - arena, - int_log(N::to_usize() / BYTES_PER_CHUNK), - leaf_count(self.inner.len()), - ) - } - - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - Ok(mix_in_length( - &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?, - self.inner.len(), - )) - } -} - -pub fn leaf_count(len: usize) -> usize { - (len + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK -} - -pub fn leaf_iter( - values: &[ParticipationFlags], -) -> impl ExactSizeIterator + '_ { - values.chunks(BYTES_PER_CHUNK).map(|xs| { - // Zero-pad chunks on the right. - let mut chunk = [0u8; BYTES_PER_CHUNK]; - for (byte, x) in chunk.iter_mut().zip(xs) { - *byte = x.into_u8(); - } - chunk - }) -} diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 80a70c171f5..362cb6d3864 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -39,6 +39,15 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + /// fork-specific fields fn withdrawals_root(&self) -> Result; fn blob_gas_used(&self) -> Result; + fn withdrawal_requests( + &self, + ) -> Result< + Option>, + Error, + >; + fn deposit_requests( + &self, + ) -> Result>, Error>; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -278,6 +287,35 @@ impl ExecPayload for FullPayload { } } + fn withdrawal_requests( + &self, + ) -> Result< + Option>, + Error, + > { + match self { + FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { + Err(Error::IncorrectStateVariant) + } + FullPayload::Electra(inner) => { + Ok(Some(inner.execution_payload.withdrawal_requests.clone())) + } + } + } + + fn deposit_requests( + &self, + ) -> Result>, Error> { + match self { + FullPayload::Bellatrix(_) | FullPayload::Capella(_) | FullPayload::Deneb(_) => { + Err(Error::IncorrectStateVariant) + } + FullPayload::Electra(inner) => { + Ok(Some(inner.execution_payload.deposit_requests.clone())) + } + } + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -410,6 +448,35 @@ impl<'b, E: EthSpec> ExecPayload for FullPayloadRef<'b, E> { } } + fn withdrawal_requests( + &self, + ) -> Result< + Option>, + Error, + > { + match self { + FullPayloadRef::Bellatrix(_) + | FullPayloadRef::Capella(_) + | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Electra(inner) => { + Ok(Some(inner.execution_payload.withdrawal_requests.clone())) + } + } + } + + fn deposit_requests( + &self, + ) -> Result>, Error> { + match self { + FullPayloadRef::Bellatrix(_) + | FullPayloadRef::Capella(_) + | FullPayloadRef::Deneb(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Electra(inner) => { + Ok(Some(inner.execution_payload.deposit_requests.clone())) + } + } + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -590,6 +657,21 @@ impl ExecPayload for BlindedPayload { } } + fn withdrawal_requests( + &self, + ) -> Result< + 
Option>, + Error, + > { + Ok(None) + } + + fn deposit_requests( + &self, + ) -> Result>, Error> { + Ok(None) + } + fn is_default_with_zero_roots(&self) -> bool { self.to_ref().is_default_with_zero_roots() } @@ -691,6 +773,21 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } } + fn withdrawal_requests( + &self, + ) -> Result< + Option>, + Error, + > { + Ok(None) + } + + fn deposit_requests( + &self, + ) -> Result>, Error> { + Ok(None) + } + fn is_default_with_zero_roots<'a>(&'a self) -> bool { map_blinded_payload_ref!(&'b _, self, move |payload, cons| { cons(payload); @@ -717,7 +814,9 @@ macro_rules! impl_exec_payload_common { $is_default_with_empty_roots:block, $f:block, $g:block, - $h:block) => { + $h:block, + $i:block, + $j:block) => { impl ExecPayload for $wrapper_type { fn block_type() -> BlockType { BlockType::$block_type_variant @@ -780,6 +879,23 @@ macro_rules! impl_exec_payload_common { let h = $h; h(self) } + + fn withdrawal_requests( + &self, + ) -> Result< + Option>, + Error, + > { + let i = $i; + i(self) + } + + fn deposit_requests( + &self, + ) -> Result>, Error> { + let j = $j; + j(self) + } } impl From<$wrapped_type> for $wrapper_type { @@ -825,7 +941,9 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c - } + }, + { |_| { Ok(None) } }, + { |_| { Ok(None) } } ); impl TryInto<$wrapper_type_header> for BlindedPayload { @@ -912,6 +1030,35 @@ macro_rules! impl_exec_payload_for_fork { wrapper_ref_type.blob_gas_used() }; c + }, + { + let c: for<'a> fn( + &'a $wrapper_type_full, + ) -> Result< + Option< + VariableList< + ExecutionLayerWithdrawalRequest, + E::MaxWithdrawalRequestsPerPayload, + >, + >, + Error, + > = |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawal_requests() + }; + c + }, + { + let c: for<'a> fn( + &'a $wrapper_type_full, + ) -> Result< + Option>, + Error, + > = |payload: &$wrapper_type_full| { + let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); + wrapper_ref_type.deposit_requests() + }; + c } ); diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f4008d62e1d..9e9c5aaf410 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -248,7 +248,7 @@ pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] pub max_consolidations: u64, #[serde(with = "serde_utils::quoted_u64")] - pub max_deposit_receipts_per_payload: u64, + pub max_deposit_requests_per_payload: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings_electra: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -270,7 +270,7 @@ impl ElectraPreset { pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidations: E::max_consolidations() as u64, - max_deposit_receipts_per_payload: E::max_deposit_receipts_per_payload() as u64, + max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, max_attester_slashings_electra: E::max_attester_slashings_electra() as u64, max_attestations_electra: E::max_attestations_electra() as u64, max_withdrawal_requests_per_payload: E::max_withdrawal_requests_per_payload() as u64, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index cd9f9c06d06..c80a00c3d1f 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -77,9 +77,9 @@ impl 
SelectionProof { } } -impl Into for SelectionProof { - fn into(self) -> Signature { - self.0 +impl From for Signature { + fn from(from: SelectionProof) -> Signature { + from.0 } } diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/signed_aggregate_and_proof.rs index c31c50ea174..26eca19bf15 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/signed_aggregate_and_proof.rs @@ -1,10 +1,15 @@ use super::{ - AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, - SelectionProof, Signature, SignedRoot, + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +use super::{ + AttestationRef, ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, SelectionProof, + Signature, SignedRoot, }; use crate::test_utils::TestRandom; +use crate::Attestation; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -12,22 +17,38 @@ use tree_hash_derive::TreeHash; /// gossipsub topic. /// /// Spec v0.12.1 +#[superstruct( + variants(Base, Electra), + variant_attributes( + derive( + arbitrary::Arbitrary, + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TestRandom, + TreeHash, + ), + serde(bound = "E: EthSpec"), + arbitrary(bound = "E: EthSpec"), + ), + map_into(Attestation), + map_ref_into(AggregateAndProofRef) +)] #[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, - arbitrary::Arbitrary, + arbitrary::Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, TreeHash, )] -#[serde(bound = "E: EthSpec")] +#[serde(untagged)] +#[tree_hash(enum_behaviour = "transparent")] +#[ssz(enum_behaviour = "transparent")] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] #[arbitrary(bound = "E: EthSpec")] pub struct SignedAggregateAndProof { /// The `AggregateAndProof` that was signed. + #[superstruct(flatten)] pub message: AggregateAndProof, /// The aggregate attestation. pub signature: Signature, @@ -40,7 +61,7 @@ impl SignedAggregateAndProof { /// If `selection_proof.is_none()` it will be computed locally. 
pub fn from_aggregate( aggregator_index: u64, - aggregate: Attestation, + aggregate: AttestationRef<'_, E>, selection_proof: Option, secret_key: &SecretKey, fork: &Fork, @@ -56,8 +77,7 @@ impl SignedAggregateAndProof { genesis_validators_root, spec, ); - - let target_epoch = message.aggregate.data.slot.epoch(E::slots_per_epoch()); + let target_epoch = message.aggregate().data().slot.epoch(E::slots_per_epoch()); let domain = spec.get_domain( target_epoch, Domain::AggregateAndProof, @@ -66,9 +86,35 @@ impl SignedAggregateAndProof { ); let signing_message = message.signing_root(domain); - SignedAggregateAndProof { - message, - signature: secret_key.sign(signing_message), + Self::from_aggregate_and_proof(message, secret_key.sign(signing_message)) + } + + /// Produces a new `SignedAggregateAndProof` given a `signature` of `aggregate` + pub fn from_aggregate_and_proof(aggregate: AggregateAndProof, signature: Signature) -> Self { + match aggregate { + AggregateAndProof::Base(message) => { + SignedAggregateAndProof::Base(SignedAggregateAndProofBase { message, signature }) + } + AggregateAndProof::Electra(message) => { + SignedAggregateAndProof::Electra(SignedAggregateAndProofElectra { + message, + signature, + }) + } } } + + pub fn message<'a>(&'a self) -> AggregateAndProofRef<'a, E> { + map_signed_aggregate_and_proof_ref_into_aggregate_and_proof_ref!( + &'a _, + self.to_ref(), + |inner, cons| { cons(&inner.message) } + ) + } + + pub fn into_attestation(self) -> Attestation { + map_signed_aggregate_and_proof_into_attestation!(self, |inner, cons| { + cons(inner.message.aggregate) + }) + } } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 4d3279a7f77..a22df49ad7b 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -498,6 +498,7 @@ impl SignedBeaconBlockElectra> { execution_payload: BlindedPayloadElectra { .. }, bls_to_execution_changes, blob_kzg_commitments, + consolidations, }, }, signature, @@ -521,6 +522,7 @@ impl SignedBeaconBlockElectra> { execution_payload: FullPayloadElectra { execution_payload }, bls_to_execution_changes, blob_kzg_commitments, + consolidations, }, }, signature, diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 79d00649111..8c8f2d073dd 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -70,7 +70,7 @@ impl Slot { } pub fn max_value() -> Slot { - Slot(u64::max_value()) + Slot(u64::MAX) } } @@ -80,7 +80,7 @@ impl Epoch { } pub fn max_value() -> Epoch { - Epoch(u64::max_value()) + Epoch(u64::MAX) } /// The first slot in the epoch. @@ -176,10 +176,10 @@ mod epoch_tests { let slots_per_epoch = 32; // The last epoch which can be represented by u64. - let epoch = Epoch::new(u64::max_value() / slots_per_epoch); + let epoch = Epoch::new(u64::MAX / slots_per_epoch); // A slot number on the epoch should be equal to u64::max_value. - assert_eq!(epoch.end_slot(slots_per_epoch), Slot::new(u64::max_value())); + assert_eq!(epoch.end_slot(slots_per_epoch), Slot::new(u64::MAX)); } #[test] diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index fafc455ef83..42e7a0f2ee2 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -6,9 +6,9 @@ macro_rules! 
impl_from_into_u64 { } } - impl Into for $main { - fn into(self) -> u64 { - self.0 + impl From<$main> for u64 { + fn from(from: $main) -> u64 { + from.0 } } @@ -28,9 +28,9 @@ macro_rules! impl_from_into_usize { } } - impl Into for $main { - fn into(self) -> usize { - self.0 as usize + impl From<$main> for usize { + fn from(from: $main) -> usize { + from.0 as usize } } @@ -352,7 +352,7 @@ macro_rules! new_tests { fn new() { assert_eq!($type(0), $type::new(0)); assert_eq!($type(3), $type::new(3)); - assert_eq!($type(u64::max_value()), $type::new(u64::max_value())); + assert_eq!($type(u64::MAX), $type::new(u64::MAX)); } }; } @@ -368,17 +368,17 @@ macro_rules! from_into_tests { let x: $other = $type(3).into(); assert_eq!(x, 3); - let x: $other = $type(u64::max_value()).into(); + let x: $other = $type(u64::MAX).into(); // Note: this will fail on 32 bit systems. This is expected as we don't have a proper // 32-bit system strategy in place. - assert_eq!(x, $other::max_value()); + assert_eq!(x, $other::MAX); } #[test] fn from() { assert_eq!($type(0), $type::from(0_u64)); assert_eq!($type(3), $type::from(3_u64)); - assert_eq!($type(u64::max_value()), $type::from($other::max_value())); + assert_eq!($type(u64::MAX), $type::from($other::MAX)); } }; } @@ -396,8 +396,8 @@ macro_rules! math_between_tests { assert_partial_ord(1, Ordering::Less, 2); assert_partial_ord(2, Ordering::Greater, 1); - assert_partial_ord(0, Ordering::Less, u64::max_value()); - assert_partial_ord(u64::max_value(), Ordering::Greater, 0); + assert_partial_ord(0, Ordering::Less, u64::MAX); + assert_partial_ord(u64::MAX, Ordering::Greater, 0); } #[test] @@ -412,9 +412,9 @@ macro_rules! math_between_tests { assert_partial_eq(1, 0, false); assert_partial_eq(1, 1, true); - assert_partial_eq(u64::max_value(), u64::max_value(), true); - assert_partial_eq(0, u64::max_value(), false); - assert_partial_eq(u64::max_value(), 0, false); + assert_partial_eq(u64::MAX, u64::MAX, true); + assert_partial_eq(0, u64::MAX, false); + assert_partial_eq(u64::MAX, 0, false); } #[test] @@ -436,8 +436,8 @@ macro_rules! math_between_tests { assert_add(7, 7, 14); // Addition should be saturating. - assert_add(u64::max_value(), 1, u64::max_value()); - assert_add(u64::max_value(), u64::max_value(), u64::max_value()); + assert_add(u64::MAX, 1, u64::MAX); + assert_add(u64::MAX, u64::MAX, u64::MAX); } #[test] @@ -455,8 +455,8 @@ macro_rules! math_between_tests { assert_sub(1, 0, 1); assert_sub(2, 1, 1); assert_sub(14, 7, 7); - assert_sub(u64::max_value(), 1, u64::max_value() - 1); - assert_sub(u64::max_value(), u64::max_value(), 0); + assert_sub(u64::MAX, 1, u64::MAX - 1); + assert_sub(u64::MAX, u64::MAX, 0); // Subtraction should be saturating assert_sub(0, 1, 0); @@ -480,7 +480,7 @@ macro_rules! math_between_tests { assert_mul(0, 2, 0); // Multiplication should be saturating. - assert_mul(u64::max_value(), 2, u64::max_value()); + assert_mul(u64::MAX, 2, u64::MAX); } #[test] @@ -499,7 +499,7 @@ macro_rules! math_between_tests { assert_div(2, 2, 1); assert_div(100, 50, 2); assert_div(128, 2, 64); - assert_div(u64::max_value(), 2, 2_u64.pow(63) - 1); + assert_div(u64::MAX, 2, 2_u64.pow(63) - 1); } #[test] @@ -544,8 +544,8 @@ macro_rules! 
math_tests { assert_saturating_sub(1, 0, 1); assert_saturating_sub(2, 1, 1); assert_saturating_sub(14, 7, 7); - assert_saturating_sub(u64::max_value(), 1, u64::max_value() - 1); - assert_saturating_sub(u64::max_value(), u64::max_value(), 0); + assert_saturating_sub(u64::MAX, 1, u64::MAX - 1); + assert_saturating_sub(u64::MAX, u64::MAX, 0); // Subtraction should be saturating assert_saturating_sub(0, 1, 0); @@ -565,8 +565,8 @@ macro_rules! math_tests { assert_saturating_add(7, 7, 14); // Addition should be saturating. - assert_saturating_add(u64::max_value(), 1, u64::max_value()); - assert_saturating_add(u64::max_value(), u64::max_value(), u64::max_value()); + assert_saturating_add(u64::MAX, 1, u64::MAX); + assert_saturating_add(u64::MAX, u64::MAX, u64::MAX); } #[test] @@ -581,11 +581,11 @@ macro_rules! math_tests { assert_checked_div(2, 2, Some(1)); assert_checked_div(100, 50, Some(2)); assert_checked_div(128, 2, Some(64)); - assert_checked_div(u64::max_value(), 2, Some(2_u64.pow(63) - 1)); + assert_checked_div(u64::MAX, 2, Some(2_u64.pow(63) - 1)); assert_checked_div(2, 0, None); assert_checked_div(0, 0, None); - assert_checked_div(u64::max_value(), 0, None); + assert_checked_div(u64::MAX, 0, None); } #[test] @@ -607,7 +607,7 @@ macro_rules! math_tests { assert_is_power_of_two(4, true); assert_is_power_of_two(2_u64.pow(4), true); - assert_is_power_of_two(u64::max_value(), false); + assert_is_power_of_two(u64::MAX, false); } #[test] @@ -619,8 +619,8 @@ macro_rules! math_tests { assert_ord(1, Ordering::Less, 2); assert_ord(2, Ordering::Greater, 1); - assert_ord(0, Ordering::Less, u64::max_value()); - assert_ord(u64::max_value(), Ordering::Greater, 0); + assert_ord(0, Ordering::Less, u64::MAX); + assert_ord(u64::MAX, Ordering::Greater, 0); } }; } @@ -647,8 +647,8 @@ macro_rules! all_tests { let x = $type(3).as_u64(); assert_eq!(x, 3); - let x = $type(u64::max_value()).as_u64(); - assert_eq!(x, u64::max_value()); + let x = $type(u64::MAX).as_u64(); + assert_eq!(x, u64::MAX); } } @@ -665,8 +665,8 @@ macro_rules! all_tests { let x = $type(3).as_usize(); assert_eq!(x, 3); - let x = $type(u64::max_value()).as_usize(); - assert_eq!(x, usize::max_value()); + let x = $type(u64::MAX).as_usize(); + assert_eq!(x, usize::MAX); } } }; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 9b6a2e6a192..66786b51297 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,5 +1,6 @@ //! Identifies each shard by an integer identifier. -use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; +use crate::{AttestationRef, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot}; +use lazy_static::lazy_static; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; @@ -37,16 +38,18 @@ impl SubnetId { id.into() } - /// Compute the subnet for an attestation with `attestation_data` where each slot in the + /// Compute the subnet for an attestation where each slot in the /// attestation epoch contains `committee_count_per_slot` committees. 
- pub fn compute_subnet_for_attestation_data( - attestation_data: &AttestationData, + pub fn compute_subnet_for_attestation( + attestation: AttestationRef, committee_count_per_slot: u64, spec: &ChainSpec, ) -> Result { + let committee_index = attestation.committee_index().ok_or(ArithError::Overflow)?; + Self::compute_subnet::( - attestation_data.slot, - attestation_data.index, + attestation.data().slot, + committee_index, committee_count_per_slot, spec, ) @@ -149,15 +152,15 @@ impl From for SubnetId { } } -impl Into for SubnetId { - fn into(self) -> u64 { - self.0 +impl From for u64 { + fn from(from: SubnetId) -> u64 { + from.0 } } -impl Into for &SubnetId { - fn into(self) -> u64 { - self.0 +impl From<&SubnetId> for u64 { + fn from(from: &SubnetId) -> u64 { + from.0 } } diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 2c20daa9702..c348c3e8be3 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -63,13 +63,6 @@ impl SyncCommitteeContribution { }) } - /// Are the aggregation bitfields of these sync contribution disjoint? - pub fn signers_disjoint_from(&self, other: &Self) -> bool { - self.aggregation_bits - .intersection(&other.aggregation_bits) - .is_zero() - } - /// Aggregate another `SyncCommitteeContribution` into this one. /// /// The aggregation bitfields must be disjoint, and the data must be the same. @@ -77,7 +70,6 @@ impl SyncCommitteeContribution { debug_assert_eq!(self.slot, other.slot); debug_assert_eq!(self.beacon_block_root, other.beacon_block_root); debug_assert_eq!(self.subcommittee_index, other.subcommittee_index); - debug_assert!(self.signers_disjoint_from(other)); self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); self.signature.add_assign_aggregate(&other.signature); @@ -110,6 +102,12 @@ impl SlotData for SyncCommitteeContribution { } } +impl SlotData for &SyncCommitteeContribution { + fn get_slot(&self) -> Slot { + self.slot + } +} + impl SlotData for SyncContributionData { fn get_slot(&self) -> Slot { self.slot diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index da7370a0c19..0b32c6981b6 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -90,9 +90,9 @@ impl SyncSelectionProof { } } -impl Into for SyncSelectionProof { - fn into(self) -> Signature { - self.0 +impl From for Signature { + fn from(from: SyncSelectionProof) -> Signature { + from.0 } } diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index dd0807f21ce..7806aecfca8 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -1,6 +1,7 @@ //! Identifies each sync committee subnet by an integer identifier. 
use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::EthSpec; +use lazy_static::lazy_static; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_types::typenum::Unsigned; @@ -77,15 +78,15 @@ impl From for SyncSubnetId { } } -impl Into for SyncSubnetId { - fn into(self) -> u64 { - self.0 +impl From for u64 { + fn from(from: SyncSubnetId) -> u64 { + from.0 } } -impl Into for &SyncSubnetId { - fn into(self) -> u64 { - self.0 +impl From<&SyncSubnetId> for u64 { + fn from(from: &SyncSubnetId) -> u64 { + from.0 } } diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs new file mode 100644 index 00000000000..ab7ded04090 --- /dev/null +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -0,0 +1,96 @@ +use rand::Rng; + +use kzg::{KzgCommitment, KzgProof}; + +use crate::beacon_block_body::KzgCommitments; +use crate::*; + +use super::*; + +type BlobsBundle = (KzgCommitments, KzgProofs, BlobsList); + +pub fn generate_rand_block_and_blobs( + fork_name: ForkName, + num_blobs: usize, + rng: &mut impl Rng, +) -> (SignedBeaconBlock>, Vec>) { + let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); + let mut block = SignedBeaconBlock::from_block(inner, Signature::random_for_test(rng)); + let mut blob_sidecars = vec![]; + + if block.fork_name_unchecked() < ForkName::Deneb { + return (block, blob_sidecars); + } + + let (commitments, proofs, blobs) = generate_blobs::(num_blobs).unwrap(); + *block + .message_mut() + .body_mut() + .blob_kzg_commitments_mut() + .expect("kzg commitment expected from Deneb") = commitments.clone(); + + for (index, ((blob, kzg_commitment), kzg_proof)) in blobs + .into_iter() + .zip(commitments.into_iter()) + .zip(proofs.into_iter()) + .enumerate() + { + blob_sidecars.push(BlobSidecar { + index: index as u64, + blob: blob.clone(), + kzg_commitment, + kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(index) + .unwrap(), + }); + } + (block, blob_sidecars) +} + +pub fn generate_blobs(n_blobs: usize) -> Result, String> { + let (mut commitments, mut proofs, mut blobs) = BlobsBundle::::default(); + + for blob_index in 0..n_blobs { + blobs + .push(Blob::::default()) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + commitments + .push(KzgCommitment::empty_for_testing()) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + proofs + .push(KzgProof::empty()) + .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; + } + + Ok((commitments, proofs, blobs)) +} + +#[cfg(test)] +mod test { + use super::*; + use rand::thread_rng; + + #[test] + fn test_verify_blob_inclusion_proof() { + let (_block, blobs) = + generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut thread_rng()); + for blob in blobs { + assert!(blob.verify_blob_sidecar_inclusion_proof()); + } + } + + #[test] + fn test_verify_blob_inclusion_proof_invalid() { + let (_block, blobs) = + generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut thread_rng()); + + for mut blob in blobs { + blob.kzg_commitment_inclusion_proof = FixedVector::random_for_test(&mut thread_rng()); + assert!(!blob.verify_blob_sidecar_inclusion_proof()); + } + } +} diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index d172342ee64..9599bcd3641 100644 --- 
a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -15,6 +15,8 @@ use tree_hash::TreeHash; #[macro_use] mod macros; mod generate_deterministic_keypairs; +#[cfg(test)] +mod generate_random_block_and_blobs; mod test_random; pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 72a7a036ccc..00355779d2d 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -2,6 +2,7 @@ use crate::*; use rand::RngCore; use rand::SeedableRng; use rand_xorshift::XorShiftRng; +use smallvec::{smallvec, SmallVec}; use std::marker::PhantomData; use std::sync::Arc; @@ -118,6 +119,21 @@ where } } +impl TestRandom for SmallVec<[U; N]> +where + U: TestRandom, +{ + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut output = smallvec![]; + + for _ in 0..(usize::random_for_test(rng) % 4) { + output.push(::random_for_test(rng)); + } + + output + } +} + macro_rules! impl_test_random_for_u8_array { ($len: expr) => { impl TestRandom for [u8; $len] { diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index f73f7c18c5a..35176d389d0 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -26,6 +26,15 @@ impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); + // If N isn't divisible by 8 + // zero out bits greater than N + if let Some(last_byte) = raw_bytes.last_mut() { + let mut mask = 0; + for i in 0..N::to_usize() % 8 { + mask |= 1 << i; + } + *last_byte &= mask; + } Self::from_bytes(raw_bytes).expect("we generate a valid BitVector") } } diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 8ed449ec8a7..b5e92d1f5d8 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, - PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, + Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -57,22 +57,55 @@ impl Validator { /// Returns `true` if the validator is eligible to join the activation queue. /// - /// Modified in electra - pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool { + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_eligible_for_activation_queue( + &self, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork.electra_enabled() { + self.is_eligible_for_activation_queue_electra(spec) + } else { + self.is_eligible_for_activation_queue_base(spec) + } + } + + /// Returns `true` if the validator is eligible to join the activation queue. + /// + /// Spec v0.12.1 + fn is_eligible_for_activation_queue_base(&self, spec: &ChainSpec) -> bool { + self.activation_eligibility_epoch == spec.far_future_epoch + && self.effective_balance == spec.max_effective_balance + } + + /// Returns `true` if the validator is eligible to join the activation queue. + /// + /// Modified in electra as part of EIP 7251. 
+ fn is_eligible_for_activation_queue_electra(&self, spec: &ChainSpec) -> bool { self.activation_eligibility_epoch == spec.far_future_epoch && self.effective_balance >= spec.min_activation_balance } /// Returns `true` if the validator is eligible to be activated. - /// - /// Spec v0.12.1 pub fn is_eligible_for_activation( &self, state: &BeaconState, spec: &ChainSpec, + ) -> bool { + self.is_eligible_for_activation_with_finalized_checkpoint( + &state.finalized_checkpoint(), + spec, + ) + } + + /// Returns `true` if the validator is eligible to be activated. + pub fn is_eligible_for_activation_with_finalized_checkpoint( + &self, + finalized_checkpoint: &Checkpoint, + spec: &ChainSpec, ) -> bool { // Placement in queue is finalized - self.activation_eligibility_epoch <= state.finalized_checkpoint().epoch + self.activation_eligibility_epoch <= finalized_checkpoint.epoch // Has not yet been activated && self.activation_epoch == spec.far_future_epoch } @@ -107,9 +140,9 @@ impl Validator { is_compounding_withdrawal_credential(self.withdrawal_credentials, spec) } - /// Get the eth1 withdrawal address if this validator has one initialized. - pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ - self.has_eth1_withdrawal_credential(spec) + /// Get the execution withdrawal address if this validator has one initialized. + pub fn get_execution_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ + self.has_execution_withdrawal_credential(spec) .then(|| { self.withdrawal_credentials .as_bytes() @@ -131,8 +164,40 @@ impl Validator { /// Returns `true` if the validator is fully withdrawable at some epoch. /// - /// Note: Modified in electra. - pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_fully_withdrawable_at( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork.electra_enabled() { + self.is_fully_withdrawable_at_electra(balance, epoch, spec) + } else { + self.is_fully_withdrawable_at_capella(balance, epoch, spec) + } + } + + /// Returns `true` if the validator is fully withdrawable at some epoch. + fn is_fully_withdrawable_at_capella( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + ) -> bool { + self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + } + + /// Returns `true` if the validator is fully withdrawable at some epoch. + /// + /// Modified in electra as part of EIP 7251. + fn is_fully_withdrawable_at_electra( + &self, + balance: u64, + epoch: Epoch, + spec: &ChainSpec, + ) -> bool { self.has_execution_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 @@ -140,9 +205,37 @@ impl Validator { /// Returns `true` if the validator is partially withdrawable. /// - /// Note: Modified in electra. - pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { - let max_effective_balance = self.get_validator_max_effective_balance(spec); + /// Calls the correct function depending on the provided `fork_name`. + pub fn is_partially_withdrawable_validator( + &self, + balance: u64, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + if current_fork.electra_enabled() { + self.is_partially_withdrawable_validator_electra(balance, spec, current_fork) + } else { + self.is_partially_withdrawable_validator_capella(balance, spec) + } + } + + /// Returns `true` if the validator is partially withdrawable. + fn is_partially_withdrawable_validator_capella(&self, balance: u64, spec: &ChainSpec) -> bool { + self.has_eth1_withdrawal_credential(spec) + && self.effective_balance == spec.max_effective_balance + && balance > spec.max_effective_balance + } + + /// Returns `true` if the validator is partially withdrawable. + /// + /// Modified in electra as part of EIP 7251. + pub fn is_partially_withdrawable_validator_electra( + &self, + balance: u64, + spec: &ChainSpec, + current_fork: ForkName, + ) -> bool { + let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); let has_max_effective_balance = self.effective_balance == max_effective_balance; let has_excess_balance = balance > max_effective_balance; self.has_execution_withdrawal_credential(spec) @@ -157,13 +250,31 @@ impl Validator { } /// Returns the max effective balance for a validator in gwei. 
- pub fn get_validator_max_effective_balance(&self, spec: &ChainSpec) -> u64 { - if self.has_compounding_withdrawal_credential(spec) { - spec.max_effective_balance_electra + pub fn get_validator_max_effective_balance( + &self, + spec: &ChainSpec, + current_fork: ForkName, + ) -> u64 { + if current_fork >= ForkName::Electra { + if self.has_compounding_withdrawal_credential(spec) { + spec.max_effective_balance_electra + } else { + spec.min_activation_balance + } } else { - spec.min_activation_balance + spec.max_effective_balance } } + + pub fn get_active_balance( + &self, + validator_balance: u64, + spec: &ChainSpec, + current_fork: ForkName, + ) -> u64 { + let max_effective_balance = self.get_validator_max_effective_balance(spec, current_fork); + std::cmp::min(validator_balance, max_effective_balance) + } } impl Default for Validator { @@ -172,12 +283,12 @@ impl Default for Validator { Self { pubkey: PublicKeyBytes::empty(), withdrawal_credentials: Hash256::default(), - activation_eligibility_epoch: Epoch::from(std::u64::MAX), - activation_epoch: Epoch::from(std::u64::MAX), - exit_epoch: Epoch::from(std::u64::MAX), - withdrawable_epoch: Epoch::from(std::u64::MAX), + activation_eligibility_epoch: Epoch::from(u64::MAX), + activation_epoch: Epoch::from(u64::MAX), + exit_epoch: Epoch::from(u64::MAX), + withdrawable_epoch: Epoch::from(u64::MAX), slashed: false, - effective_balance: std::u64::MAX, + effective_balance: u64::MAX, } } } diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 462e4cb2cb0..80b42dfa714 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -11,6 +11,9 @@ use tree_hash::TreeHash; /// The byte-length of a BLS public key when serialized in compressed form. pub const PUBLIC_KEY_BYTES_LEN: usize = 48; +/// The byte-length of a BLS public key when serialized in uncompressed form. +pub const PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN: usize = 96; + /// Represents the public key at infinity. pub const INFINITY_PUBLIC_KEY: [u8; PUBLIC_KEY_BYTES_LEN] = [ 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -23,8 +26,17 @@ pub trait TPublicKey: Sized + Clone { /// Serialize `self` as compressed bytes. fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN]; + /// Serialize `self` as uncompressed bytes. + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]; + /// Deserialize `self` from compressed bytes. fn deserialize(bytes: &[u8]) -> Result; + + /// Deserialize `self` from uncompressed bytes. + /// + /// This function *does not* perform thorough checks of the input bytes and should only be + /// used with bytes output from `Self::serialize_uncompressed`. + fn deserialize_uncompressed(bytes: &[u8]) -> Result; } /// A BLS public key that is generic across some BLS point (`Pub`). @@ -65,6 +77,11 @@ where self.point.serialize() } + /// Serialize `self` as uncompressed bytes. + pub fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { + self.point.serialize_uncompressed() + } + /// Deserialize `self` from compressed bytes. pub fn deserialize(bytes: &[u8]) -> Result { if bytes == &INFINITY_PUBLIC_KEY[..] { @@ -75,6 +92,13 @@ where }) } } + + /// Deserialize `self` from uncompressed bytes.
+ pub fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Ok(Self { + point: Pub::deserialize_uncompressed(bytes)?, + }) + } } impl Eq for GenericPublicKey {} diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 0049d79cc55..54c7ad2944e 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -1,10 +1,12 @@ use crate::{ generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, + generic_public_key::{ + GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }, generic_secret_key::TSecretKey, generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, - Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE, + BlstError, Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE, }; pub use blst::min_pk as blst_core; use blst::{blst_scalar, BLST_ERROR}; @@ -121,6 +123,10 @@ impl TPublicKey for blst_core::PublicKey { self.compress() } + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { + blst_core::PublicKey::serialize(self) + } + fn deserialize(bytes: &[u8]) -> Result { // key_validate accepts uncompressed bytes too so enforce byte length here. // It also does subgroup checks, noting infinity check is done in `generic_public_key.rs`. @@ -132,6 +138,19 @@ impl TPublicKey for blst_core::PublicKey { } Self::key_validate(bytes).map_err(Into::into) } + + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + if bytes.len() != PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN { + return Err(Error::InvalidByteLength { + got: bytes.len(), + expected: PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }); + } + // Ensure we use the `blst` function rather than the one from this trait. + let result: Result = Self::deserialize(bytes); + let key = result?; + Ok(key) + } } /// A wrapper that allows for `PartialEq` and `Clone` impls. 
diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index f2d8b79b986..a09fb347e6b 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -1,7 +1,9 @@ use crate::{ generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, + generic_public_key::{ + GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }, generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, @@ -46,11 +48,19 @@ impl TPublicKey for PublicKey { self.0 } + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { + panic!("fake_crypto does not support uncompressed keys") + } + fn deserialize(bytes: &[u8]) -> Result { let mut pubkey = Self::infinity(); pubkey.0[..].copy_from_slice(&bytes[0..PUBLIC_KEY_BYTES_LEN]); Ok(pubkey) } + + fn deserialize_uncompressed(_: &[u8]) -> Result { + panic!("fake_crypto does not support uncompressed keys") + } } impl Eq for PublicKey {} diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index fef9804b784..af269b943d7 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -33,7 +33,9 @@ mod zeroize_hash; pub mod impls; -pub use generic_public_key::{INFINITY_PUBLIC_KEY, PUBLIC_KEY_BYTES_LEN}; +pub use generic_public_key::{ + INFINITY_PUBLIC_KEY, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, +}; pub use generic_secret_key::SECRET_KEY_BYTES_LEN; pub use generic_signature::{INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN}; pub use get_withdrawal_credentials::get_withdrawal_credentials; diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 478c1b7dc26..dac2e97f407 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -341,6 +341,11 @@ macro_rules! test_suite { .assert_single_message_verify(true) } + #[test] + fn deserialize_infinity_public_key() { + PublicKey::deserialize(&bls::INFINITY_PUBLIC_KEY).unwrap_err(); + } + /// A helper struct to make it easer to deal with `SignatureSet` lifetimes. 
        struct OwnedSignatureSet {
            signature: AggregateSignature,
diff --git a/crypto/eth2_key_derivation/tests/tests.rs b/crypto/eth2_key_derivation/tests/tests.rs
index b18a7b0e267..a344b7b3fc1 100644
--- a/crypto/eth2_key_derivation/tests/tests.rs
+++ b/crypto/eth2_key_derivation/tests/tests.rs
@@ -22,7 +22,7 @@ fn deterministic() {
 fn children_deterministic() {
     let master = DerivedKey::from_seed(&[42]).unwrap();
     assert_eq!(
-        master.child(u32::max_value()).secret(),
-        master.child(u32::max_value()).secret(),
+        master.child(u32::MAX).secret(),
+        master.child(u32::MAX).secret(),
     )
 }
diff --git a/crypto/eth2_keystore/src/json_keystore/checksum_module.rs b/crypto/eth2_keystore/src/json_keystore/checksum_module.rs
index dbb21e4de19..834c5eda5f8 100644
--- a/crypto/eth2_keystore/src/json_keystore/checksum_module.rs
+++ b/crypto/eth2_keystore/src/json_keystore/checksum_module.rs
@@ -14,9 +14,9 @@ pub enum ChecksumFunction {
     Sha256,
 }
 
-impl Into<String> for ChecksumFunction {
-    fn into(self) -> String {
-        match self {
+impl From<ChecksumFunction> for String {
+    fn from(from: ChecksumFunction) -> String {
+        match from {
             ChecksumFunction::Sha256 => "sha256".into(),
         }
     }
@@ -38,8 +38,8 @@ impl TryFrom<String> for ChecksumFunction {
 #[serde(try_from = "Value", into = "Value")]
 pub struct EmptyMap;
 
-impl Into<Value> for EmptyMap {
-    fn into(self) -> Value {
+impl From<EmptyMap> for Value {
+    fn from(_from: EmptyMap) -> Value {
         Value::Object(Map::default())
     }
 }
diff --git a/crypto/eth2_keystore/src/json_keystore/cipher_module.rs b/crypto/eth2_keystore/src/json_keystore/cipher_module.rs
index 03a9d305a27..c801a409237 100644
--- a/crypto/eth2_keystore/src/json_keystore/cipher_module.rs
+++ b/crypto/eth2_keystore/src/json_keystore/cipher_module.rs
@@ -13,9 +13,9 @@ pub enum CipherFunction {
     Aes128Ctr,
 }
 
-impl Into<String> for CipherFunction {
-    fn into(self) -> String {
-        match self {
+impl From<CipherFunction> for String {
+    fn from(from: CipherFunction) -> String {
+        match from {
             CipherFunction::Aes128Ctr => "aes-128-ctr".into(),
         }
     }
diff --git a/crypto/eth2_keystore/src/json_keystore/hex_bytes.rs b/crypto/eth2_keystore/src/json_keystore/hex_bytes.rs
index cc61f13d979..e891693040f 100644
--- a/crypto/eth2_keystore/src/json_keystore/hex_bytes.rs
+++ b/crypto/eth2_keystore/src/json_keystore/hex_bytes.rs
@@ -25,9 +25,9 @@ impl From<Vec<u8>> for HexBytes {
     }
 }
 
-impl Into<String> for HexBytes {
-    fn into(self) -> String {
-        hex::encode(self.0)
+impl From<HexBytes> for String {
+    fn from(from: HexBytes) -> String {
+        hex::encode(from.0)
     }
 }
diff --git a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs
index a29b895c953..2086ac7b21a 100644
--- a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs
+++ b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs
@@ -23,8 +23,8 @@ pub struct KdfModule {
 #[serde(try_from = "String", into = "String")]
 pub struct EmptyString;
 
-impl Into<String> for EmptyString {
-    fn into(self) -> String {
+impl From<EmptyString> for String {
+    fn from(_from: EmptyString) -> String {
         "".into()
     }
 }
@@ -91,9 +91,9 @@ pub enum KdfFunction {
     Pbkdf2,
 }
 
-impl Into<String> for KdfFunction {
-    fn into(self) -> String {
-        match self {
+impl From<KdfFunction> for String {
+    fn from(from: KdfFunction) -> String {
+        match from {
             KdfFunction::Scrypt => "scrypt".into(),
             KdfFunction::Pbkdf2 => "pbkdf2".into(),
         }
diff --git a/crypto/eth2_wallet/src/json_wallet/mod.rs b/crypto/eth2_wallet/src/json_wallet/mod.rs
index d2092508a23..f9a41059113 100644
--- a/crypto/eth2_wallet/src/json_wallet/mod.rs
+++ b/crypto/eth2_wallet/src/json_wallet/mod.rs
@@ -39,9 +39,9 @@ pub enum TypeField {
     Hd,
 }
 
-impl Into<String> for TypeField {
-    fn into(self) -> String {
-        match self {
+impl From<TypeField> for String {
+    fn from(from: TypeField) -> String {
+        match from {
             TypeField::Hd => "hierarchical deterministic".into(),
         }
     }
diff --git a/crypto/eth2_wallet/src/wallet.rs b/crypto/eth2_wallet/src/wallet.rs
index 7a7d65f654c..8bf70912167 100644
--- a/crypto/eth2_wallet/src/wallet.rs
+++ b/crypto/eth2_wallet/src/wallet.rs
@@ -179,7 +179,7 @@ impl Wallet {
     ///
     /// - If `wallet_password` is unable to decrypt `self`.
     /// - If `keystore_password.is_empty()`.
-    /// - If `self.nextaccount == u32::max_value()`.
+    /// - If `self.nextaccount == u32::MAX`.
     pub fn next_validator(
         &mut self,
         wallet_password: &[u8],
diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs
index c9a138a31c3..5a83466d0c3 100644
--- a/crypto/kzg/src/kzg_proof.rs
+++ b/crypto/kzg/src/kzg_proof.rs
@@ -38,9 +38,9 @@ impl From<[u8; BYTES_PER_PROOF]> for KzgProof {
     }
 }
 
-impl Into<[u8; BYTES_PER_PROOF]> for KzgProof {
-    fn into(self) -> [u8; BYTES_PER_PROOF] {
-        self.0
+impl From<KzgProof> for [u8; BYTES_PER_PROOF] {
+    fn from(from: KzgProof) -> [u8; BYTES_PER_PROOF] {
+        from.0
     }
 }
diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs
index 658dc9fe06e..181642df390 100644
--- a/crypto/kzg/src/lib.rs
+++ b/crypto/kzg/src/lib.rs
@@ -19,6 +19,8 @@ pub enum Error {
     Kzg(c_kzg::Error),
     /// The kzg verification failed
     KzgVerificationFailed,
+    /// Misc indexing error
+    InconsistentArrayLength(String),
 }
 
 impl From<c_kzg::Error> for Error {
@@ -27,6 +29,28 @@
 }
 
+pub const CELLS_PER_EXT_BLOB: usize = 128;
+
+// TODO(das): use proper crypto once ckzg merges das branch
+#[allow(dead_code)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub struct Cell {
+    bytes: [u8; 2048usize],
+}
+
+impl Cell {
+    pub fn from_bytes(b: &[u8]) -> Result<Self, Error> {
+        Ok(Self {
+            bytes: b
+                .try_into()
+                .map_err(|_| Error::Kzg(c_kzg::Error::MismatchLength("".to_owned())))?,
+        })
+    }
+    pub fn into_inner(self) -> [u8; 2048usize] {
+        self.bytes
+    }
+}
+
 /// A wrapper over a kzg library that holds the trusted setup parameters.
 #[derive(Debug)]
 pub struct Kzg {
@@ -141,6 +165,55 @@ impl Kzg {
         )
         .map_err(Into::into)
     }
+
+    /// Computes the cells and associated proofs for a given `blob` at index `index`.
+    #[allow(clippy::type_complexity)]
+    pub fn compute_cells_and_proofs(
+        &self,
+        _blob: &Blob,
+    ) -> Result<
+        (
+            Box<[Cell; CELLS_PER_EXT_BLOB]>,
+            Box<[KzgProof; CELLS_PER_EXT_BLOB]>,
+        ),
+        Error,
+    > {
+        // TODO(das): use proper crypto once ckzg merges das branch
+        let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] }));
+        let proofs = Box::new([KzgProof([0u8; BYTES_PER_PROOF]); CELLS_PER_EXT_BLOB]);
+        Ok((cells, proofs))
+    }
+
+    /// Verifies a batch of cell-proof-commitment triplets.
+    ///
+    /// Here, `coordinates` correspond to the (row, col) coordinate of the cell in the extended
+    /// blob "matrix". In the 1D extension, row corresponds to the blob index, and col corresponds
+    /// to the data column index.
+ pub fn verify_cell_proof_batch( + &self, + _cells: &[Cell], + _kzg_proofs: &[Bytes48], + _coordinates: &[(u64, u64)], + _kzg_commitments: &[Bytes48], + ) -> Result<(), Error> { + // TODO(das): use proper crypto once ckzg merges das branch + Ok(()) + } + + pub fn cells_to_blob(&self, _cells: &[Cell; CELLS_PER_EXT_BLOB]) -> Result { + // TODO(das): use proper crypto once ckzg merges das branch + Ok(Blob::new([0u8; 131072usize])) + } + + pub fn recover_all_cells( + &self, + _cell_ids: &[u64], + _cells: &[Cell], + ) -> Result, Error> { + // TODO(das): use proper crypto once ckzg merges das branch + let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); + Ok(cells) + } } impl TryFrom for Kzg { diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 07045dd95c2..96176f3fba5 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,10 +10,8 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -logging = { workspace = true } -sloggers = { workspace = true } store = { workspace = true } -tempfile = { workspace = true } types = { workspace = true } slog = { workspace = true } strum = { workspace = true } +serde = { workspace = true } diff --git a/database_manager/src/cli.rs b/database_manager/src/cli.rs new file mode 100644 index 00000000000..5521b97805f --- /dev/null +++ b/database_manager/src/cli.rs @@ -0,0 +1,229 @@ +pub use clap::{Arg, ArgAction, Args, Command, FromArgMatches, Parser}; +use clap_utils::get_color_style; +use clap_utils::FLAG_HEADER; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +use crate::InspectTarget; + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + name = "database_manager", + visible_alias = "db", + about = "Manage a beacon node database.", + styles = get_color_style(), + next_line_help = true, + term_width = 80, + disable_help_flag = true, + disable_help_subcommand = true, + display_order = 0, +)] +pub struct DatabaseManager { + #[clap( + long, + value_name = "SLOT_COUNT", + help = "Specifies how often a freezer DB restore point should be stored. \ + Cannot be changed after initialization. \ + [default: 2048 (mainnet) or 64 (minimal)]", + display_order = 0 + )] + pub slots_per_restore_point: Option, + + #[clap( + long, + value_name = "DIR", + help = "Data directory for the freezer database.", + display_order = 0 + )] + pub freezer_dir: Option, + + #[clap( + long, + value_name = "EPOCHS", + default_value_t = 0, + help = "The margin for blob pruning in epochs. 
The oldest blobs are pruned \ + up until data_availability_boundary - blob_prune_margin_epochs.", + display_order = 0 + )] + pub blob_prune_margin_epochs: u64, + + #[clap( + long, + value_name = "DIR", + help = "Data directory for the blobs database.", + display_order = 0 + )] + pub blobs_dir: Option, + + #[clap( + long, + global = true, + help = "Prints help information", + action = clap::ArgAction::HelpLong, + display_order = 0, + help_heading = FLAG_HEADER + )] + help: Option, + + #[clap(subcommand)] + pub subcommand: DatabaseManagerSubcommand, +} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap(rename_all = "kebab-case")] +pub enum DatabaseManagerSubcommand { + Migrate(Migrate), + Inspect(Inspect), + Version(Version), + PrunePayloads(PrunePayloads), + PruneBlobs(PruneBlobs), + PruneStates(PruneStates), + Compact(Compact), +} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap(about = "Migrate the database to a specific schema version.")] +pub struct Migrate { + #[clap( + long, + value_name = "VERSION", + help = "Schema version to migrate to", + display_order = 0 + )] + pub to: u64, +} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap(about = "Inspect raw database values.")] +pub struct Inspect { + #[clap( + long, + value_name = "TAG", + help = "3-byte column ID (see `DBColumn`)", + display_order = 0 + )] + pub column: String, + + #[clap( + long, + value_enum, + value_name = "TARGET", + default_value_t = InspectTarget::ValueSizes, + help = "Select the type of output to show", + display_order = 0, + )] + pub output: InspectTarget, + + #[clap( + long, + value_name = "N", + help = "Skip over the first N keys", + display_order = 0 + )] + pub skip: Option, + + #[clap( + long, + value_name = "N", + help = "Output at most N keys", + display_order = 0 + )] + pub limit: Option, + + #[clap( + long, + conflicts_with = "blobs_db", + help = "Inspect the freezer DB rather than the hot DB", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub freezer: bool, + + #[clap( + long, + conflicts_with = "freezer", + help = "Inspect the blobs DB rather than the hot DB", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub blobs_db: bool, + + #[clap( + long, + value_name = "DIR", + help = "Base directory for the output files. Defaults to the current directory", + display_order = 0 + )] + pub output_dir: Option, +} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap(about = "Display database schema version.", visible_aliases = &["v"])] +pub struct Version {} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + about = "Prune finalized execution payloads.", + alias = "prune_payloads" +)] +pub struct PrunePayloads {} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + about = "Prune blobs older than data availability boundary.", + alias = "prune_blobs" +)] +pub struct PruneBlobs {} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + about = "Prune all beacon states from the freezer database.", + alias = "prune_states" +)] +pub struct PruneStates { + #[clap( + long, + help = "Commit to pruning states irreversably. 
Without this flag the command will \ + just check that the database is capable of being pruned.", + help_heading = FLAG_HEADER, + )] + pub confirm: bool, +} + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap(about = "Compact database manually.")] +pub struct Compact { + #[clap( + long, + value_name = "TAG", + help = "3-byte column ID (see `DBColumn`)", + display_order = 0 + )] + pub column: String, + + #[clap( + long, + conflicts_with = "blobs_db", + help = "Inspect the freezer DB rather than the hot DB", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub freezer: bool, + + #[clap( + long, + conflicts_with = "freezer", + help = "Inspect the blobs DB rather than the hot DB", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub blobs_db: bool, + + #[clap( + long, + value_name = "DIR", + help = "Base directory for the output files. Defaults to the current directory", + display_order = 0 + )] + pub output_dir: Option, +} diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 617192abfef..c5344f1f92e 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -1,10 +1,17 @@ +pub mod cli; +use crate::cli::DatabaseManager; +use crate::cli::Migrate; +use crate::cli::PruneStates; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; -use clap::{App, Arg, ArgMatches}; +use clap::ArgMatches; +use clap::ValueEnum; +use cli::{Compact, Inspect}; use environment::{Environment, RuntimeContext}; +use serde::{Deserialize, Serialize}; use slog::{info, warn, Logger}; use std::fs; use std::io::Write; @@ -15,220 +22,30 @@ use store::{ metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, DBColumn, HotColdDB, KeyValueStore, LevelDB, }; -use strum::{EnumString, EnumVariantNames, VariantNames}; +use strum::{EnumString, EnumVariantNames}; use types::{BeaconState, EthSpec, Slot}; -pub const CMD: &str = "database_manager"; - -pub fn version_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("version") - .visible_aliases(&["v"]) - .setting(clap::AppSettings::ColoredHelp) - .about("Display database schema version") -} - -pub fn migrate_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("migrate") - .setting(clap::AppSettings::ColoredHelp) - .about("Migrate the database to a specific schema version") - .arg( - Arg::with_name("to") - .long("to") - .value_name("VERSION") - .help("Schema version to migrate to") - .takes_value(true) - .required(true), - ) -} - -pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("inspect") - .setting(clap::AppSettings::ColoredHelp) - .about("Inspect raw database values") - .arg( - Arg::with_name("column") - .long("column") - .value_name("TAG") - .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("output") - .long("output") - .value_name("TARGET") - .help("Select the type of output to show") - .default_value("sizes") - .possible_values(InspectTarget::VARIANTS), - ) - .arg( - Arg::with_name("skip") - .long("skip") - .value_name("N") - .help("Skip over the first N keys"), - ) - .arg( - Arg::with_name("limit") - .long("limit") - .value_name("N") - .help("Output at most N keys"), - ) - .arg( - Arg::with_name("freezer") - .long("freezer") - .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), - ) - .arg( - Arg::with_name("blobs-db") - .long("blobs-db") - 
.help("Inspect the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), - ) - .arg( - Arg::with_name("output-dir") - .long("output-dir") - .value_name("DIR") - .help("Base directory for the output files. Defaults to the current directory") - .takes_value(true), - ) -} - -pub fn compact_cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("compact") - .setting(clap::AppSettings::ColoredHelp) - .about("Compact database manually") - .arg( - Arg::with_name("column") - .long("column") - .value_name("TAG") - .help("3-byte column ID (see `DBColumn`)") - .takes_value(true) - .required(true), - ) - .arg( - Arg::with_name("freezer") - .long("freezer") - .help("Inspect the freezer DB rather than the hot DB") - .takes_value(false) - .conflicts_with("blobs-db"), - ) - .arg( - Arg::with_name("blobs-db") - .long("blobs-db") - .help("Inspect the blobs DB rather than the hot DB") - .takes_value(false) - .conflicts_with("freezer"), - ) -} - -pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-payloads") - .alias("prune_payloads") - .setting(clap::AppSettings::ColoredHelp) - .about("Prune finalized execution payloads") -} - -pub fn prune_blobs_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-blobs") - .alias("prune_blobs") - .setting(clap::AppSettings::ColoredHelp) - .about("Prune blobs older than data availability boundary") -} - -pub fn prune_states_app<'a, 'b>() -> App<'a, 'b> { - App::new("prune-states") - .alias("prune_states") - .arg( - Arg::with_name("confirm") - .long("confirm") - .help( - "Commit to pruning states irreversably. Without this flag the command will \ - just check that the database is capable of being pruned.", - ) - .takes_value(false), - ) - .setting(clap::AppSettings::ColoredHelp) - .about("Prune all beacon states from the freezer database") -} - -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["db"]) - .setting(clap::AppSettings::ColoredHelp) - .about("Manage a beacon node database") - .arg( - Arg::with_name("slots-per-restore-point") - .long("slots-per-restore-point") - .value_name("SLOT_COUNT") - .help( - "Specifies how often a freezer DB restore point should be stored. \ - Cannot be changed after initialization. \ - [default: 2048 (mainnet) or 64 (minimal)]", - ) - .takes_value(true), - ) - .arg( - Arg::with_name("freezer-dir") - .long("freezer-dir") - .value_name("DIR") - .help("Data directory for the freezer database.") - .takes_value(true), - ) - .arg( - Arg::with_name("blob-prune-margin-epochs") - .long("blob-prune-margin-epochs") - .value_name("EPOCHS") - .help( - "The margin for blob pruning in epochs. 
The oldest blobs are pruned \ - up until data_availability_boundary - blob_prune_margin_epochs.", - ) - .takes_value(true) - .default_value("0"), - ) - .arg( - Arg::with_name("blobs-dir") - .long("blobs-dir") - .value_name("DIR") - .help("Data directory for the blobs database.") - .takes_value(true), - ) - .subcommand(migrate_cli_app()) - .subcommand(version_cli_app()) - .subcommand(inspect_cli_app()) - .subcommand(compact_cli_app()) - .subcommand(prune_payloads_app()) - .subcommand(prune_blobs_app()) - .subcommand(prune_states_app()) -} - fn parse_client_config( cli_args: &ArgMatches, + database_manager_config: &DatabaseManager, _env: &Environment, ) -> Result { let mut client_config = ClientConfig::default(); client_config.set_data_dir(get_data_dir(cli_args)); + client_config + .freezer_db_path + .clone_from(&database_manager_config.freezer_dir); + client_config + .blobs_db_path + .clone_from(&database_manager_config.blobs_dir); - if let Some(freezer_dir) = clap_utils::parse_optional(cli_args, "freezer-dir")? { - client_config.freezer_db_path = Some(freezer_dir); - } + let (sprp, sprp_explicit) = + get_slots_per_restore_point::(database_manager_config.slots_per_restore_point)?; - if let Some(blobs_db_dir) = clap_utils::parse_optional(cli_args, "blobs-dir")? { - client_config.blobs_db_path = Some(blobs_db_dir); - } - - let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; client_config.store.slots_per_restore_point = sprp; client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - - if let Some(blob_prune_margin_epochs) = - clap_utils::parse_optional(cli_args, "blob-prune-margin-epochs")? - { - client_config.store.blob_prune_margin_epochs = blob_prune_margin_epochs; - } + client_config.store.blob_prune_margin_epochs = database_manager_config.blob_prune_margin_epochs; Ok(client_config) } @@ -270,15 +87,21 @@ pub fn display_db_version( Ok(()) } -#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)] +#[derive( + Debug, PartialEq, Eq, Clone, EnumString, Deserialize, Serialize, EnumVariantNames, ValueEnum, +)] pub enum InspectTarget { #[strum(serialize = "sizes")] + #[clap(name = "sizes")] ValueSizes, #[strum(serialize = "total")] + #[clap(name = "total")] ValueTotal, #[strum(serialize = "values")] + #[clap(name = "values")] Values, #[strum(serialize = "gaps")] + #[clap(name = "gaps")] Gaps, } @@ -293,16 +116,18 @@ pub struct InspectConfig { output_dir: PathBuf, } -fn parse_inspect_config(cli_args: &ArgMatches) -> Result { - let column = clap_utils::parse_required(cli_args, "column")?; - let target = clap_utils::parse_required(cli_args, "output")?; - let skip = clap_utils::parse_optional(cli_args, "skip")?; - let limit = clap_utils::parse_optional(cli_args, "limit")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); - - let output_dir: PathBuf = - clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new); +fn parse_inspect_config(inspect_config: &Inspect) -> Result { + let column: DBColumn = inspect_config + .column + .parse() + .map_err(|e| format!("Unable to parse column flag: {e:?}"))?; + let target: InspectTarget = inspect_config.output.clone(); + let skip = inspect_config.skip; + let limit = inspect_config.limit; + let freezer = inspect_config.freezer; + let blobs_db = inspect_config.blobs_db; + + let output_dir: PathBuf = inspect_config.output_dir.clone().unwrap_or_default(); Ok(InspectConfig { column, target, @@ -419,10 +244,13 @@ pub struct CompactConfig { blobs_db: bool, 
} -fn parse_compact_config(cli_args: &ArgMatches) -> Result { - let column = clap_utils::parse_required(cli_args, "column")?; - let freezer = cli_args.is_present("freezer"); - let blobs_db = cli_args.is_present("blobs-db"); +fn parse_compact_config(compact_config: &Compact) -> Result { + let column: DBColumn = compact_config + .column + .parse() + .expect("column is a required field"); + let freezer = compact_config.freezer; + let blobs_db = compact_config.blobs_db; Ok(CompactConfig { column, freezer, @@ -461,8 +289,8 @@ pub struct MigrateConfig { to: SchemaVersion, } -fn parse_migrate_config(cli_args: &ArgMatches) -> Result { - let to = SchemaVersion(clap_utils::parse_required(cli_args, "to")?); +fn parse_migrate_config(migrate_config: &Migrate) -> Result { + let to = SchemaVersion(migrate_config.to); Ok(MigrateConfig { to }) } @@ -564,9 +392,10 @@ pub fn prune_blobs( pub struct PruneStatesConfig { confirm: bool, } - -fn parse_prune_states_config(cli_args: &ArgMatches) -> Result { - let confirm = cli_args.is_present("confirm"); +fn parse_prune_states_config( + prune_states_config: &PruneStates, +) -> Result { + let confirm = prune_states_config.confirm; Ok(PruneStatesConfig { confirm }) } @@ -645,33 +474,35 @@ pub fn prune_states( } /// Run the database manager, returning an error string if the operation did not succeed. -pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result<(), String> { - let client_config = parse_client_config(cli_args, &env)?; +pub fn run( + cli_args: &ArgMatches, + db_manager_config: &DatabaseManager, + env: Environment, +) -> Result<(), String> { + let client_config = parse_client_config(cli_args, db_manager_config, &env)?; let context = env.core_context(); let log = context.log().clone(); let format_err = |e| format!("Fatal error: {:?}", e); - match cli_args.subcommand() { - ("version", Some(_)) => { - display_db_version(client_config, &context, log).map_err(format_err) - } - ("migrate", Some(cli_args)) => { - let migrate_config = parse_migrate_config(cli_args)?; + match &db_manager_config.subcommand { + cli::DatabaseManagerSubcommand::Migrate(migrate_config) => { + let migrate_config = parse_migrate_config(migrate_config)?; migrate_db(migrate_config, client_config, &context, log).map_err(format_err) } - ("inspect", Some(cli_args)) => { - let inspect_config = parse_inspect_config(cli_args)?; + cli::DatabaseManagerSubcommand::Inspect(inspect_config) => { + let inspect_config = parse_inspect_config(inspect_config)?; inspect_db::(inspect_config, client_config) } - ("compact", Some(cli_args)) => { - let compact_config = parse_compact_config(cli_args)?; - compact_db::(compact_config, client_config, log).map_err(format_err) + cli::DatabaseManagerSubcommand::Version(_) => { + display_db_version(client_config, &context, log).map_err(format_err) } - ("prune-payloads", Some(_)) => { + cli::DatabaseManagerSubcommand::PrunePayloads(_) => { prune_payloads(client_config, &context, log).map_err(format_err) } - ("prune-blobs", Some(_)) => prune_blobs(client_config, &context, log).map_err(format_err), - ("prune-states", Some(cli_args)) => { + cli::DatabaseManagerSubcommand::PruneBlobs(_) => { + prune_blobs(client_config, &context, log).map_err(format_err) + } + cli::DatabaseManagerSubcommand::PruneStates(prune_states_config) => { let executor = env.core_context().executor; let network_config = context .eth2_network_config @@ -691,10 +522,13 @@ pub fn run(cli_args: &ArgMatches<'_>, env: Environment) -> Result .map_err(|e| format!("Error getting genesis state: {e}"))? 
.ok_or("Genesis state missing")?; - let prune_config = parse_prune_states_config(cli_args)?; + let prune_config = parse_prune_states_config(prune_states_config)?; prune_states(client_config, prune_config, genesis_state, &context, log) } - _ => Err("Unknown subcommand, for help `lighthouse database_manager --help`".into()), + cli::DatabaseManagerSubcommand::Compact(compact_config) => { + let compact_config = parse_compact_config(compact_config)?; + compact_db::(compact_config, client_config, log).map_err(format_err) + } } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2aba106e506..3cddd8ee60b 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.1.3" +version = "5.2.1" authors = ["Paul Hauner "] edition = { workspace = true } @@ -20,23 +20,18 @@ serde_json = { workspace = true } env_logger = { workspace = true } types = { workspace = true } state_processing = { workspace = true } -int_to_bytes = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } environment = { workspace = true } eth2_network_config = { workspace = true } -genesis = { workspace = true } deposit_contract = { workspace = true } tree_hash = { workspace = true } clap_utils = { workspace = true } lighthouse_network = { workspace = true } -validator_dir = { workspace = true, features = ["insecure_keys"] } +validator_dir = { workspace = true } lighthouse_version = { workspace = true } -directory = { workspace = true } account_utils = { workspace = true } eth2_wallet = { workspace = true } -eth1_test_rig = { workspace = true } -sensitive_url = { workspace = true } eth2 = { workspace = true } snap = { workspace = true } beacon_chain = { workspace = true } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 4f5c3f2972f..2cc9ce605b3 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -5,7 +5,7 @@ FROM rust:1.75.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES -ENV FEATURES $FEATURES +ENV FEATURES=$FEATURES RUN cd lighthouse && make install-lcli FROM ubuntu:22.04 diff --git a/lcli/src/block_root.rs b/lcli/src/block_root.rs index 0ee304c8a58..a90a4843d8a 100644 --- a/lcli/src/block_root.rs +++ b/lcli/src/block_root.rs @@ -32,6 +32,7 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; +use log::info; use std::path::PathBuf; use std::time::{Duration, Instant}; use types::{EthSpec, FullPayload, SignedBeaconBlock}; diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs deleted file mode 100644 index f75652c768f..00000000000 --- a/lcli/src/change_genesis_time.rs +++ /dev/null @@ -1,45 +0,0 @@ -use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; -use ssz::Encode; -use std::fs::File; -use std::io::{Read, Write}; -use std::path::PathBuf; -use types::{BeaconState, EthSpec}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let path = matches - .value_of("ssz-state") - .ok_or("ssz-state not specified")? - .parse::() - .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; - - let genesis_time = matches - .value_of("genesis-time") - .ok_or("genesis-time not specified")? 
- .parse::() - .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; - - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::()?; - - let mut state: BeaconState = { - let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; - - let mut ssz = vec![]; - - file.read_to_end(&mut ssz) - .map_err(|e| format!("Unable to read file: {}", e))?; - - BeaconState::from_ssz_bytes(&ssz, spec) - .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? - }; - - *state.genesis_time_mut() = genesis_time; - - let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; - - file.write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to file: {}", e))?; - - Ok(()) -} diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs deleted file mode 100644 index 974a34591f0..00000000000 --- a/lcli/src/create_payload_header.rs +++ /dev/null @@ -1,67 +0,0 @@ -use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required}; -use ssz::Encode; -use std::fs::File; -use std::io::Write; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{ - EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ForkName, -}; - -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let eth1_block_hash = parse_required(matches, "execution-block-hash")?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? - .as_secs(), - ); - let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; - let gas_limit = parse_required(matches, "gas-limit")?; - let file_name = matches.value_of("file").ok_or("No file supplied")?; - let fork_name: ForkName = parse_optional(matches, "fork")?.unwrap_or(ForkName::Bellatrix); - - let execution_payload_header: ExecutionPayloadHeader = match fork_name { - ForkName::Base | ForkName::Altair => return Err("invalid fork name".to_string()), - ForkName::Bellatrix => ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderBellatrix::default() - }), - ForkName::Capella => ExecutionPayloadHeader::Capella(ExecutionPayloadHeaderCapella { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderCapella::default() - }), - ForkName::Deneb => ExecutionPayloadHeader::Deneb(ExecutionPayloadHeaderDeneb { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderDeneb::default() - }), - ForkName::Electra => ExecutionPayloadHeader::Electra(ExecutionPayloadHeaderElectra { - gas_limit, - base_fee_per_gas, - timestamp: genesis_time, - block_hash: eth1_block_hash, - prev_randao: eth1_block_hash.into_root(), - ..ExecutionPayloadHeaderElectra::default() - }), - }; - - let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; - let bytes = execution_payload_header.as_ssz_bytes(); - file.write_all(bytes.as_slice()) - .map_err(|_| "Unable to write to file".to_string())?; - Ok(()) -} diff --git a/lcli/src/deploy_deposit_contract.rs 
b/lcli/src/deploy_deposit_contract.rs deleted file mode 100644 index b920486c846..00000000000 --- a/lcli/src/deploy_deposit_contract.rs +++ /dev/null @@ -1,32 +0,0 @@ -use clap::ArgMatches; -use environment::Environment; -use types::EthSpec; - -use eth1_test_rig::{Http, Provider}; - -pub fn run(env: Environment, matches: &ArgMatches<'_>) -> Result<(), String> { - let eth1_http: String = clap_utils::parse_required(matches, "eth1-http")?; - let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; - let validator_count: Option = clap_utils::parse_optional(matches, "validator-count")?; - - let client = Provider::::try_from(ð1_http) - .map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; - - env.runtime().block_on(async { - let contract = eth1_test_rig::DepositContract::deploy(client, confirmations, None) - .await - .map_err(|e| format!("Failed to deploy deposit contract: {:?}", e))?; - - println!("Deposit contract address: {:?}", contract.address()); - - // Deposit insecure validators to the deposit contract created - if let Some(validator_count) = validator_count { - let amount = env.eth2_config.spec.max_effective_balance; - for i in 0..validator_count { - println!("Submitting deposit for validator {}...", i); - contract.deposit_deterministic_async::(i, amount).await?; - } - } - Ok(()) - }) -} diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs deleted file mode 100644 index 635a36ef709..00000000000 --- a/lcli/src/eth1_genesis.rs +++ /dev/null @@ -1,62 +0,0 @@ -use clap::ArgMatches; -use environment::Environment; -use eth2_network_config::Eth2NetworkConfig; -use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService}; -use sensitive_url::SensitiveUrl; -use ssz::Encode; -use std::cmp::max; -use std::path::PathBuf; -use std::time::Duration; -use types::EthSpec; - -/// Interval between polling the eth1 node for genesis information. -pub const ETH1_GENESIS_UPDATE_INTERVAL: Duration = Duration::from_millis(7_000); - -pub fn run( - env: Environment, - testnet_dir: PathBuf, - matches: &ArgMatches<'_>, -) -> Result<(), String> { - let endpoints = matches - .value_of("eth1-endpoint") - .map(|e| { - warn!("The --eth1-endpoint flag is deprecated. 
Please use --eth1-endpoints instead"); - String::from(e) - }) - .or_else(|| matches.value_of("eth1-endpoints").map(String::from)); - - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - - let spec = eth2_network_config.chain_spec::()?; - - let mut config = Eth1Config::default(); - if let Some(v) = endpoints.clone() { - let endpoint = SensitiveUrl::parse(&v) - .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; - config.endpoint = Eth1Endpoint::NoAuth(endpoint); - } - config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); - config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; - config.lowest_cached_block_number = eth2_network_config.deposit_contract_deploy_block; - config.follow_distance = spec.eth1_follow_distance / 2; - config.node_far_behind_seconds = max(5, config.follow_distance) * spec.seconds_per_eth1_block; - - let genesis_service = - Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone())?; - - env.runtime().block_on(async { - let _ = genesis_service - .wait_for_genesis_state::(ETH1_GENESIS_UPDATE_INTERVAL, spec) - .await - .map(move |genesis_state| { - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); - eth2_network_config.force_write_to_file(testnet_dir) - }) - .map_err(|e| format!("Failed to find genesis: {}", e))?; - - info!("Starting service to produce genesis BeaconState from eth1"); - info!("Connecting to eth1 http endpoints: {:?}", endpoints); - - Ok(()) - }) -} diff --git a/lcli/src/indexed_attestations.rs b/lcli/src/indexed_attestations.rs index 63f8cd94637..ccc14171128 100644 --- a/lcli/src/indexed_attestations.rs +++ b/lcli/src/indexed_attestations.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; use clap_utils::parse_required; -use state_processing::common::get_indexed_attestation; +use state_processing::common::{attesting_indices_base, attesting_indices_electra}; use std::fs::File; use std::io::Read; use std::path::{Path, PathBuf}; @@ -33,9 +33,14 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { let indexed_attestations = attestations .into_iter() - .map(|att| { - let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; - get_indexed_attestation(committee.committee, &att) + .map(|att| match att { + Attestation::Base(att) => { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + attesting_indices_base::get_indexed_attestation(committee.committee, &att) + } + Attestation::Electra(att) => { + attesting_indices_electra::get_indexed_attestation_from_state(&state, &att) + } }) .collect::, _>>() .map_err(|e| format!("Error constructing indexed attestation: {:?}", e))?; diff --git a/lcli/src/insecure_validators.rs b/lcli/src/insecure_validators.rs deleted file mode 100644 index 67d04c2cd5e..00000000000 --- a/lcli/src/insecure_validators.rs +++ /dev/null @@ -1,64 +0,0 @@ -use clap::ArgMatches; -use std::fs; -use std::path::PathBuf; -use validator_dir::Builder as ValidatorBuilder; - -/// Generates validator directories with INSECURE, deterministic keypairs given the range -/// of indices, validator and secret directories. 
-pub fn generate_validator_dirs( - indices: &[usize], - validators_dir: PathBuf, - secrets_dir: PathBuf, -) -> Result<(), String> { - if !validators_dir.exists() { - fs::create_dir_all(&validators_dir) - .map_err(|e| format!("Unable to create validators dir: {:?}", e))?; - } - - if !secrets_dir.exists() { - fs::create_dir_all(&secrets_dir) - .map_err(|e| format!("Unable to create secrets dir: {:?}", e))?; - } - - for i in indices { - println!("Validator {}", i + 1); - - ValidatorBuilder::new(validators_dir.clone()) - .password_dir(secrets_dir.clone()) - .store_withdrawal_keystore(false) - .insecure_voting_keypair(*i) - .map_err(|e| format!("Unable to generate keys: {:?}", e))? - .build() - .map_err(|e| format!("Unable to build validator: {:?}", e))?; - } - - Ok(()) -} - -pub fn run(matches: &ArgMatches) -> Result<(), String> { - let validator_count: usize = clap_utils::parse_required(matches, "count")?; - let base_dir: PathBuf = clap_utils::parse_required(matches, "base-dir")?; - let node_count: Option = clap_utils::parse_optional(matches, "node-count")?; - if let Some(node_count) = node_count { - let validators_per_node = validator_count / node_count; - let validator_range = (0..validator_count).collect::>(); - let indices_range = validator_range - .chunks(validators_per_node) - .collect::>(); - - for (i, indices) in indices_range.iter().enumerate() { - let validators_dir = base_dir.join(format!("node_{}", i + 1)).join("validators"); - let secrets_dir = base_dir.join(format!("node_{}", i + 1)).join("secrets"); - generate_validator_dirs(indices, validators_dir, secrets_dir)?; - } - } else { - let validators_dir = base_dir.join("validators"); - let secrets_dir = base_dir.join("secrets"); - generate_validator_dirs( - (0..validator_count).collect::>().as_slice(), - validators_dir, - secrets_dir, - )?; - } - Ok(()) -} diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs deleted file mode 100644 index f44edffd468..00000000000 --- a/lcli/src/interop_genesis.rs +++ /dev/null @@ -1,49 +0,0 @@ -use clap::ArgMatches; -use clap_utils::parse_ssz_optional; -use eth2_network_config::Eth2NetworkConfig; -use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; -use ssz::Encode; -use std::path::PathBuf; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{test_utils::generate_deterministic_keypairs, EthSpec, Hash256}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let validator_count = matches - .value_of("validator-count") - .ok_or("validator-count not specified")? - .parse::() - .map_err(|e| format!("Unable to parse validator-count: {}", e))?; - - let genesis_time = if let Some(genesis_time) = matches.value_of("genesis-time") { - genesis_time - .parse::() - .map_err(|e| format!("Unable to parse genesis-time: {}", e))? - } else { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? - .as_secs() - }; - - let mut eth2_network_config = Eth2NetworkConfig::load(testnet_dir.clone())?; - - let mut spec = eth2_network_config.chain_spec::()?; - - if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? 
{ - spec.genesis_fork_version = v; - } - - let keypairs = generate_deterministic_keypairs(validator_count); - let genesis_state = interop_genesis_state::( - &keypairs, - genesis_time, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, - &spec, - )?; - - eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes().into()); - eth2_network_config.force_write_to_file(testnet_dir)?; - - Ok(()) -} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 7b5c1598c9e..85898b60ee4 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,26 +1,16 @@ -#[macro_use] -extern crate log; mod block_root; -mod change_genesis_time; mod check_deposit_data; -mod create_payload_header; -mod deploy_deposit_contract; -mod eth1_genesis; mod generate_bootnode_enr; mod indexed_attestations; -mod insecure_validators; -mod interop_genesis; mod mnemonic_validators; mod mock_el; -mod new_testnet; mod parse_ssz; -mod replace_state_pubkeys; mod skip_slots; mod state_root; mod transition_blocks; -use clap::{App, Arg, ArgMatches, SubCommand}; -use clap_utils::parse_optional; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{parse_optional, FLAG_HEADER}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::Eth2NetworkConfig; use parse_ssz::run_parse_ssz; @@ -32,944 +22,542 @@ use types::{EthSpec, EthSpecId}; fn main() { env_logger::init(); - let matches = App::new("Lighthouse CLI Tool") + let matches = Command::new("Lighthouse CLI Tool") .version(lighthouse_version::VERSION) + .display_order(0) .about("Performs various testing-related tasks, including defining testnets.") .arg( - Arg::with_name("spec") - .short("s") + Arg::new("spec") + .short('s') .long("spec") .value_name("STRING") - .takes_value(true) - .possible_values(&["minimal", "mainnet", "gnosis"]) + .action(ArgAction::Set) + .value_parser(["minimal", "mainnet", "gnosis"]) .default_value("mainnet") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("d") + Arg::new("testnet-dir") + .short('d') .long("testnet-dir") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .global(true) - .help("The testnet dir."), + .help("The testnet dir.") + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("NAME") - .takes_value(true) + .action(ArgAction::Set) .global(true) .help("The network to use. 
Defaults to mainnet.") .conflicts_with("testnet-dir") + .display_order(0) ) .subcommand( - SubCommand::with_name("skip-slots") + Command::new("skip-slots") .about( "Performs a state transition from some state across some number of skip slots", ) .arg( - Arg::with_name("output-path") + Arg::new("output-path") .long("output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output a SSZ file."), + .action(ArgAction::Set) + .help("Path to output a SSZ file.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to a SSZ file of the pre-state."), + .help("Path to a SSZ file of the pre-state.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("STATE_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("state-root") + Arg::new("state-root") .long("state-root") .value_name("HASH256") - .takes_value(true) - .help("Tree hash root of the provided state, to avoid computing it."), + .action(ArgAction::Set) + .help("Tree hash root of the provided state, to avoid computing it.") + .display_order(0) ) .arg( - Arg::with_name("slots") + Arg::new("slots") .long("slots") .value_name("INTEGER") - .takes_value(true) - .help("Number of slots to skip forward."), + .action(ArgAction::Set) + .help("Number of slots to skip forward.") + .display_order(0) ) .arg( - Arg::with_name("partial-state-advance") + Arg::new("partial-state-advance") .long("partial-state-advance") - .takes_value(false) - .help("If present, don't compute state roots when skipping forward."), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("If present, don't compute state roots when skipping forward.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("transition-blocks") + Command::new("transition-blocks") .about("Performs a state transition given a pre-state and block") .arg( - Arg::with_name("pre-state-path") + Arg::new("pre-state-path") .long("pre-state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("block-path") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") .requires("pre-state-path") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("post-state-output-path") + 
Arg::new("post-state-output-path") .long("post-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the post-state."), + .action(ArgAction::Set) + .help("Path to output the post-state.") + .display_order(0) ) .arg( - Arg::with_name("pre-state-output-path") + Arg::new("pre-state-output-path") .long("pre-state-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the pre-state, useful when used with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the pre-state, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("block-output-path") + Arg::new("block-output-path") .long("block-output-path") .value_name("PATH") - .takes_value(true) - .help("Path to output the block, useful when used with --beacon-url."), + .action(ArgAction::Set) + .help("Path to output the block, useful when used with --beacon-url.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) .arg( - Arg::with_name("no-signature-verification") + Arg::new("no-signature-verification") .long("no-signature-verification") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("Disable signature verification.") + .display_order(0) ) .arg( - Arg::with_name("exclude-cache-builds") + Arg::new("exclude-cache-builds") .long("exclude-cache-builds") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, pre-build the committee and tree-hash caches without \ - including them in the timings."), + including them in the timings.") + .display_order(0) ) .arg( - Arg::with_name("exclude-post-block-thc") + Arg::new("exclude-post-block-thc") .long("exclude-post-block-thc") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, don't rebuild the tree-hash-cache after applying \ - the block."), + the block.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("pretty-ssz") + Command::new("pretty-ssz") .about("Parses SSZ-encoded data from a file") .arg( - Arg::with_name("format") - .short("f") + Arg::new("format") + .short('f') .long("format") .value_name("FORMAT") - .takes_value(true) - .required(true) + .action(ArgAction::Set) + .required(false) .default_value("json") - .possible_values(&["json", "yaml"]) + .value_parser(["json", "yaml"]) .help("Output format to use") + .display_order(0) ) .arg( - Arg::with_name("type") + Arg::new("type") .value_name("TYPE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Type to decode"), + .help("Type to decode") + .display_order(0) ) .arg( - Arg::with_name("ssz-file") + Arg::new("ssz-file") 
.value_name("FILE") - .takes_value(true) - .required(true) - .help("Path to SSZ bytes"), - ) - ) - .subcommand( - SubCommand::with_name("deploy-deposit-contract") - .about( - "Deploy a testing eth1 deposit contract.", - ) - .arg( - Arg::with_name("eth1-http") - .long("eth1-http") - .short("e") - .value_name("ETH1_HTTP_PATH") - .help("Path to an Eth1 JSON-RPC IPC endpoint") - .takes_value(true) - .required(true) - ) - .arg( - Arg::with_name("confirmations") - .value_name("INTEGER") - .long("confirmations") - .takes_value(true) - .default_value("3") - .help("The number of block confirmations before declaring the contract deployed."), - ) - .arg( - Arg::with_name("validator-count") - .value_name("VALIDATOR_COUNT") - .long("validator-count") - .takes_value(true) - .help("If present, makes `validator_count` number of INSECURE deterministic deposits after \ - deploying the deposit contract." - ), - ) - ) - .subcommand( - SubCommand::with_name("eth1-genesis") - .about("Listens to the eth1 chain and finds the genesis beacon state") - .arg( - Arg::with_name("eth1-endpoint") - .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") - .takes_value(true) - .help("Deprecated. Use --eth1-endpoints."), - ) - .arg( - Arg::with_name("eth1-endpoints") - .long("eth1-endpoints") - .value_name("HTTP_SERVER_LIST") - .takes_value(true) - .conflicts_with("eth1-endpoint") - .help( - "One or more comma-delimited URLs to eth1 JSON-RPC http APIs. \ - If multiple endpoints are given the endpoints are used as \ - fallback in the given order.", - ), - ), - ) - .subcommand( - SubCommand::with_name("interop-genesis") - .about("Produces an interop-compatible genesis state using deterministic keypairs") - .arg( - Arg::with_name("validator-count") - .long("validator-count") - .index(1) - .value_name("INTEGER") - .takes_value(true) - .default_value("1024") - .help("The number of validators in the genesis state."), - ) - .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .short("t") - .value_name("UNIX_EPOCH") - .takes_value(true) - .help("The value for state.genesis_time. Defaults to now."), - ) - .arg( - Arg::with_name("genesis-fork-version") - .long("genesis-fork-version") - .value_name("HEX") - .takes_value(true) - .help( - "Used to avoid reply attacks between testnets. 
Recommended to set to - non-default.", - ), - ), - ) - .subcommand( - SubCommand::with_name("change-genesis-time") - .about( - "Loads a file with an SSZ-encoded BeaconState and modifies the genesis time.", - ) - .arg( - Arg::with_name("ssz-state") - .index(1) - .value_name("PATH") - .takes_value(true) - .required(true) - .help("The path to the SSZ file"), - ) - .arg( - Arg::with_name("genesis-time") - .index(2) - .value_name("UNIX_EPOCH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The value for state.genesis_time."), - ), - ) - .subcommand( - SubCommand::with_name("replace-state-pubkeys") - .about( - "Loads a file with an SSZ-encoded BeaconState and replaces \ - all the validator pubkeys with ones derived from the mnemonic \ - such that validator indices correspond to EIP-2334 voting keypair \ - derivation paths.", + .help("Path to SSZ bytes") + .display_order(0) ) - .arg( - Arg::with_name("ssz-state") - .index(1) - .value_name("PATH") - .takes_value(true) - .required(true) - .help("The path to the SSZ file"), - ) - .arg( - Arg::with_name("mnemonic") - .index(2) - .value_name("BIP39_MNENMONIC") - .takes_value(true) - .default_value( - "replace nephew blur decorate waste convince soup column \ - orient excite play baby", - ) - .help("The mnemonic for key derivation."), - ), ) .subcommand( - SubCommand::with_name("create-payload-header") - .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ - Useful as input for `lcli new-testnet --execution-payload-header FILE`. If `--fork` \ - is not provided, a payload header for the `Bellatrix` fork will be created.") - .arg( - Arg::with_name("execution-block-hash") - .long("execution-block-hash") - .value_name("BLOCK_HASH") - .takes_value(true) - .help("The block hash used when generating an execution payload. This \ - value is used for `execution_payload_header.block_hash` as well as \ - `execution_payload_header.random`") - .default_value( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ) - .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .value_name("INTEGER") - .takes_value(true) - .help("The genesis time when generating an execution payload.") - ) - .arg( - Arg::with_name("base-fee-per-gas") - .long("base-fee-per-gas") - .value_name("INTEGER") - .takes_value(true) - .help("The base fee per gas field in the execution payload generated.") - .default_value("1000000000"), - ) - .arg( - Arg::with_name("gas-limit") - .long("gas-limit") - .value_name("INTEGER") - .takes_value(true) - .help("The gas limit field in the execution payload generated.") - .default_value("30000000"), - ) - .arg( - Arg::with_name("file") - .long("file") - .value_name("FILE") - .takes_value(true) - .required(true) - .help("Output file"), - ).arg( - Arg::with_name("fork") - .long("fork") - .value_name("FORK") - .takes_value(true) - .default_value("bellatrix") - .help("The fork for which the execution payload header should be created.") - .possible_values(&["bellatrix", "capella", "deneb", "electra"]) - ) - ) - .subcommand( - SubCommand::with_name("new-testnet") - .about( - "Produce a new testnet directory. 
If any of the optional flags are not - supplied the values will remain the default for the --spec flag", - ) - .arg( - Arg::with_name("force") - .long("force") - .short("f") - .takes_value(false) - .help("Overwrites any previous testnet configurations"), - ) - .arg( - Arg::with_name("interop-genesis-state") - .long("interop-genesis-state") - .takes_value(false) - .help( - "If present, a interop-style genesis.ssz file will be generated.", - ), - ) - .arg( - Arg::with_name("derived-genesis-state") - .long("derived-genesis-state") - .takes_value(false) - .help( - "If present, a genesis.ssz file will be generated with keys generated from a given mnemonic.", - ), - ) - .arg( - Arg::with_name("mnemonic-phrase") - .long("mnemonic-phrase") - .value_name("MNEMONIC_PHRASE") - .takes_value(true) - .requires("derived-genesis-state") - .help("The mnemonic with which we generate the validator keys for a derived genesis state"), - ) - .arg( - Arg::with_name("min-genesis-time") - .long("min-genesis-time") - .value_name("UNIX_SECONDS") - .takes_value(true) - .help( - "The minimum permitted genesis time. For non-eth1 testnets will be - the genesis time. Defaults to now.", - ), - ) - .arg( - Arg::with_name("min-genesis-active-validator-count") - .long("min-genesis-active-validator-count") - .value_name("INTEGER") - .takes_value(true) - .help("The number of validators required to trigger eth2 genesis."), - ) - .arg( - Arg::with_name("genesis-delay") - .long("genesis-delay") - .value_name("SECONDS") - .takes_value(true) - .help("The delay between sufficient eth1 deposits and eth2 genesis."), - ) - .arg( - Arg::with_name("min-deposit-amount") - .long("min-deposit-amount") - .value_name("GWEI") - .takes_value(true) - .help("The minimum permitted deposit amount."), - ) - .arg( - Arg::with_name("max-effective-balance") - .long("max-effective-balance") - .value_name("GWEI") - .takes_value(true) - .help("The amount required to become a validator."), - ) - .arg( - Arg::with_name("effective-balance-increment") - .long("effective-balance-increment") - .value_name("GWEI") - .takes_value(true) - .help("The steps in effective balance calculation."), - ) - .arg( - Arg::with_name("ejection-balance") - .long("ejection-balance") - .value_name("GWEI") - .takes_value(true) - .help("The balance at which a validator gets ejected."), - ) - .arg( - Arg::with_name("eth1-follow-distance") - .long("eth1-follow-distance") - .value_name("ETH1_BLOCKS") - .takes_value(true) - .help("The distance to follow behind the eth1 chain head."), - ) - .arg( - Arg::with_name("genesis-fork-version") - .long("genesis-fork-version") - .value_name("HEX") - .takes_value(true) - .help( - "Used to avoid reply attacks between testnets. 
Recommended to set to - non-default.", - ), - ) - .arg( - Arg::with_name("seconds-per-slot") - .long("seconds-per-slot") - .value_name("SECONDS") - .takes_value(true) - .help("Eth2 slot time"), - ) - .arg( - Arg::with_name("seconds-per-eth1-block") - .long("seconds-per-eth1-block") - .value_name("SECONDS") - .takes_value(true) - .help("Eth1 block time"), - ) - .arg( - Arg::with_name("eth1-id") - .long("eth1-id") - .value_name("ETH1_ID") - .takes_value(true) - .help("The chain id and network id for the eth1 testnet."), - ) - .arg( - Arg::with_name("deposit-contract-address") - .long("deposit-contract-address") - .value_name("ETH1_ADDRESS") - .takes_value(true) - .required(true) - .help("The address of the deposit contract."), - ) - .arg( - Arg::with_name("deposit-contract-deploy-block") - .long("deposit-contract-deploy-block") - .value_name("ETH1_BLOCK_NUMBER") - .takes_value(true) - .default_value("0") - .help( - "The block the deposit contract was deployed. Setting this is a huge - optimization for nodes, please do it.", - ), - ) - .arg( - Arg::with_name("altair-fork-epoch") - .long("altair-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Altair hard fork", - ), - ) - .arg( - Arg::with_name("bellatrix-fork-epoch") - .long("bellatrix-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Bellatrix hard fork", - ), - ) - .arg( - Arg::with_name("capella-fork-epoch") - .long("capella-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Capella hard fork", - ), - ) - .arg( - Arg::with_name("deneb-fork-epoch") - .long("deneb-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Deneb hard fork", - ), - ) - .arg( - Arg::with_name("electra-fork-epoch") - .long("electra-fork-epoch") - .value_name("EPOCH") - .takes_value(true) - .help( - "The epoch at which to enable the Electra hard fork", - ), - ) - .arg( - Arg::with_name("ttd") - .long("ttd") - .value_name("TTD") - .takes_value(true) - .help( - "The terminal total difficulty", - ), - ) - .arg( - Arg::with_name("eth1-block-hash") - .long("eth1-block-hash") - .value_name("BLOCK_HASH") - .takes_value(true) - .help("The eth1 block hash used when generating a genesis state."), - ) - .arg( - Arg::with_name("execution-payload-header") - .long("execution-payload-header") - .value_name("FILE") - .takes_value(true) - .required(false) - .help("Path to file containing `ExecutionPayloadHeader` SSZ bytes to be \ - used in the genesis state."), - ) - .arg( - Arg::with_name("validator-count") - .long("validator-count") - .value_name("INTEGER") - .takes_value(true) - .help("The number of validators when generating a genesis state."), - ) - .arg( - Arg::with_name("genesis-time") - .long("genesis-time") - .value_name("INTEGER") - .takes_value(true) - .help("The genesis time when generating a genesis state."), - ) - .arg( - Arg::with_name("proposer-score-boost") - .long("proposer-score-boost") - .value_name("INTEGER") - .takes_value(true) - .help("The proposer score boost to apply as a percentage, e.g. 
70 = 70%"), - ) - - ) - .subcommand( - SubCommand::with_name("check-deposit-data") + Command::new("check-deposit-data") .about("Checks the integrity of some deposit data.") .arg( - Arg::with_name("deposit-amount") + Arg::new("deposit-amount") .index(1) .value_name("GWEI") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The amount (in Gwei) that was deposited"), + .help("The amount (in Gwei) that was deposited") + .display_order(0) ) .arg( - Arg::with_name("deposit-data") + Arg::new("deposit-data") .index(2) .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "A 0x-prefixed hex string of the deposit data. Should include the function signature.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("generate-bootnode-enr") + Command::new("generate-bootnode-enr") .about("Generates an ENR address to be used as a pre-genesis boot node.") .arg( - Arg::with_name("ip") + Arg::new("ip") .long("ip") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The IP address to be included in the ENR and used for discovery"), + .help("The IP address to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("udp-port") + Arg::new("udp-port") .long("udp-port") .value_name("UDP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The UDP port to be included in the ENR and used for discovery"), + .help("The UDP port to be included in the ENR and used for discovery") + .display_order(0) ) .arg( - Arg::with_name("tcp-port") + Arg::new("tcp-port") .long("tcp-port") .value_name("TCP_PORT") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "The TCP port to be included in the ENR and used for application comms", - ), + ) + .display_order(0) ) .arg( - Arg::with_name("output-dir") + Arg::new("output-dir") .long("output-dir") .value_name("OUTPUT_DIRECTORY") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The directory in which to create the network dir"), + .help("The directory in which to create the network dir") + .display_order(0) ) .arg( - Arg::with_name("genesis-fork-version") + Arg::new("genesis-fork-version") .long("genesis-fork-version") .value_name("HEX") - .takes_value(true) + .action(ArgAction::Set) .required(true) .help( "Used to avoid reply attacks between testnets. Recommended to set to non-default.", - ), + ) + .display_order(0) ), ) .subcommand( - SubCommand::with_name("insecure-validators") - .about("Produces validator directories with INSECURE, deterministic keypairs.") - .arg( - Arg::with_name("count") - .long("count") - .value_name("COUNT") - .takes_value(true) - .required(true) - .help("Produces validators in the range of 0..count."), - ) - .arg( - Arg::with_name("base-dir") - .long("base-dir") - .value_name("BASE_DIR") - .takes_value(true) - .required(true) - .help("The base directory where validator keypairs and secrets are stored"), - ) - .arg( - Arg::with_name("node-count") - .long("node-count") - .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), - ) - ) - .subcommand( - SubCommand::with_name("mnemonic-validators") + Command::new("mnemonic-validators") .about("Produces validator directories by deriving the keys from \ a mnemonic. 
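Aside: the clap 2 to clap 4 changes repeated throughout this diff are mechanical. Below is a minimal, self-contained sketch of the before/after builder pattern; the `--input` and `--verbose` flags are illustrative only, not lcli flags.

// Hedged sketch of the clap 2 -> clap 4 builder migration used in these hunks.
use clap::{Arg, ArgAction, Command};

fn cli() -> Command {
    Command::new("example")
        .arg(
            // clap 2: Arg::with_name("input").takes_value(true).possible_values(&["a", "b"])
            Arg::new("input")
                .long("input")
                .value_name("VALUE")
                .action(ArgAction::Set)      // replaces .takes_value(true)
                .value_parser(["a", "b"])    // replaces .possible_values(&[...])
                .required(true)
                .display_order(0),
        )
        .arg(
            // clap 2: Arg::with_name("verbose").short("v").hidden(true)
            Arg::new("verbose")
                .long("verbose")
                .short('v')                  // short flags are now chars
                .action(ArgAction::SetTrue)  // boolean flag, read back with get_flag
                .hide(true),                 // replaces .hidden(true)
        )
}

fn main() {
    let matches = cli().get_matches();
    // clap 2: matches.value_of("input") / matches.is_present("verbose")
    let input = matches.get_one::<String>("input").expect("required");
    let verbose = matches.get_flag("verbose");
    println!("input = {input}, verbose = {verbose}");
}

The same mapping covers the remaining argument hunks: `value_of` and `is_present` on the read side become `get_one::<String>` and `get_flag`, while `possible_values` and `hidden` become `value_parser` and `hide`.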
For testing purposes only, DO NOT USE IN \ PRODUCTION!") .arg( - Arg::with_name("count") + Arg::new("count") .long("count") .value_name("COUNT") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Produces validators in the range of 0..count."), + .help("Produces validators in the range of 0..count.") + .display_order(0) ) .arg( - Arg::with_name("base-dir") + Arg::new("base-dir") .long("base-dir") .value_name("BASE_DIR") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The base directory where validator keypairs and secrets are stored"), + .help("The base directory where validator keypairs and secrets are stored") + .display_order(0) ) .arg( - Arg::with_name("node-count") + Arg::new("node-count") .long("node-count") .value_name("NODE_COUNT") - .takes_value(true) - .help("The number of nodes to divide the validator keys to"), + .action(ArgAction::Set) + .help("The number of nodes to divide the validator keys to") + .display_order(0) ) .arg( - Arg::with_name("mnemonic-phrase") + Arg::new("mnemonic-phrase") .long("mnemonic-phrase") .value_name("MNEMONIC_PHRASE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("The mnemonic with which we generate the validator keys"), + .help("The mnemonic with which we generate the validator keys") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("indexed-attestations") + Command::new("indexed-attestations") .about("Convert attestations to indexed form, using the committees from a state.") .arg( - Arg::with_name("state") + Arg::new("state") .long("state") .value_name("SSZ_STATE") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("BeaconState to generate committees from (SSZ)"), + .help("BeaconState to generate committees from (SSZ)") + .display_order(0) ) .arg( - Arg::with_name("attestations") + Arg::new("attestations") .long("attestations") .value_name("JSON_ATTESTATIONS") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("List of Attestations to convert to indexed form (JSON)"), + .help("List of Attestations to convert to indexed form (JSON)") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("block-root") + Command::new("block-root") .about("Computes the block root of some block.") .arg( - Arg::with_name("block-path") + Arg::new("block-path") .long("block-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a SignedBeaconBlock from as SSZ."), + .help("Path to load a SignedBeaconBlock from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("block-id") + Arg::new("block-id") .long("block-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - 
SubCommand::with_name("state-root") + Command::new("state-root") .about("Computes the state root of some state.") .arg( - Arg::with_name("state-path") + Arg::new("state-path") .long("state-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("beacon-url") - .help("Path to load a BeaconState from as SSZ."), + .help("Path to load a BeaconState from as SSZ.") + .display_order(0) ) .arg( - Arg::with_name("beacon-url") + Arg::new("beacon-url") .long("beacon-url") .value_name("URL") - .takes_value(true) - .help("URL to a beacon-API provider."), + .action(ArgAction::Set) + .help("URL to a beacon-API provider.") + .display_order(0) ) .arg( - Arg::with_name("state-id") + Arg::new("state-id") .long("state-id") .value_name("BLOCK_ID") - .takes_value(true) + .action(ArgAction::Set) .requires("beacon-url") - .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)") + .display_order(0) ) .arg( - Arg::with_name("runs") + Arg::new("runs") .long("runs") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .default_value("1") - .help("Number of repeat runs, useful for benchmarking."), + .help("Number of repeat runs, useful for benchmarking.") + .display_order(0) ) ) .subcommand( - SubCommand::with_name("mock-el") + Command::new("mock-el") .about("Creates a mock execution layer server. This is NOT SAFE and should only \ be used for testing and development on testnets. Do not use in production. Do not \ use on mainnet. It cannot perform validator duties.") .arg( - Arg::with_name("jwt-output-path") + Arg::new("jwt-output-path") .long("jwt-output-path") .value_name("PATH") - .takes_value(true) + .action(ArgAction::Set) .required(true) - .help("Path to write the JWT secret."), + .help("Path to write the JWT secret.") + .display_order(0) ) .arg( - Arg::with_name("listen-address") + Arg::new("listen-address") .long("listen-address") .value_name("IP_ADDRESS") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this address.") .default_value("127.0.0.1") + .display_order(0) ) .arg( - Arg::with_name("listen-port") + Arg::new("listen-port") .long("listen-port") .value_name("PORT") - .takes_value(true) + .action(ArgAction::Set) .help("The server will listen on this port.") .default_value("8551") + .display_order(0) ) .arg( - Arg::with_name("all-payloads-valid") + Arg::new("all-payloads-valid") .long("all-payloads-valid") - .takes_value(true) + .action(ArgAction::Set) .help("Controls the response to newPayload and forkchoiceUpdated. \ Set to 'true' to return VALID. Set to 'false' to return SYNCING.") .default_value("false") - .hidden(true) + .hide(true) + .display_order(0) ) .arg( - Arg::with_name("shanghai-time") + Arg::new("shanghai-time") .long("shanghai-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Shanghai. Defaults to the mainnet value.") .default_value("1681338479") + .display_order(0) ) .arg( - Arg::with_name("cancun-time") + Arg::new("cancun-time") .long("cancun-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Cancun. 
No default is provided \ until Cancun is triggered on mainnet.") + .display_order(0) ) .arg( - Arg::with_name("prague-time") + Arg::new("prague-time") .long("prague-time") .value_name("UNIX_TIMESTAMP") - .takes_value(true) + .action(ArgAction::Set) .help("The payload timestamp that enables Prague. No default is provided \ until Prague is triggered on mainnet.") + .display_order(0) ) ) .get_matches(); let result = matches - .value_of("spec") + .get_one::("spec") .ok_or_else(|| "Missing --spec flag".to_string()) - .and_then(FromStr::from_str) + .and_then(|s| FromStr::from_str(s)) .and_then(|eth_spec_id| match eth_spec_id { EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches), EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches), @@ -985,10 +573,7 @@ fn main() { } } -fn run( - env_builder: EnvironmentBuilder, - matches: &ArgMatches<'_>, -) -> Result<(), String> { +fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() .map_err(|e| format!("should start tokio runtime: {:?}", e))? @@ -1020,9 +605,6 @@ fn run( (None, Some(network_name)) }; - // Lazily load either the testnet dir or the network config, as required. - // Some subcommands like new-testnet need the testnet dir but not the network config. - let get_testnet_dir = || testnet_dir.clone().ok_or("testnet-dir is required"); let get_network_config = || { if let Some(testnet_dir) = &testnet_dir { Eth2NetworkConfig::load(testnet_dir.clone()).map_err(|e| { @@ -1039,74 +621,42 @@ fn run( }; match matches.subcommand() { - ("transition-blocks", Some(matches)) => { + Some(("transition-blocks", matches)) => { let network_config = get_network_config()?; transition_blocks::run::(env, network_config, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)) } - ("skip-slots", Some(matches)) => { + Some(("skip-slots", matches)) => { let network_config = get_network_config()?; skip_slots::run::(env, network_config, matches) .map_err(|e| format!("Failed to skip slots: {}", e)) } - ("pretty-ssz", Some(matches)) => { + Some(("pretty-ssz", matches)) => { let network_config = get_network_config()?; run_parse_ssz::(network_config, matches) .map_err(|e| format!("Failed to pretty print hex: {}", e)) } - ("deploy-deposit-contract", Some(matches)) => { - deploy_deposit_contract::run::(env, matches) - .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) - } - ("eth1-genesis", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - eth1_genesis::run::(env, testnet_dir, matches) - .map_err(|e| format!("Failed to run eth1-genesis command: {}", e)) - } - ("interop-genesis", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - interop_genesis::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run interop-genesis command: {}", e)) - } - ("change-genesis-time", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - change_genesis_time::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) - } - ("create-payload-header", Some(matches)) => create_payload_header::run::(matches) - .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), - ("replace-state-pubkeys", Some(matches)) => { - let testnet_dir = get_testnet_dir()?; - replace_state_pubkeys::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) - } - ("new-testnet", Some(matches)) => { - let testnet_dir = 
get_testnet_dir()?; - new_testnet::run::(testnet_dir, matches) - .map_err(|e| format!("Failed to run new_testnet command: {}", e)) - } - ("check-deposit-data", Some(matches)) => check_deposit_data::run(matches) + Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) + Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::(matches) .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), - ("insecure-validators", Some(matches)) => insecure_validators::run(matches) - .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), - ("mnemonic-validators", Some(matches)) => mnemonic_validators::run(matches) + Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), - ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) + Some(("indexed-attestations", matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), - ("block-root", Some(matches)) => { + Some(("block-root", matches)) => { let network_config = get_network_config()?; block_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run block-root command: {}", e)) } - ("state-root", Some(matches)) => { + Some(("state-root", matches)) => { let network_config = get_network_config()?; state_root::run::(env, network_config, matches) .map_err(|e| format!("Failed to run state-root command: {}", e)) } - ("mock-el", Some(matches)) => mock_el::run::(env, matches) + Some(("mock-el", matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), - (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), + Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), + _ => Err("No subcommand provided. 
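Aside: the dispatch rewrite in this hunk follows clap 4's changed return type. `ArgMatches::subcommand()` now yields `Option<(&str, &ArgMatches)>` rather than `(&str, Option<&ArgMatches>)`, which is why every arm gains a `Some((..))` pattern and an explicit no-subcommand arm is required. A hedged sketch with an illustrative `run` subcommand (not an lcli subcommand):

use clap::{ArgMatches, Command};

fn dispatch(matches: &ArgMatches) -> Result<(), String> {
    match matches.subcommand() {
        // clap 2 pattern was: ("run", Some(sub_matches)) => ...
        Some(("run", sub_matches)) => {
            let _ = sub_matches; // hand off to the subcommand's own runner
            Ok(())
        }
        Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)),
        None => Err("No subcommand provided. See --help.".to_string()),
    }
}

fn main() {
    let matches = Command::new("example")
        .subcommand(Command::new("run"))
        .get_matches();
    if let Err(e) = dispatch(&matches) {
        eprintln!("{e}");
    }
}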
See --help.".to_string()), } } diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs deleted file mode 100644 index f6bfb2ac013..00000000000 --- a/lcli/src/new_testnet.rs +++ /dev/null @@ -1,393 +0,0 @@ -use account_utils::eth2_keystore::keypair_from_secret; -use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_network_config::{Eth2NetworkConfig, GenesisStateSource, TRUSTED_SETUP_BYTES}; -use eth2_wallet::bip39::Seed; -use eth2_wallet::bip39::{Language, Mnemonic}; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; -use ethereum_hashing::hash; -use ssz::Decode; -use ssz::Encode; -use state_processing::process_activations; -use state_processing::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, -}; -use std::fs::File; -use std::io::Read; -use std::path::PathBuf; -use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::ExecutionBlockHash; -use types::{ - test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, - Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ForkName, Hash256, Keypair, PublicKey, Validator, -}; - -pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; - let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; - - let overwrite_files = matches.is_present("force"); - - if testnet_dir_path.exists() && !overwrite_files { - return Err(format!( - "{:?} already exists, will not overwrite. Use --force to overwrite", - testnet_dir_path - )); - } - - let mut spec = E::default_spec(); - - // Update the spec value if the flag was defined. Otherwise, leave it as the default. - macro_rules! maybe_update { - ($flag: tt, $var: ident) => { - if let Some(val) = parse_optional(matches, $flag)? { - spec.$var = val - } - }; - } - - spec.deposit_contract_address = deposit_contract_address; - - maybe_update!("min-genesis-time", min_genesis_time); - maybe_update!("min-deposit-amount", min_deposit_amount); - maybe_update!( - "min-genesis-active-validator-count", - min_genesis_active_validator_count - ); - maybe_update!("max-effective-balance", max_effective_balance); - maybe_update!("effective-balance-increment", effective_balance_increment); - maybe_update!("ejection-balance", ejection_balance); - maybe_update!("eth1-follow-distance", eth1_follow_distance); - maybe_update!("genesis-delay", genesis_delay); - maybe_update!("eth1-id", deposit_chain_id); - maybe_update!("eth1-id", deposit_network_id); - maybe_update!("seconds-per-slot", seconds_per_slot); - maybe_update!("seconds-per-eth1-block", seconds_per_eth1_block); - - if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")? { - spec.genesis_fork_version = v; - } - - if let Some(proposer_score_boost) = parse_optional(matches, "proposer-score-boost")? { - spec.proposer_score_boost = Some(proposer_score_boost); - } - - if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? { - spec.altair_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "bellatrix-fork-epoch")? { - spec.bellatrix_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "capella-fork-epoch")? 
{ - spec.capella_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "deneb-fork-epoch")? { - spec.deneb_fork_epoch = Some(fork_epoch); - } - - if let Some(fork_epoch) = parse_optional(matches, "electra-fork-epoch")? { - spec.electra_fork_epoch = Some(fork_epoch); - } - - if let Some(ttd) = parse_optional(matches, "ttd")? { - spec.terminal_total_difficulty = ttd; - } - - let validator_count = parse_required(matches, "validator-count")?; - let execution_payload_header: Option> = - parse_optional(matches, "execution-payload-header")? - .map(|filename: String| { - let mut bytes = vec![]; - let mut file = File::open(filename.as_str()) - .map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); - match fork_name { - ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( - "genesis fork must be post-merge".to_string(), - )), - ForkName::Bellatrix => { - ExecutionPayloadHeaderBellatrix::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Bellatrix) - } - ForkName::Capella => { - ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Capella) - } - ForkName::Deneb => { - ExecutionPayloadHeaderDeneb::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Deneb) - } - ForkName::Electra => { - ExecutionPayloadHeaderElectra::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Electra) - } - } - .map_err(|e| format!("SSZ decode failed: {:?}", e)) - }) - .transpose()?; - - let (eth1_block_hash, genesis_time) = if let Some(payload) = execution_payload_header.as_ref() { - let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); - let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); - (eth1_block_hash, genesis_time) - } else { - let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { - "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() - })?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? 
- .as_secs(), - ); - (eth1_block_hash, genesis_time) - }; - - let genesis_state_bytes = if matches.is_present("interop-genesis-state") { - let keypairs = generate_deterministic_keypairs(validator_count); - let keypairs: Vec<_> = keypairs.into_iter().map(|kp| (kp.clone(), kp)).collect(); - - let genesis_state = initialize_state_with_validators::( - &keypairs, - genesis_time, - eth1_block_hash.into_root(), - execution_payload_header, - &spec, - )?; - - Some(genesis_state.as_ssz_bytes()) - } else if matches.is_present("derived-genesis-state") { - let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; - let mnemonic = Mnemonic::from_phrase(&mnemonic_phrase, Language::English).map_err(|e| { - format!( - "Unable to derive mnemonic from string {:?}: {:?}", - mnemonic_phrase, e - ) - })?; - let seed = Seed::new(&mnemonic, ""); - let keypairs = (0..validator_count as u32) - .map(|index| { - let (secret, _) = - recover_validator_secret_from_mnemonic(seed.as_bytes(), index, KeyType::Voting) - .unwrap(); - - let voting_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); - - let (secret, _) = recover_validator_secret_from_mnemonic( - seed.as_bytes(), - index, - KeyType::Withdrawal, - ) - .unwrap(); - let withdrawal_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); - (voting_keypair, withdrawal_keypair) - }) - .collect::>(); - let genesis_state = initialize_state_with_validators::( - &keypairs, - genesis_time, - eth1_block_hash.into_root(), - execution_payload_header, - &spec, - )?; - Some(genesis_state.as_ssz_bytes()) - } else { - None - }; - - let kzg_trusted_setup = if let Some(epoch) = spec.deneb_fork_epoch { - // Only load the trusted setup if the deneb fork epoch is set - if epoch != Epoch::max_value() { - Some(TRUSTED_SETUP_BYTES.to_vec()) - } else { - None - } - } else { - None - }; - let testnet = Eth2NetworkConfig { - deposit_contract_deploy_block, - boot_enr: Some(vec![]), - genesis_state_bytes: genesis_state_bytes.map(Into::into), - genesis_state_source: GenesisStateSource::IncludedBytes, - config: Config::from_chain_spec::(&spec), - kzg_trusted_setup, - }; - - testnet.write_to_file(testnet_dir_path, overwrite_files) -} - -/// Returns a `BeaconState` with the given validator keypairs embedded into the -/// genesis state. This allows us to start testnets without having to deposit validators -/// manually. -/// -/// The optional `execution_payload_header` allows us to start a network from the bellatrix -/// fork without the need to transition to altair and bellatrix. -/// -/// We need to ensure that `eth1_block_hash` is equal to the genesis block hash that is -/// generated from the execution side `genesis.json`. 
-fn initialize_state_with_validators( - keypairs: &[(Keypair, Keypair)], // Voting and Withdrawal keypairs - genesis_time: u64, - eth1_block_hash: Hash256, - execution_payload_header: Option>, - spec: &ChainSpec, -) -> Result, String> { - // If no header is provided, then start from a Bellatrix state by default - let default_header: ExecutionPayloadHeader = - ExecutionPayloadHeader::Bellatrix(ExecutionPayloadHeaderBellatrix { - block_hash: ExecutionBlockHash::from_root(eth1_block_hash), - parent_hash: ExecutionBlockHash::zero(), - ..ExecutionPayloadHeaderBellatrix::default() - }); - let execution_payload_header = execution_payload_header.unwrap_or(default_header); - // Empty eth1 data - let eth1_data = Eth1Data { - block_hash: eth1_block_hash, - deposit_count: 0, - deposit_root: Hash256::from_str( - "0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e", - ) - .unwrap(), // empty deposit tree root - }; - let mut state = BeaconState::new(genesis_time, eth1_data, spec); - - // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash).unwrap(); - - for keypair in keypairs.iter() { - let withdrawal_credentials = |pubkey: &PublicKey| { - let mut credentials = hash(&pubkey.as_ssz_bytes()); - credentials[0] = spec.bls_withdrawal_prefix_byte; - Hash256::from_slice(&credentials) - }; - let amount = spec.max_effective_balance; - // Create a new validator. - let validator = Validator { - pubkey: keypair.0.pk.clone().into(), - withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount - amount % (spec.effective_balance_increment), - spec.max_effective_balance, - ), - slashed: false, - }; - state.validators_mut().push(validator).unwrap(); - state.balances_mut().push(amount).unwrap(); - } - - process_activations(&mut state, spec).unwrap(); - - if spec - .altair_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_altair(&mut state, spec).unwrap(); - - state.fork_mut().previous_version = spec.altair_fork_version; - } - - // Similarly, perform an upgrade to Bellatrix if configured from genesis. - if spec - .bellatrix_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_bellatrix(&mut state, spec).unwrap(); - - // Remove intermediate Altair fork from `state.fork`. - state.fork_mut().previous_version = spec.bellatrix_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Bellatrix(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_bellatrix_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Similarly, perform an upgrade to Capella if configured from genesis. - if spec - .capella_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_capella(&mut state, spec).unwrap(); - - // Remove intermediate Bellatrix fork from `state.fork`. - state.fork_mut().previous_version = spec.capella_fork_version; - - // Override latest execution payload header. 
- // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Capella(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_capella_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Similarly, perform an upgrade to Deneb if configured from genesis. - if spec - .deneb_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_deneb(&mut state, spec).unwrap(); - - // Remove intermediate Capella fork from `state.fork`. - state.fork_mut().previous_version = spec.deneb_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Deneb(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_deneb_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Similarly, perform an upgrade to Electra if configured from genesis. - if spec - .electra_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) - { - upgrade_to_electra(&mut state, spec).unwrap(); - - // Remove intermediate Deneb fork from `state.fork`. - state.fork_mut().previous_version = spec.electra_fork_version; - - // Override latest execution payload header. - // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/bellatrix/beacon-chain.md#testing - if let ExecutionPayloadHeader::Electra(ref header) = execution_payload_header { - *state - .latest_execution_payload_header_electra_mut() - .or(Err("mismatched fork".to_string()))? = header.clone(); - } - } - - // Now that we have our validators, initialize the caches (including the committees) - state.build_caches(spec).unwrap(); - - // Set genesis validators root for domain separation and chain versioning - *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap(); - - // Sanity check for state fork matching config fork. - state - .fork_name(spec) - .map_err(|e| format!("state fork mismatch: {e:?}"))?; - - Ok(state) -} diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index e86ffb73dc2..dd13f6847b4 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use clap_utils::parse_required; use eth2_network_config::Eth2NetworkConfig; +use log::info; use serde::Serialize; use snap::raw::Decoder; use ssz::Decode; @@ -31,8 +32,12 @@ pub fn run_parse_ssz( network_config: Eth2NetworkConfig, matches: &ArgMatches, ) -> Result<(), String> { - let type_str = matches.value_of("type").ok_or("No type supplied")?; - let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; + let type_str = matches + .get_one::("type") + .ok_or("No type supplied")?; + let filename = matches + .get_one::("ssz-file") + .ok_or("No file supplied")?; let format = parse_required(matches, "format")?; let bytes = if filename.ends_with("ssz_snappy") { @@ -58,7 +63,7 @@ pub fn run_parse_ssz( // More fork-specific decoders may need to be added in future, but shouldn't be 100% necessary, // as the fork-generic decoder will always be available (requires correct --network flag). 
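Aside on the accessor change in the hunk below: `get_one::<String>` returns `Option<&String>`, so matching a type name against `&str` literals needs an explicit `.as_str()`. A small illustrative sketch (not lcli code):

use clap::{Arg, ArgAction, Command};

fn main() {
    let matches = Command::new("example")
        .arg(
            Arg::new("type")
                .long("type")
                .action(ArgAction::Set)
                .required(true),
        )
        .get_matches_from(["example", "--type", "SignedBeaconBlock"]);

    // clap 2: matches.value_of("type") returned Option<&str>.
    let type_str = matches.get_one::<String>("type").expect("required");
    match type_str.as_str() {
        "SignedBeaconBlock" => println!("would use the SignedBeaconBlock decoder"),
        other => eprintln!("unknown type: {other}"),
    }
}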
- match type_str { + match type_str.as_str() { "SignedBeaconBlock" => decode_and_print::>( &bytes, |bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec), diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs deleted file mode 100644 index e8d012b16ec..00000000000 --- a/lcli/src/replace_state_pubkeys.rs +++ /dev/null @@ -1,86 +0,0 @@ -use account_utils::{eth2_keystore::keypair_from_secret, mnemonic_from_phrase}; -use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; -use eth2_wallet::bip39::Seed; -use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; -use ssz::Encode; -use state_processing::common::DepositDataTree; -use std::fs::File; -use std::io::{Read, Write}; -use std::path::PathBuf; -use tree_hash::TreeHash; -use types::{BeaconState, DepositData, EthSpec, Hash256, SignatureBytes, DEPOSIT_TREE_DEPTH}; - -pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let path = matches - .value_of("ssz-state") - .ok_or("ssz-state not specified")? - .parse::() - .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; - - let mnemonic_phrase = matches - .value_of("mnemonic") - .ok_or("mnemonic not specified")?; - - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::()?; - - let mut state: BeaconState = { - let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; - - let mut ssz = vec![]; - - file.read_to_end(&mut ssz) - .map_err(|e| format!("Unable to read file: {}", e))?; - - BeaconState::from_ssz_bytes(&ssz, spec) - .map_err(|e| format!("Unable to decode SSZ: {:?}", e))? - }; - - let mnemonic = mnemonic_from_phrase(mnemonic_phrase)?; - let seed = Seed::new(&mnemonic, ""); - - let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let mut deposit_root = Hash256::zero(); - let validators = state.validators_mut(); - for index in 0..validators.len() { - let (secret, _) = - recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) - .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; - - let keypair = keypair_from_secret(secret.as_bytes()) - .map_err(|e| format!("Unable build keystore: {:?}", e))?; - - eprintln!("{}: {}", index, keypair.pk); - - validators.get_mut(index).unwrap().pubkey = keypair.pk.into(); - - // Update the deposit tree. - let mut deposit_data = DepositData { - pubkey: validators.get(index).unwrap().pubkey, - // Set this to a junk value since it's very time consuming to generate the withdrawal - // keys and it's not useful for the time being. - withdrawal_credentials: Hash256::zero(), - amount: spec.min_deposit_amount, - signature: SignatureBytes::empty(), - }; - deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); - deposit_tree - .push_leaf(deposit_data.tree_hash_root()) - .map_err(|e| format!("failed to create deposit tree: {:?}", e))?; - deposit_root = deposit_tree.root(); - } - - // Update the genesis validators root since we changed the validators. - *state.genesis_validators_root_mut() = state.validators().tree_hash_root(); - - // Update the deposit root with our simulated deposits. 
- state.eth1_data_mut().deposit_root = deposit_root; - - let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; - - file.write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to file: {}", e))?; - - Ok(()) -} diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index d421c077d83..2ad79051ea4 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -50,6 +50,7 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; +use log::info; use ssz::Encode; use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use state_processing::AllCaches; @@ -75,7 +76,7 @@ pub fn run( let runs: usize = parse_required(matches, "runs")?; let slots: u64 = parse_required(matches, "slots")?; let cli_state_root: Option = parse_optional(matches, "state-root")?; - let partial: bool = matches.is_present("partial-state-advance"); + let partial: bool = matches.get_flag("partial-state-advance"); info!("Using {} spec", E::spec_name()); info!("Advancing {} slots", slots); diff --git a/lcli/src/state_root.rs b/lcli/src/state_root.rs index 06293b79b3d..17a947b2f00 100644 --- a/lcli/src/state_root.rs +++ b/lcli/src/state_root.rs @@ -4,6 +4,7 @@ use clap_utils::{parse_optional, parse_required}; use environment::Environment; use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use eth2_network_config::Eth2NetworkConfig; +use log::info; use std::path::PathBuf; use std::time::{Duration, Instant}; use types::{BeaconState, EthSpec}; diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 77fd352829f..62ae602187b 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -72,6 +72,7 @@ use eth2::{ BeaconNodeHttpClient, SensitiveUrl, Timeouts, }; use eth2_network_config::Eth2NetworkConfig; +use log::{debug, info}; use ssz::Encode; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -117,9 +118,9 @@ pub fn run( let beacon_url: Option = parse_optional(matches, "beacon-url")?; let runs: usize = parse_required(matches, "runs")?; let config = Config { - no_signature_verification: matches.is_present("no-signature-verification"), - exclude_cache_builds: matches.is_present("exclude-cache-builds"), - exclude_post_block_thc: matches.is_present("exclude-post-block-thc"), + no_signature_verification: matches.get_flag("no-signature-verification"), + exclude_cache_builds: matches.get_flag("exclude-cache-builds"), + exclude_post_block_thc: matches.get_flag("exclude-post-block-thc"), }; info!("Using {} spec", E::spec_name()); @@ -390,7 +391,7 @@ fn do_transition( // Signature verification should prime the indexed attestation cache. assert_eq!( ctxt.num_cached_indexed_attestations(), - block.message().body().attestations().len() + block.message().body().attestations_len() ); } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 54faa03a31f..912602776af 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "lighthouse" -version = "5.1.3" +version = "5.2.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.75.0" +rust-version = "1.77.0" [features] default = ["slasher-lmdb"] @@ -22,13 +22,20 @@ gnosis = [] slasher-mdbx = ["slasher/mdbx"] # Support slasher LMDB backend. 
slasher-lmdb = ["slasher/lmdb"] -# Use jemalloc. -jemalloc = ["malloc_utils/jemalloc"] +# Support slasher redb backend. +slasher-redb = ["slasher/redb"] +# Deprecated. This is now enabled by default on non windows targets. +jemalloc = [] + +[target.'cfg(not(target_os = "windows"))'.dependencies] +malloc_utils = { workspace = true, features = ["jemalloc"] } + +[target.'cfg(target_os = "windows")'.dependencies] +malloc_utils = { workspace = true } [dependencies] beacon_node = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } types = { workspace = true } bls = { workspace = true } ethereum_hashing = { workspace = true } @@ -54,7 +61,6 @@ unused_port = { workspace = true } database_manager = { path = "../database_manager" } slasher = { workspace = true } validator_manager = { path = "../validator_manager" } -tracing-subscriber = { workspace = true } logging = { workspace = true } [dev-dependencies] diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index e59b1d455a4..a83a7a91571 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -36,7 +36,7 @@ use {futures::channel::oneshot, std::cell::RefCell}; pub use task_executor::test_utils::null_logger; -const LOG_CHANNEL_SIZE: usize = 2048; +const LOG_CHANNEL_SIZE: usize = 16384; const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; diff --git a/lighthouse/src/cli.rs b/lighthouse/src/cli.rs new file mode 100644 index 00000000000..90d3e811ebc --- /dev/null +++ b/lighthouse/src/cli.rs @@ -0,0 +1,9 @@ +use clap::Parser; +use database_manager::cli::DatabaseManager; +use serde::{Deserialize, Serialize}; + +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +pub enum LighthouseSubcommands { + #[clap(name = "database_manager")] + DatabaseManager(DatabaseManager), +} diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 932b125dc69..481e17dbc80 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,13 +1,20 @@ +mod cli; mod metrics; use beacon_node::ProductionBeaconNode; -use clap::{App, Arg, ArgMatches}; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; +use clap::FromArgMatches; +use clap::Subcommand; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{ + flags::DISABLE_MALLOC_TUNING_FLAG, get_color_style, get_eth2_network_config, FLAG_HEADER, +}; +use cli::LighthouseSubcommands; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; +use lazy_static::lazy_static; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info}; @@ -18,6 +25,27 @@ use task_executor::ShutdownReason; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; +lazy_static! 
{ + pub static ref SHORT_VERSION: String = VERSION.replace("Lighthouse/", ""); + pub static ref LONG_VERSION: String = format!( + "{}\n\ + BLS library: {}\n\ + BLS hardware acceleration: {}\n\ + SHA256 hardware acceleration: {}\n\ + Allocator: {}\n\ + Profile: {}\n\ + Specs: mainnet (true), minimal ({}), gnosis ({})", + SHORT_VERSION.as_str(), + bls_library_name(), + bls_hardware_acceleration(), + have_sha_extensions(), + allocator_name(), + build_profile_name(), + cfg!(feature = "spec-minimal"), + cfg!(feature = "gnosis"), + ); +} + fn bls_library_name() -> &'static str { if cfg!(feature = "portable") { "blst-portable" @@ -28,11 +56,20 @@ fn bls_library_name() -> &'static str { } } +#[inline(always)] +fn bls_hardware_acceleration() -> bool { + #[cfg(target_arch = "x86_64")] + return std::is_x86_feature_detected!("adx"); + + #[cfg(target_arch = "aarch64")] + return std::arch::is_aarch64_feature_detected!("neon"); +} + fn allocator_name() -> &'static str { - if cfg!(feature = "jemalloc") { - "jemalloc" - } else { + if cfg!(target_os = "windows") { "system" + } else { + "jemalloc" } } @@ -54,41 +91,31 @@ fn main() { } // Parse the CLI parameters. - let matches = App::new("Lighthouse") - .version(VERSION.replace("Lighthouse/", "").as_str()) + let cli = Command::new("Lighthouse") + .version(SHORT_VERSION.as_str()) .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) + .next_line_help(true) + .term_width(80) + .disable_help_flag(true) .about( "Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon \ node, a validator client and utilities for managing validator accounts.", ) - .long_version( - format!( - "{}\n\ - BLS library: {}\n\ - SHA256 hardware acceleration: {}\n\ - Allocator: {}\n\ - Profile: {}\n\ - Specs: mainnet (true), minimal ({}), gnosis ({})", - VERSION.replace("Lighthouse/", ""), - bls_library_name(), - have_sha_extensions(), - allocator_name(), - build_profile_name(), - cfg!(feature = "spec-minimal"), - cfg!(feature = "gnosis"), - ).as_str() - ) + .long_version(LONG_VERSION.as_str()) + .display_order(0) .arg( - Arg::with_name("env_log") - .short("l") + Arg::new("env_log") + .short('l') .help( "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", ) - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("logfile") + Arg::new("logfile") .long("logfile") .value_name("FILE") .help( @@ -97,115 +124,135 @@ fn main() { future logs are stored. 
\ Once the number of log files exceeds the value specified in \ `--logfile-max-number` the oldest log file will be overwritten.") - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-debug-level") + Arg::new("logfile-debug-level") .long("logfile-debug-level") .value_name("LEVEL") .help("The verbosity level used when emitting logs to the log file.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) .default_value("debug") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-format") + Arg::new("logfile-format") .long("logfile-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the logfile.") - .possible_values(&["DEFAULT", "JSON"]) - .takes_value(true) + .value_parser(["DEFAULT", "JSON"]) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-size") + Arg::new("logfile-max-size") .long("logfile-max-size") .value_name("SIZE") .help( "The maximum size (in MB) each log file can grow to before rotating. If set \ to 0, background file logging is disabled.") - .takes_value(true) + .action(ArgAction::Set) .default_value("200") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-max-number") + Arg::new("logfile-max-number") .long("logfile-max-number") .value_name("COUNT") .help( "The maximum number of log files that will be stored. If set to 0, \ background file logging is disabled.") - .takes_value(true) - .default_value("5") - .global(true), + .action(ArgAction::Set) + .default_value("10") + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-compress") + Arg::new("logfile-compress") .long("logfile-compress") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, compress old log files. This can help reduce the space needed \ to store old logs.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("logfile-no-restricted-perms") + Arg::new("logfile-no-restricted-perms") .long("logfile-no-restricted-perms") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, log files will be generated as world-readable meaning they can be read by \ any user on the machine. Note that logs can often contain sensitive information \ about your validator and so this flag should be used with caution. 
For Windows users, \ the log file permissions will be inherited from the parent folder.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-format") + Arg::new("log-format") .long("log-format") .value_name("FORMAT") .help("Specifies the log format used when emitting logs to the terminal.") - .possible_values(&["JSON"]) - .takes_value(true) - .global(true), + .value_parser(["JSON"]) + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("log-color") + Arg::new("log-color") .long("log-color") .alias("log-colour") .help("Force outputting colors when emitting logs to the terminal.") - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("disable-log-timestamp") + Arg::new("disable-log-timestamp") .long("disable-log-timestamp") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, do not include timestamps in logging output.") - .global(true), + .global(true) + .display_order(0) ) .arg( - Arg::with_name("debug-level") + Arg::new("debug-level") .long("debug-level") .value_name("LEVEL") .help("Specifies the verbosity level used when emitting logs to the terminal.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .action(ArgAction::Set) + .value_parser(["info", "debug", "trace", "warn", "error", "crit"]) .global(true) - .default_value("info"), + .default_value("info") + .display_order(0) ) .arg( - Arg::with_name("datadir") + Arg::new("datadir") .long("datadir") - .short("d") + .short('d') .value_name("DIR") .global(true) .help( "Used to specify a custom root data directory for lighthouse keys and databases. \ Defaults to $HOME/.lighthouse/{network} where network is the value of the `network` flag \ Note: Users should specify separate custom datadirs for different networks.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("testnet-dir") - .short("t") + Arg::new("testnet-dir") + .short('t') .long("testnet-dir") .value_name("DIR") .help( @@ -213,57 +260,66 @@ fn main() { a hard-coded Lighthouse testnet. Only effective if there is no \ existing database.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("network") + Arg::new("network") .long("network") .value_name("network") .help("Name of the Eth2 chain Lighthouse will sync and follow.") - .possible_values(HARDCODED_NET_NAMES) + .value_parser(HARDCODED_NET_NAMES.to_vec()) .conflicts_with("testnet-dir") - .takes_value(true) + .action(ArgAction::Set) .global(true) - + .display_order(0) ) .arg( - Arg::with_name("dump-config") + Arg::new("dump-config") .long("dump-config") - .hidden(true) + .hide(true) .help("Dumps the config to a desired location. Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("dump-chain-config") + Arg::new("dump-chain-config") .long("dump-chain-config") - .hidden(true) + .hide(true) .help("Dumps the chain config to a desired location. 
Used for testing only.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("immediate-shutdown") + Arg::new("immediate-shutdown") .long("immediate-shutdown") - .hidden(true) + .hide(true) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "Shuts down immediately after the Beacon Node or Validator has successfully launched. \ Used for testing only, DO NOT USE IN PRODUCTION.") .global(true) + .display_order(0) ) .arg( - Arg::with_name(DISABLE_MALLOC_TUNING_FLAG) + Arg::new(DISABLE_MALLOC_TUNING_FLAG) .long(DISABLE_MALLOC_TUNING_FLAG) .help( "If present, do not configure the system allocator. Providing this flag will \ generally increase memory usage, it should only be provided when debugging \ specific memory allocation issues." ) - .global(true), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-total-difficulty-override") + Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ @@ -272,11 +328,12 @@ fn main() { the broad Ethereum community has elected to override the terminal difficulty. \ Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-override") + Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ @@ -285,11 +342,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-epoch-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("terminal-block-hash-epoch-override") + Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ @@ -298,11 +356,12 @@ fn main() { Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-override") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("safe-slots-to-import-optimistically") + Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ @@ -311,11 +370,12 @@ fn main() { of an attack at the PoS transition block. Incorrect use of this flag can cause your \ node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ this flag.") - .takes_value(true) + .action(ArgAction::Set) .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url") + Arg::new("genesis-state-url") .long("genesis-state-url") .value_name("URL") .help( @@ -324,27 +384,40 @@ fn main() { If not supplied, a default URL or the --checkpoint-sync-url may be used. 
\ If the genesis state is already included in this binary then this value will be ignored.", ) - .takes_value(true) - .global(true), + .action(ArgAction::Set) + .global(true) + .display_order(0) ) .arg( - Arg::with_name("genesis-state-url-timeout") + Arg::new("genesis-state-url-timeout") .long("genesis-state-url-timeout") .value_name("SECONDS") .help( "The timeout in seconds for the request to --genesis-state-url.", ) - .takes_value(true) + .action(ArgAction::Set) .default_value("180") - .global(true), + .global(true) + .display_order(0) + ) + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) .subcommand(account_manager::cli_app()) - .subcommand(database_manager::cli_app()) - .subcommand(validator_manager::cli_app()) - .get_matches(); + .subcommand(validator_manager::cli_app()); + + let cli = LighthouseSubcommands::augment_subcommands(cli); + + let matches = cli.get_matches(); // Configure the allocator early in the process, before it has the chance to use the default values for // anything important. @@ -352,7 +425,7 @@ fn main() { // Only apply this optimization for the beacon node. It's the only process with a substantial // memory footprint. let is_beacon_node = matches.subcommand_name() == Some("beacon_node"); - if is_beacon_node && !matches.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if is_beacon_node && !matches.get_flag(DISABLE_MALLOC_TUNING_FLAG) { if let Err(e) = configure_memory_allocator() { eprintln!( "Unable to configure the memory allocator: {} \n\ @@ -370,7 +443,7 @@ fn main() { if let Some(bootnode_matches) = matches.subcommand_matches("boot_node") { // The bootnode uses the main debug-level flag let debug_info = matches - .value_of("debug-level") + .get_one::("debug-level") .expect("Debug-level must be present") .into(); @@ -430,53 +503,53 @@ fn run( } let debug_level = matches - .value_of("debug-level") + .get_one::("debug-level") .ok_or("Expected --debug-level flag")?; - let log_format = matches.value_of("log-format"); + let log_format = matches.get_one::("log-format"); - let log_color = matches.is_present("log-color"); + let log_color = matches.get_flag("log-color"); - let disable_log_timestamp = matches.is_present("disable-log-timestamp"); + let disable_log_timestamp = matches.get_flag("disable-log-timestamp"); let logfile_debug_level = matches - .value_of("logfile-debug-level") + .get_one::("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; let logfile_format = matches - .value_of("logfile-format") + .get_one::("logfile-format") // Ensure that `logfile-format` defaults to the value of `log-format`. - .or_else(|| matches.value_of("log-format")); + .or_else(|| matches.get_one::("log-format")); let logfile_max_size: u64 = matches - .value_of("logfile-max-size") + .get_one::("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? .parse() .map_err(|e| format!("Failed to parse `logfile-max-size`: {:?}", e))?; let logfile_max_number: usize = matches - .value_of("logfile-max-number") + .get_one::("logfile-max-number") .ok_or("Expected --logfile-max-number flag")? 
.parse() .map_err(|e| format!("Failed to parse `logfile-max-number`: {:?}", e))?; - let logfile_compress = matches.is_present("logfile-compress"); + let logfile_compress = matches.get_flag("logfile-compress"); - let logfile_restricted = !matches.is_present("logfile-no-restricted-perms"); + let logfile_restricted = !matches.get_flag("logfile-no-restricted-perms"); // Construct the path to the log file. let mut log_path: Option<PathBuf> = clap_utils::parse_optional(matches, "logfile")?; if log_path.is_none() { log_path = match matches.subcommand() { - ("beacon_node", _) => Some( + Some(("beacon_node", _)) => Some( parse_path_or_default(matches, "datadir")? .join(DEFAULT_BEACON_NODE_DIR) .join("logs") .join("beacon") .with_extension("log"), ), - ("validator_client", Some(vc_matches)) => { - let base_path = if vc_matches.is_present("validators-dir") { + Some(("validator_client", vc_matches)) => { + let base_path = if vc_matches.contains_id("validators-dir") { parse_path_or_default(vc_matches, "validators-dir")? } else { parse_path_or_default(matches, "datadir")?.join(DEFAULT_VALIDATOR_DIR) @@ -495,9 +568,9 @@ fn run( let sse_logging = { if let Some(bn_matches) = matches.subcommand_matches("beacon_node") { - bn_matches.is_present("gui") + bn_matches.get_flag("gui") } else if let Some(vc_matches) = matches.subcommand_matches("validator_client") { - vc_matches.is_present("http") + vc_matches.get_flag("http") } else { false } @@ -609,14 +682,13 @@ fn run( return Ok(()); } - if let Some(sub_matches) = matches.subcommand_matches(database_manager::CMD) { + if let Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) = + LighthouseSubcommands::from_arg_matches(matches) + { info!(log, "Running database manager for {} network", network_name); - // Pass the entire `environment` to the database manager so it can run blocking operations. - database_manager::run(sub_matches, environment)?; - - // Exit as soon as database manager returns control.
+ database_manager::run(matches, &db_manager_config, environment)?; return Ok(()); - } + }; info!(log, "Lighthouse started"; "version" => VERSION); info!( @@ -626,15 +698,21 @@ fn run( ); match matches.subcommand() { - ("beacon_node", Some(matches)) => { + Some(("beacon_node", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let mut config = beacon_node::get_config::<E>(matches, &context)?; config.logger_config = logger_config; - let shutdown_flag = matches.is_present("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; + + let shutdown_flag = matches.get_flag("immediate-shutdown"); + if shutdown_flag { + info!(log, "Beacon node immediate shutdown triggered."); + return Ok(()); + } + executor.clone().spawn( async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { @@ -644,46 +722,42 @@ fn run( let _ = executor .shutdown_sender() .try_send(ShutdownReason::Failure("Failed to start beacon node")); - } else if shutdown_flag { - let _ = executor.shutdown_sender().try_send(ShutdownReason::Success( - "Beacon node immediate shutdown triggered.", - )); } }, "beacon_node", ); } - ("validator_client", Some(matches)) => { + Some(("validator_client", matches)) => { let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); let config = validator_client::Config::from_cli(matches, context.log()) .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - let shutdown_flag = matches.is_present("immediate-shutdown"); // Dump configs if `dump-config` or `dump-chain-config` flags are set clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; - if !shutdown_flag { - executor.clone().spawn( - async move { - if let Err(e) = ProductionValidatorClient::new(context, config) - .and_then(|mut vc| async move { vc.start_service().await }) - .await - { - crit!(log, "Failed to start validator client"; "reason" => e); - // Ignore the error since it always occurs during normal operation when - // shutting down. - let _ = executor.shutdown_sender().try_send(ShutdownReason::Failure( - "Failed to start validator client", - )); - } - }, - "validator_client", - ); - } else { - let _ = executor.shutdown_sender().try_send(ShutdownReason::Success( - "Validator client immediate shutdown triggered.", - )); + + let shutdown_flag = matches.get_flag("immediate-shutdown"); + if shutdown_flag { + info!(log, "Validator client immediate shutdown triggered."); + return Ok(()); } + + executor.clone().spawn( + async move { + if let Err(e) = ProductionValidatorClient::new(context, config) + .and_then(|mut vc| async move { vc.start_service().await }) + .await + { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = executor + .shutdown_sender() + .try_send(ShutdownReason::Failure("Failed to start validator client")); + } + }, + "validator_client", + ); } _ => { crit!(log, "No subcommand supplied.
See --help ."); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7dfde69d3a9..cd499f2adae 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -58,13 +58,22 @@ impl CommandLineTest { fn run_with_zero_port(&mut self) -> CompletedTest { // Required since Deneb was enabled on mainnet. - self.cmd.arg("--allow-insecure-genesis-sync"); - self.run_with_zero_port_and_no_genesis_sync() + self.set_allow_insecure_genesis_sync() + .run_with_zero_port_and_no_genesis_sync() } fn run_with_zero_port_and_no_genesis_sync(&mut self) -> CompletedTest { + self.set_zero_port().run() + } + + fn set_allow_insecure_genesis_sync(&mut self) -> &mut Self { + self.cmd.arg("--allow-insecure-genesis-sync"); + self + } + + fn set_zero_port(&mut self) -> &mut Self { self.cmd.arg("-z"); - self.run() + self } } @@ -101,9 +110,20 @@ fn staking_flag() { } #[test] -#[should_panic] fn allow_insecure_genesis_sync_default() { - CommandLineTest::new().run_with_zero_port_and_no_genesis_sync(); + CommandLineTest::new() + .run_with_zero_port_and_no_genesis_sync() + .with_config(|config| { + assert_eq!(config.allow_insecure_genesis_sync, false); + }); +} + +#[test] +#[should_panic] +fn insecure_genesis_sync_should_panic() { + CommandLineTest::new() + .set_zero_port() + .run_with_immediate_shutdown(false); } #[test] @@ -636,6 +656,26 @@ fn builder_fallback_flags() { ); } +#[test] +fn builder_get_header_timeout() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-header-timeout"), + Some("1500"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_header_timeout, + Some(Duration::from_millis(1500)) + ); + }, + ); +} + #[test] fn builder_user_agent() { run_payload_builder_flag_test_with_config( @@ -1578,7 +1618,7 @@ fn empty_inbound_rate_limiter_flag() { #[test] fn disable_inbound_rate_limiter_flag() { CommandLineTest::new() - .flag("inbound-rate-limiter", Some("disabled")) + .flag("disable-inbound-rate-limiter", None) .run_with_zero_port() .with_config(|config| assert_eq!(config.network.inbound_rate_limiter_config, None)); } @@ -1846,6 +1886,19 @@ fn block_cache_size_flag() { .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); } #[test] +fn state_cache_size_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(128))); +} +#[test] +fn state_cache_size_flag() { + CommandLineTest::new() + .flag("state-cache-size", Some("64")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.state_cache_size, new_non_zero_usize(64))); +} +#[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) @@ -2121,7 +2174,6 @@ fn slasher_broadcast_flag_no_args() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-max-db-size", Some("1")) - .flag("slasher-broadcast", None) .run_with_zero_port() .with_config(|config| { let slasher_config = config @@ -2146,6 +2198,21 @@ fn slasher_broadcast_flag_no_default() { }); } #[test] +fn slasher_broadcast_flag_no_argument() { + CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-max-db-size", Some("1")) + .flag("slasher-broadcast", None) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config + .slasher + .as_ref() + .expect("Unable to parse Slasher config"); + assert!(slasher_config.broadcast); + }); +} +#[test] fn 
slasher_broadcast_flag_true() { CommandLineTest::new() .flag("slasher", None) @@ -2175,6 +2242,8 @@ fn slasher_broadcast_flag_false() { assert!(!slasher_config.broadcast); }); } + +#[cfg(all(feature = "lmdb"))] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend @@ -2205,7 +2274,9 @@ fn ensure_panic_on_failed_launch() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-chunk-size", Some("10")) - .run_with_zero_port() + .set_allow_insecure_genesis_sync() + .set_zero_port() + .run_with_immediate_shutdown(false) .with_config(|config| { let slasher_config = config .slasher @@ -2300,7 +2371,7 @@ fn proposer_re_org_disallowed_offsets_default() { #[test] fn proposer_re_org_disallowed_offsets_override() { CommandLineTest::new() - .flag("--proposer-reorg-disallowed-offsets", Some("1,2,3")) + .flag("proposer-reorg-disallowed-offsets", Some("1,2,3")) .run_with_zero_port() .with_config(|config| { assert_eq!( @@ -2314,7 +2385,7 @@ fn proposer_re_org_disallowed_offsets_override() { #[should_panic] fn proposer_re_org_disallowed_offsets_invalid() { CommandLineTest::new() - .flag("--proposer-reorg-disallowed-offsets", Some("32,33,34")) + .flag("proposer-reorg-disallowed-offsets", Some("32,33,34")) .run_with_zero_port(); } diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 61e0677ca8c..9d6453908c8 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -21,12 +21,19 @@ pub trait CommandLineTestExec { self } + fn run(&mut self) -> CompletedTest { + self.run_with_immediate_shutdown(true) + } + /// Executes the `Command` returned by `Self::cmd_mut` with temporary data directory, dumps - /// the configuration and shuts down immediately. + /// the configuration and shuts down immediately if `immediate_shutdown` is set to true. /// /// Options `--datadir`, `--dump-config`, `--dump-chain-config` and `--immediate-shutdown` must /// not be set on the command. - fn run(&mut self) -> CompletedTest { + fn run_with_immediate_shutdown( + &mut self, + immediate_shutdown: bool, + ) -> CompletedTest { // Setup temp directory. let tmp_dir = TempDir::new().expect("Unable to create temporary directory"); let tmp_config_path: PathBuf = tmp_dir.path().join("config.json"); @@ -39,8 +46,11 @@ pub trait CommandLineTestExec { .arg(format!("--{}", "dump-config")) .arg(tmp_config_path.as_os_str()) .arg(format!("--{}", "dump-chain-config")) - .arg(tmp_chain_config_path.as_os_str()) - .arg("--immediate-shutdown"); + .arg(tmp_chain_config_path.as_os_str()); + + if immediate_shutdown { + cmd.arg("--immediate-shutdown"); + } // Run the command. 
let output = output_result(cmd); diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 14b16daa6bb..5ec63b519ec 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -579,27 +579,22 @@ fn wrong_broadcast_flag() { } #[test] -fn latency_measurement_service() { - CommandLineTest::new().run().with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); - CommandLineTest::new() - .flag("latency-measurement-service", None) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); +fn disable_latency_measurement_service() { CommandLineTest::new() - .flag("latency-measurement-service", Some("true")) + .flag("disable-latency-measurement-service", None) .run() .with_config(|config| { - assert!(config.enable_latency_measurement_service); + assert!(!config.enable_latency_measurement_service); }); +} +#[test] +fn latency_measurement_service() { + // This flag is DEPRECATED so has no effect, but should still be accepted. CommandLineTest::new() .flag("latency-measurement-service", Some("false")) .run() .with_config(|config| { - assert!(!config.enable_latency_measurement_service); + assert!(config.enable_latency_measurement_service); }); } diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index fab1cfebf4b..bca6a18ab56 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -55,7 +55,12 @@ impl CommandLineTest { } fn run(mut cmd: Command, should_succeed: bool) { - let output = cmd.output().expect("process should complete"); + let output = cmd + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .expect("process should complete"); if output.status.success() != should_succeed { let stdout = String::from_utf8(output.stdout).unwrap(); let stderr = String::from_utf8(output.stderr).unwrap(); diff --git a/scripts/cli.sh b/scripts/cli.sh index 2767ed73c80..6ca019b39e9 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -12,14 +12,11 @@ write_to_file() { local file="$2" local program="$3" - # Remove first line of cmd to get rid of commit specific numbers. - cmd=${cmd#*$'\n'} - # We need to add the header and the backticks to create the code block. printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''' "$file" + sed -i -e '$a\'$'\n''\n''' "$file" } CMD=./target/release/lighthouse diff --git a/scripts/local_testnet/.gitignore b/scripts/local_testnet/.gitignore new file mode 100644 index 00000000000..98d8a5a6304 --- /dev/null +++ b/scripts/local_testnet/.gitignore @@ -0,0 +1 @@ +logs diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 77c9d62c1cd..0275cb217f8 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,201 +1,85 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with multiple beacon nodes and validator clients and a geth execution client. +These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis. This setup can be useful for testing and development. -## Requirements +## Installation -The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH` (run `echo $PATH` to view all `PATH` directories). +1. 
Install [Docker](https://docs.docker.com/get-docker/). Verify that Docker has been successfully installed by running `sudo docker run hello-world`. +1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version`, which should display the version. -MacOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well. - -The first step is to install Rust and dependencies. Refer to the [Lighthouse Book](https://lighthouse-book.sigmaprime.io/installation-source.html#dependencies) for installation. We will also need [jq](https://jqlang.github.io/jq/), which can be installed with `sudo apt install jq`. - -Then, we clone the Lighthouse repository: -```bash -cd ~ -git clone https://github.com/sigp/lighthouse.git -cd lighthouse -``` -We are now ready to build Lighthouse. Run the command: - -```bash -make -make install-lcli -``` - -This will build `lighthouse` and `lcli`. For `geth` and `bootnode`, go to [geth website](https://geth.ethereum.org/downloads) and download the `Geth & Tools`. For example, to download and extract `Geth & Tools 1.13.1`: - -```bash -cd ~ -curl -LO https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz -tar xvf geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz -``` - -After extraction, copy `geth` and `bootnode` to the `PATH`. A typical directory is `/usr/local/bin`. - -```bash -cd geth-alltools-linux-amd64-1.13.1-3f40e65c -sudo cp geth bootnode /usr/local/bin -``` - -After that We can remove the downloaded files: - -```bash -cd ~ -rm -r geth-alltools-linux-amd64-1.13.1-3f40e65c geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz -``` - -We are now ready to start a local testnet. +1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`. ## Starting the testnet -To start a testnet using the predetermined settings: +To start a testnet, run the following from the root of the Lighthouse repository: ```bash -cd ~ -cd ./lighthouse/scripts/local_testnet -./start_local_testnet.sh genesis.json +cd ./scripts/local_testnet +./start_local_testnet.sh ``` -This will execute the script and if the testnet setup is successful, you will see "Started!" at the end. - -The testnet starts with a post-merge genesis state. -The testnet starts a consensus layer and execution layer boot node along with `BN_COUNT` -(the number of beacon nodes) each connected to a geth execution client and `VC_COUNT` (the number of validator clients). By default, `BN_COUNT=4`, `VC_COUNT=4`. +It will build a Lighthouse Docker image from the repository root, which will take approximately 12 minutes to complete. Once built, the testnet will start automatically. You will see a list of running services and "Started!" at the end. +You can also use a Lighthouse Docker image of your own by specifying it in `network_params.yml` under the `cl_image` key. +The full configuration reference for Kurtosis is available [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). -The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state. -A sample `genesis.json` is provided in this directory. - -The options may be in any order or absent in which case they take the default value specified.
-- VC_COUNT: the number of validator clients to create, default: `BN_COUNT` -- DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` - -The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified. - -To view the beacon, validator client and geth logs: +To view all running services: ```bash -tail -f ~/.lighthouse/local-testnet/testnet/beacon_node_1.log -tail -f ~/.lighthouse/local-testnet/testnet/validator_node_1.log -tail -f ~/.lighthouse/local-testnet/testnet/geth_1.log +kurtosis enclave inspect local-testnet ``` -where `beacon_node_1` can be changed to `beacon_node_2`, `beacon_node_3` or `beacon_node_4` to view logs for different beacon nodes. The same applies to validator clients and geth nodes. - -## Stopping the testnet - -To stop the testnet, navigate to the directory `cd ~/lighthouse/scripts/local_testnet`, then run the command: +To view the logs: ```bash -./stop_local_testnet.sh +kurtosis service logs local-testnet $SERVICE_NAME ``` -Once a testnet is stopped, it cannot be continued from where it left off. When the start local testnet command is run, it will start a new local testnet. - -## Manual creation of local testnet - -In [Starting the testnet](./README.md#starting-the-testnet), the testnet is started automatically with predetermined parameters (database directory, ports used etc). This section describes some modifications of the local testnet settings, e.g., changing the database directory, or changing the ports used. - - -The testnet also contains parameters that are specified in `vars.env`, such as the slot time `SECONDS_PER_SLOT=3` (instead of 12 seconds on mainnet). You may change these parameters to suit your testing purposes. After that, in the `local_testnet` directory, run the following command to create genesis state with embedded validators and validator keys, and also to update the time in `genesis.json`: +where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth: ```bash -./setup.sh -./setup_time.sh genesis.json +kurtosis service logs local-testnet -f cl-1-lighthouse-geth +kurtosis service logs local-testnet -f vc-1-geth-lighthouse +kurtosis service logs local-testnet -f el-1-geth-lighthouse ``` -Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate. - -Generate bootnode enr and start an EL and CL bootnode so that multiple nodes can find each other -```bash -./bootnode.sh -./el_bootnode.sh -``` +If you would like to save the logs, use the command: -Start a geth node: ```bash -./geth.sh -``` -e.g. -```bash -./geth.sh $HOME/.lighthouse/local-testnet/geth_1 7001 6001 5001 genesis.json +kurtosis dump $OUTPUT_DIRECTORY ``` -Start a beacon node: +This will create a folder named `$OUTPUT_DIRECTORY` in the present working directory that contains all logs and other information. If you want the logs for a particular service to be saved to a file named `logs.txt`: ```bash -./beacon_node.sh -``` -e.g.
-```bash -./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9001 9101 8001 http://localhost:5001 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret +kurtosis service logs local-testnet $SERVICE_NAME -a > logs.txt ``` +where `$SERVICE_NAME` can be viewed by running `kurtosis enclave inspect local-testnet`. -In a new terminal, start the validator client which will attach to the first -beacon node: +Kurtosis comes with a Dora explorer which can be opened with: ```bash -./validator_client.sh -``` -e.g. to attach to the above created beacon node -```bash -./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8001 +open $(kurtosis port print local-testnet dora http) ``` -You can create additional geth, beacon node and validator client instances by changing the ports, e.g., for a second geth, beacon node and validator client: +Some testnet parameters can be varied by modifying the `network_params.yaml` file. Kurtosis also comes with a web UI which can be opened with `kurtosis web`. -```bash -./geth.sh $HOME/.lighthouse/local-testnet/geth_2 7002 6002 5002 genesis.json -./beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9002 9102 8002 http://localhost:5002 ~/.lighthouse/local-testnet/geth_2/geth/jwtsecret -./validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8002 -``` - -## Additional Info - -### Adjusting number and distribution of validators -The `VALIDATOR_COUNT` parameter is used to specify the number of insecure validator keystores to generate and make deposits for. -The `BN_COUNT` parameter is used to adjust the division of these generated keys among separate validator client instances. -For e.g. for `VALIDATOR_COUNT=80` and `BN_COUNT=4`, the validator keys are distributed over 4 datadirs with 20 keystores per datadir. The datadirs are located in `$DATADIR/node_{i}` which can be passed to separate validator client -instances using the `--datadir` parameter. - -### Starting fresh +## Stopping the testnet -You can delete the current testnet and all related files using the following command. Alternatively, if you wish to start another testnet, doing the steps [Starting the testnet](./README.md#starting-the-testnet) will automatically delete the files and start a fresh local testnet. +To stop the testnet, run the following from the root of the Lighthouse repository: ```bash -./clean.sh +cd ./scripts/local_testnet +./stop_local_testnet.sh ``` -### Updating the genesis time of the beacon state +You will see "Local testnet stopped." at the end. -If it's been a while since you ran `./setup` then the genesis time of the -genesis state will be far in the future, causing lots of skip slots. +## CLI options -Update the genesis time to now using: +The script comes with some CLI options, which can be viewed with `./start_local_testnet.sh --help`. One of the CLI options is to avoid rebuilding Lighthouse each time the testnet starts, which can be configured with the command: ```bash -./reset_genesis_time.sh -``` +> Note: you probably want to just rerun `./start_local_testnet.sh` to start over -> but this is another option. -### Testing builder flow -1. Add builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options: - [`mock-relay`](https://github.com/realbigsean/mock-relay) - [`dummy-builder`](https://github.com/michaelsproul/dummy_builder) -2. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`.
-3. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): - ```bash - ./start_local_testnet.sh -p genesis.json - ``` -4. Block production using builder flow will start at epoch 4. - -### Testing sending a transaction - -Some addresses in the local testnet are seeded with testnet ETH, allowing users to carry out transactions. To send a transaction, we first add the address to a wallet, such as [Metamask](https://metamask.io/). The private keys for the addresses are listed [here](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/testing/execution_engine_integration/src/execution_engine.rs#L13-L14). - -Next, we add the local testnet to Metamask, a brief guide can be found [here](https://support.metamask.io/hc/en-us/articles/360043227612-How-to-add-a-custom-network-RPC). If you start the local testnet with default settings, the network RPC is: http://localhost:6001 and the `Chain ID` is `4242`, as defined in [`vars.env`](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/scripts/local_testnet/vars.env#L42). Once the network and account are added, you should see that the account contains testnet ETH which allow us to carry out transactions. +./start_local_testnet.sh -b false +``` \ No newline at end of file diff --git a/scripts/local_testnet/anvil_test_node.sh b/scripts/local_testnet/anvil_test_node.sh deleted file mode 100755 index 41be9175605..00000000000 --- a/scripts/local_testnet/anvil_test_node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -exec anvil \ - --balance 1000000000 \ - --gas-limit 1000000000 \ - --accounts 10 \ - --mnemonic "$ETH1_NETWORK_MNEMONIC" \ - --block-time $SECONDS_PER_ETH1_BLOCK \ - --port 8545 \ - --chain-id "$CHAIN_ID" diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh deleted file mode 100755 index 940fe2b8581..00000000000 --- a/scripts/local_testnet/beacon_node.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash - -# -# Starts a beacon node based upon a genesis state created by `./setup.sh`. 
-# - -set -Eeuo pipefail - -source ./vars.env - -SUBSCRIBE_ALL_SUBNETS= -DEBUG_LEVEL=${DEBUG_LEVEL:-info} - -# Get options -while getopts "d:sh" flag; do - case "${flag}" in - d) DEBUG_LEVEL=${OPTARG};; - s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";; - h) - echo "Start a beacon node" - echo - echo "usage: $0 " - echo - echo "Options:" - echo " -s: pass --subscribe-all-subnets to 'lighthouse bn ...', default is not passed" - echo " -d: DEBUG_LEVEL, default info" - echo " -h: this help" - echo - echo "Positional arguments:" - echo " DATADIR Value for --datadir parameter" - echo " NETWORK-PORT Value for --enr-udp-port, --enr-tcp-port and --port" - echo " HTTP-PORT Value for --http-port" - echo " EXECUTION-ENDPOINT Value for --execution-endpoint" - echo " EXECUTION-JWT Value for --execution-jwt" - exit - ;; - esac -done - -# Get positional arguments -data_dir=${@:$OPTIND+0:1} -tcp_port=${@:$OPTIND+1:1} -quic_port=${@:$OPTIND+2:1} -http_port=${@:$OPTIND+3:1} -execution_endpoint=${@:$OPTIND+4:1} -execution_jwt=${@:$OPTIND+5:1} - -lighthouse_binary=lighthouse - -exec $lighthouse_binary \ - --debug-level $DEBUG_LEVEL \ - bn \ - $SUBSCRIBE_ALL_SUBNETS \ - --datadir $data_dir \ - --testnet-dir $TESTNET_DIR \ - --enable-private-discovery \ - --disable-peer-scoring \ - --staking \ - --enr-address 127.0.0.1 \ - --enr-udp-port $tcp_port \ - --enr-tcp-port $tcp_port \ - --enr-quic-port $quic_port \ - --port $tcp_port \ - --quic-port $quic_port \ - --http-port $http_port \ - --disable-packet-filter \ - --target-peers $((BN_COUNT - 1)) \ - --execution-endpoint $execution_endpoint \ - --execution-jwt $execution_jwt \ - $BN_ARGS diff --git a/scripts/local_testnet/bootnode.sh b/scripts/local_testnet/bootnode.sh deleted file mode 100755 index ca02a24140f..00000000000 --- a/scripts/local_testnet/bootnode.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -# -# Generates a bootnode enr and saves it in $TESTNET/boot_enr.yaml -# Starts a bootnode from the generated enr. -# - -set -Eeuo pipefail - -source ./vars.env - -echo "Generating bootnode enr" - -lcli \ - generate-bootnode-enr \ - --ip 127.0.0.1 \ - --udp-port $BOOTNODE_PORT \ - --tcp-port $BOOTNODE_PORT \ - --genesis-fork-version $GENESIS_FORK_VERSION \ - --output-dir $DATADIR/bootnode - -bootnode_enr=`cat $DATADIR/bootnode/enr.dat` -echo "- $bootnode_enr" > $TESTNET_DIR/boot_enr.yaml - -echo "Generated bootnode enr and written to $TESTNET_DIR/boot_enr.yaml" - -DEBUG_LEVEL=${1:-info} - -echo "Starting bootnode" - -exec lighthouse boot_node \ - --testnet-dir $TESTNET_DIR \ - --port $BOOTNODE_PORT \ - --listen-address 127.0.0.1 \ - --disable-packet-filter \ - --network-dir $DATADIR/bootnode \ diff --git a/scripts/local_testnet/clean.sh b/scripts/local_testnet/clean.sh deleted file mode 100755 index cd915e470d6..00000000000 --- a/scripts/local_testnet/clean.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -# -# Deletes all files associated with the local testnet. 
-# - -set -Eeuo pipefail - -source ./vars.env - -if [ -d $DATADIR ]; then - rm -rf $DATADIR -fi diff --git a/scripts/local_testnet/dump_logs.sh b/scripts/local_testnet/dump_logs.sh deleted file mode 100755 index 64b7942fb63..00000000000 --- a/scripts/local_testnet/dump_logs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# Print all the logs output from local testnet - -set -Eeuo pipefail - -source ./vars.env - -for f in "$TESTNET_DIR"/*.log -do - [[ -e "$f" ]] || break # handle the case of no *.log files - echo "=============================================================================" - echo "$f" - echo "=============================================================================" - cat "$f" - echo "" -done diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh deleted file mode 100755 index ee437a491c9..00000000000 --- a/scripts/local_testnet/el_bootnode.sh +++ /dev/null @@ -1,3 +0,0 @@ -priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" -source ./vars.env -exec $EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json deleted file mode 100644 index 26003bed5df..00000000000 --- a/scripts/local_testnet/genesis.json +++ /dev/null @@ -1,863 +0,0 @@ -{ - "config": { - "chainId": 4242, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "mergeNetsplitBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "pragueTime": 0, - "terminalTotalDifficulty": 0, - "terminalTotalDifficultyPassed": true - }, - "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x6d6172697573766477000000" - }, - "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { - "balance": "10000000000000000000000000" - }, - "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { - "balance": "10000000000000000000000000" - }, - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - 
"balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b
50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b50516040805160208181019490945280820192909252805180830382018152606
09092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b604080516008808252818301909252606091602082018180
36833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - 
"0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { - "balance": "1000000000000000000000000000" - }, - "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { - "balance": "1000000000000000000000000000" - }, - "0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { - "balance": "1000000000000000000000000000" - }, - "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { - "balance": "1000000000000000000000000000" - }, - "0x3B575D3cda6b30736A38B031E0d245E646A21135": { - "balance": "1000000000000000000000000000" - }, - "0x53bDe6CF93461674F590E532006b4022dA57A724": { - "balance": 
"1000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x01", - "extraData": "", - "gasLimit": "0x400000", - "nonce": "0x1234", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "1662465600" -} diff --git a/scripts/local_testnet/geth.sh b/scripts/local_testnet/geth.sh deleted file mode 100755 index 5dc4575cf0a..00000000000 --- a/scripts/local_testnet/geth.sh +++ /dev/null @@ -1,53 +0,0 @@ -set -Eeuo pipefail - -source ./vars.env - -# Get options -while getopts "d:sh" flag; do - case "${flag}" in - d) DEBUG_LEVEL=${OPTARG};; - s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";; - h) - echo "Start a geth node" - echo - echo "usage: $0 " - echo - echo "Options:" - echo " -h: this help" - echo - echo "Positional arguments:" - echo " DATADIR Value for --datadir parameter" - echo " NETWORK-PORT Value for --port" - echo " HTTP-PORT Value for --http.port" - echo " AUTH-PORT Value for --authrpc.port" - echo " GENESIS_FILE Value for geth init" - exit - ;; - esac -done - -# Get positional arguments -data_dir=${@:$OPTIND+0:1} -network_port=${@:$OPTIND+1:1} -http_port=${@:$OPTIND+2:1} -auth_port=${@:$OPTIND+3:1} -genesis_file=${@:$OPTIND+4:1} - -# Init -$GETH_BINARY init \ - --datadir $data_dir \ - $genesis_file - -echo "Completed init" - -exec $GETH_BINARY \ - --datadir $data_dir \ - --ipcdisable \ - --http \ - --http.api="engine,eth,web3,net,debug" \ - --networkid=$CHAIN_ID \ - --syncmode=full \ - --bootnodes $EL_BOOTNODE_ENODE \ - --port $network_port \ - --http.port $http_port \ - --authrpc.port $auth_port diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh deleted file mode 100755 index 83a0027337a..00000000000 --- a/scripts/local_testnet/kill_processes.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Kill processes - -set -Euo pipefail - -# First parameter is the file with -# one pid per line. -if [ -f "$1" ]; then - while read pid - do - # handle the case of blank lines - [[ -n "$pid" ]] || continue - - echo killing $pid - kill $pid || true - done < $1 -fi - - diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml new file mode 100644 index 00000000000..f54fce354a0 --- /dev/null +++ b/scripts/local_testnet/network_params.yaml @@ -0,0 +1,14 @@ +# Full configuration reference [here](https://github.com/kurtosis-tech/ethereum-package?tab=readme-ov-file#configuration). +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 4 +network_params: + deneb_fork_epoch: 0 + seconds_per_slot: 3 +global_log_level: debug +snooper_enabled: false diff --git a/scripts/local_testnet/reset_genesis_time.sh b/scripts/local_testnet/reset_genesis_time.sh deleted file mode 100755 index 68c8fb6b4cb..00000000000 --- a/scripts/local_testnet/reset_genesis_time.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# -# Resets the beacon state genesis time to now. 
-# - -set -Eeuo pipefail - -source ./vars.env - -NOW=$(date +%s) - -lcli \ - change-genesis-time \ - $TESTNET_DIR/genesis.ssz \ - $(date +%s) - -echo "Reset genesis time to now ($NOW)" diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh deleted file mode 100755 index 419cba19ed9..00000000000 --- a/scripts/local_testnet/setup.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -# -# Produces a testnet specification and a genesis state where the genesis time -# is now + $GENESIS_DELAY. -# -# Generates datadirs for multiple validator keys according to the -# $VALIDATOR_COUNT and $BN_COUNT variables. -# - -set -o nounset -o errexit -o pipefail - -source ./vars.env - - -NOW=`date +%s` -GENESIS_TIME=`expr $NOW + $GENESIS_DELAY` - -lcli \ - new-testnet \ - --spec $SPEC_PRESET \ - --deposit-contract-address $DEPOSIT_CONTRACT_ADDRESS \ - --testnet-dir $TESTNET_DIR \ - --min-genesis-active-validator-count $GENESIS_VALIDATOR_COUNT \ - --min-genesis-time $GENESIS_TIME \ - --genesis-delay $GENESIS_DELAY \ - --genesis-fork-version $GENESIS_FORK_VERSION \ - --altair-fork-epoch $ALTAIR_FORK_EPOCH \ - --bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \ - --capella-fork-epoch $CAPELLA_FORK_EPOCH \ - --deneb-fork-epoch $DENEB_FORK_EPOCH \ - --electra-fork-epoch $ELECTRA_FORK_EPOCH \ - --ttd $TTD \ - --eth1-block-hash $ETH1_BLOCK_HASH \ - --eth1-id $CHAIN_ID \ - --eth1-follow-distance 128 \ - --seconds-per-slot $SECONDS_PER_SLOT \ - --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ - --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ - --validator-count $GENESIS_VALIDATOR_COUNT \ - --interop-genesis-state \ - --force - -echo Specification and genesis.ssz generated at $TESTNET_DIR. -echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)" - -lcli \ - insecure-validators \ - --count $VALIDATOR_COUNT \ - --base-dir $DATADIR \ - --node-count $VC_COUNT - -echo Validators generated with keystore passwords at $DATADIR. 
diff --git a/scripts/local_testnet/setup_time.sh b/scripts/local_testnet/setup_time.sh deleted file mode 100755 index 36f7fc4e997..00000000000 --- a/scripts/local_testnet/setup_time.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -# Function to output SLOT_PER_EPOCH for mainnet or minimal -get_spec_preset_value() { - case "$SPEC_PRESET" in - mainnet) echo 32 ;; - minimal) echo 8 ;; - gnosis) echo 16 ;; - *) echo "Unsupported preset: $SPEC_PRESET" >&2; exit 1 ;; - esac -} - -SLOT_PER_EPOCH=$(get_spec_preset_value $SPEC_PRESET) -echo "slot_per_epoch=$SLOT_PER_EPOCH" - -genesis_file=$1 - -# Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') -echo $GENESIS_TIME -CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $CAPELLA_TIME -sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file -CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $CANCUN_TIME -sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file -PRAGUE_TIME=$((GENESIS_TIME + (ELECTRA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) -echo $PRAGUE_TIME -sed -i 's/"pragueTime".*$/"pragueTime": '"$PRAGUE_TIME"',/g' $genesis_file -cat $genesis_file - diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index be91d069985..4b03b1e0102 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -1,147 +1,83 @@ #!/usr/bin/env bash -# Start all processes necessary to create a local testnet -set -Eeuo pipefail +# Requires `docker`, `kurtosis`, `yq` -source ./vars.env +set -Eeuo pipefail -# Set a higher ulimit in case we want to import 1000s of validators. -ulimit -n 65536 +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ENCLAVE_NAME=local-testnet +NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -# VC_COUNT is defaulted in vars.env -DEBUG_LEVEL=${DEBUG_LEVEL:-info} -BUILDER_PROPOSALS= +BUILD_IMAGE=true +BUILDER_PROPOSALS=false +CI=false # Get options -while getopts "v:d:ph" flag; do +while getopts "e:b:n:phc" flag; do case "${flag}" in - v) VC_COUNT=${OPTARG};; - d) DEBUG_LEVEL=${OPTARG};; - p) BUILDER_PROPOSALS="-p";; + e) ENCLAVE_NAME=${OPTARG};; + b) BUILD_IMAGE=${OPTARG};; + n) NETWORK_PARAMS_FILE=${OPTARG};; + p) BUILDER_PROPOSALS=true;; + c) CI=true;; h) - validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) - echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," - echo "and $VC_COUNT validator clients with each vc having $validators validators." + echo "Start a local testnet with kurtosis." 
echo echo "usage: $0 " echo echo "Options:" - echo " -v: VC_COUNT default: $VC_COUNT" - echo " -d: DEBUG_LEVEL default: info" - echo " -p: enable builder proposals" - echo " -h: this help" + echo " -e: enclave name default: $ENCLAVE_NAME" + echo " -b: whether to build Lighthouse docker image default: $BUILD_IMAGE" + echo " -n: kurtosis network params file path default: $NETWORK_PARAMS_FILE" + echo " -p: enable builder proposals" + echo " -c: CI mode, run without other additional services like Grafana and Dora explorer" + echo " -h: this help" exit ;; esac done -if (( $VC_COUNT > $BN_COUNT )); then - echo "Error $VC_COUNT is too large, must be <= BN_COUNT=$BN_COUNT" - exit -fi - -genesis_file=${@:$OPTIND+0:1} - -# Init some constants -PID_FILE=$TESTNET_DIR/PIDS.pid -LOG_DIR=$TESTNET_DIR - -# Stop local testnet and remove $PID_FILE -./stop_local_testnet.sh - -# Clean $DATADIR and create empty log files so the -# user can "tail -f" right after starting this script -# even before its done. -./clean.sh -mkdir -p $LOG_DIR -for (( bn=1; bn<=$BN_COUNT; bn++ )); do - touch $LOG_DIR/beacon_node_$bn.log -done -for (( el=1; el<=$BN_COUNT; el++ )); do - touch $LOG_DIR/geth_$el.log -done -for (( vc=1; vc<=$VC_COUNT; vc++ )); do - touch $LOG_DIR/validator_node_$vc.log -done - -# Sleep with a message -sleeping() { - echo sleeping $1 - sleep $1 -} - -# Execute the command with logs saved to a file. -# -# First parameter is log file name -# Second parameter is executable name -# Remaining parameters are passed to executable -execute_command() { - LOG_NAME=$1 - EX_NAME=$2 - shift - shift - CMD="$EX_NAME $@ >> $LOG_DIR/$LOG_NAME 2>&1" - echo "executing: $CMD" - echo "$CMD" > "$LOG_DIR/$LOG_NAME" - eval "$CMD &" -} - -# Execute the command with logs saved to a file -# and is PID is saved to $PID_FILE. -# -# First parameter is log file name -# Second parameter is executable name -# Remaining parameters are passed to executable -execute_command_add_PID() { - execute_command $@ - echo "$!" >> $PID_FILE -} - +LH_IMAGE_NAME=$(yq eval ".participants[0].cl_image" $NETWORK_PARAMS_FILE) -# Setup data -echo "executing: ./setup.sh >> $LOG_DIR/setup.log" -./setup.sh >> $LOG_DIR/setup.log 2>&1 - -# Call setup_time.sh to update future hardforks time in the EL genesis file based on the CL genesis time -./setup_time.sh $genesis_file - -# Delay to let boot_enr.yaml to be created -execute_command_add_PID bootnode.log ./bootnode.sh -sleeping 3 - -execute_command_add_PID el_bootnode.log ./el_bootnode.sh -sleeping 3 - -# Start beacon nodes -BN_udp_tcp_base=9000 -BN_http_port_base=8000 +if ! command -v docker &> /dev/null; then + echo "Docker is not installed. Please install Docker and try again." + exit 1 +fi -EL_base_network=7000 -EL_base_http=6000 -EL_base_auth_http=5000 +if ! command -v kurtosis &> /dev/null; then + echo "kurtosis command not found. Please install kurtosis and try again." + exit +fi -(( $VC_COUNT < $BN_COUNT )) && SAS=-s || SAS= +if ! command -v yq &> /dev/null; then + echo "yq not found. Please install yq and try again." 
+fi -for (( el=1; el<=$BN_COUNT; el++ )); do - execute_command_add_PID geth_$el.log ./geth.sh $DATADIR/geth_datadir$el $((EL_base_network + $el)) $((EL_base_http + $el)) $((EL_base_auth_http + $el)) $genesis_file -done +if [ "$BUILDER_PROPOSALS" = true ]; then + yq eval '.participants[0].vc_extra_params = ["--builder-proposals"]' -i $NETWORK_PARAMS_FILE + echo "--builder-proposals VC flag added to network_params.yaml" +fi -sleeping 20 +if [ "$CI" = true ]; then + # TODO: run assertoor tests + yq eval '.additional_services = []' -i $NETWORK_PARAMS_FILE + echo "Running without additional services (CI mode)." +else + yq eval '.additional_services = ["dora", "prometheus_grafana"]' -i $NETWORK_PARAMS_FILE + echo "Additional services dora and prometheus_grafana added to network_params.yaml" +fi -# Reset the `genesis.json` config file fork times. -sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file -sed -i 's/"cancunTime".*$/"cancunTime": 0,/g' $genesis_file -sed -i 's/"pragueTime".*$/"pragueTime": 0,/g' $genesis_file +if [ "$BUILD_IMAGE" = true ]; then + echo "Building Lighthouse Docker image." + ROOT_DIR="$SCRIPT_DIR/../.." + docker build --build-arg FEATURES=portable -f $ROOT_DIR/Dockerfile -t $LH_IMAGE_NAME $ROOT_DIR +else + echo "Not rebuilding Lighthouse Docker image." +fi -for (( bn=1; bn<=$BN_COUNT; bn++ )); do - secret=$DATADIR/geth_datadir$bn/geth/jwtsecret - echo $secret - execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_udp_tcp_base + $bn + 100)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret -done +# Stop local testnet +kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true -# Start requested number of validator clients -for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) -done +kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package --args-file $NETWORK_PARAMS_FILE echo "Started!" diff --git a/scripts/local_testnet/stop_local_testnet.sh b/scripts/local_testnet/stop_local_testnet.sh index b1c3188ee3a..5500f8d5a04 100755 --- a/scripts/local_testnet/stop_local_testnet.sh +++ b/scripts/local_testnet/stop_local_testnet.sh @@ -1,10 +1,15 @@ #!/usr/bin/env bash -# Stop all processes that were started with start_local_testnet.sh - set -Eeuo pipefail -source ./vars.env +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +ENCLAVE_NAME=${1:-local-testnet} +LOGS_PATH=$SCRIPT_DIR/logs +LOGS_SUBDIR=$LOGS_PATH/$ENCLAVE_NAME + +# Delete existing logs directory and make sure parent directory exists. +rm -rf $LOGS_SUBDIR && mkdir -p $LOGS_PATH +kurtosis enclave dump $ENCLAVE_NAME $LOGS_SUBDIR +echo "Local testnet logs stored to $LOGS_SUBDIR." -PID_FILE=$TESTNET_DIR/PIDS.pid -./kill_processes.sh $PID_FILE -rm -f $PID_FILE +kurtosis enclave rm -f $ENCLAVE_NAME +echo "Local testnet stopped." diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh deleted file mode 100755 index d88a1833cb5..00000000000 --- a/scripts/local_testnet/validator_client.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# -# Starts a validator client based upon a genesis state created by -# `./setup.sh`. 
-# -# Usage: ./validator_client.sh - -set -Eeuo pipefail - -source ./vars.env - -DEBUG_LEVEL=info - -BUILDER_PROPOSALS= - -# Get options -while getopts "pd:" flag; do - case "${flag}" in - p) BUILDER_PROPOSALS="--builder-proposals";; - d) DEBUG_LEVEL=${OPTARG};; - esac -done - -exec lighthouse \ - --debug-level $DEBUG_LEVEL \ - vc \ - $BUILDER_PROPOSALS \ - --datadir ${@:$OPTIND:1} \ - --testnet-dir $TESTNET_DIR \ - --init-slashing-protection \ - --beacon-nodes ${@:$OPTIND+1:1} \ - --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 \ - $VC_ARGS diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env deleted file mode 100644 index 9bdec71ff78..00000000000 --- a/scripts/local_testnet/vars.env +++ /dev/null @@ -1,69 +0,0 @@ -# Path to the geth binary -GETH_BINARY=geth -EL_BOOTNODE_BINARY=bootnode - -# Base directories for the validator keys and secrets -DATADIR=~/.lighthouse/local-testnet - -# Directory for the eth2 config -TESTNET_DIR=$DATADIR/testnet - -# Mnemonic for generating validator keys -MNEMONIC_PHRASE="vast thought differ pull jewel broom cook wrist tribe word before omit" - -EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" - -# Hardcoded deposit contract -DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 - -GENESIS_FORK_VERSION=0x42424242 - -# Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=4b0e17cf5c04616d64526d292b80a1f2720cf2195d990006e4ea6950c5bbcb9f - -VALIDATOR_COUNT=80 -GENESIS_VALIDATOR_COUNT=80 - -# Number of beacon_node instances that you intend to run -BN_COUNT=4 - -# Number of validator clients -VC_COUNT=$BN_COUNT - -# Number of seconds to delay to start genesis block. -# If started by a script this can be 0, if starting by hand -# use something like 180. -GENESIS_DELAY=0 - -# Port for P2P communication with bootnode -BOOTNODE_PORT=4242 - -# Network ID and Chain ID of local eth1 test network -CHAIN_ID=4242 - -# Hard fork configuration -ALTAIR_FORK_EPOCH=0 -BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=0 -DENEB_FORK_EPOCH=1 -ELECTRA_FORK_EPOCH=9999999 - -TTD=0 - -# Spec version (mainnet or minimal) -SPEC_PRESET=mainnet - -# Seconds per Eth2 slot -SECONDS_PER_SLOT=3 - -# Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=3 - -# Proposer score boost percentage -PROPOSER_SCORE_BOOST=40 - -# Command line arguments for beacon node client -BN_ARGS="" - -# Command line arguments for validator client -VC_ARGS="" diff --git a/scripts/mdlint.sh b/scripts/mdlint.sh new file mode 100755 index 00000000000..5274f108d2c --- /dev/null +++ b/scripts/mdlint.sh @@ -0,0 +1,23 @@ +#! /usr/bin/env bash + +# IMPORTANT +# This script should NOT be run directly. +# Run `make mdlint` from the root of the repository instead. + +# use markdownlint-cli to check for markdown files +docker run --rm -v ./book:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest '**/*.md' --ignore node_modules + +# exit code +exit_code=$(echo $?) + +if [[ $exit_code == 0 ]]; then + echo "All markdown files are properly formatted." + exit 0 +elif [[ $exit_code == 1 ]]; then + echo "Exiting with errors. Run 'make mdlint' locally and commit the changes. Note that not all errors can be fixed automatically, if there are still errors after running 'make mdlint', look for the errors and fix manually." 
+ docker run --rm -v ./book:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest '**/*.md' --ignore node_modules --fix + exit 1 +else + echo "Exiting with exit code >1. Check for the error logs and fix them accordingly." + exit 1 +fi \ No newline at end of file diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index e13c06cdbac..441e2a63575 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -1,101 +1,129 @@ #!/usr/bin/env bash -# Requires `lighthouse`, `lcli`, `geth`, `bootnode`, `curl`, `jq` +# Requires `docker`, `kurtosis`, `yq`, `curl`, `jq` +set -Eeuo pipefail +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml BEHAVIOR=$1 +ENCLAVE_NAME=local-testnet-$BEHAVIOR + +SECONDS_PER_SLOT=$(yq eval ".network_params.seconds_per_slot" $NETWORK_PARAMS_FILE) +KEYS_PER_NODE=$(yq eval ".network_params.num_validator_keys_per_node" $NETWORK_PARAMS_FILE) +LH_IMAGE_NAME=$(yq eval ".participants[0].cl_image" $NETWORK_PARAMS_FILE) if [[ "$BEHAVIOR" != "success" ]] && [[ "$BEHAVIOR" != "failure" ]]; then echo "Usage: doppelganger_protection.sh [success|failure]" exit 1 fi -exit_if_fails() { - echo $@ - $@ - EXIT_CODE=$? - if [[ $EXIT_CODE -eq 1 ]]; then - exit 1 - fi +function exit_and_dump_logs() { + local exit_code=$1 + echo "Shutting down..." + $SCRIPT_DIR/../local_testnet/stop_local_testnet.sh $ENCLAVE_NAME + echo "Test completed with exit code $exit_code." + exit $exit_code } -genesis_file=$2 - -source ./vars.env - -exit_if_fails ../local_testnet/clean.sh - - -echo "Setting up local testnet" - -exit_if_fails ../local_testnet/setup.sh -# Duplicate this directory so slashing protection doesn't keep us from re-using validator keys -exit_if_fails cp -R $HOME/.lighthouse/local-testnet/node_1 $HOME/.lighthouse/local-testnet/node_1_doppelganger - -echo "Starting bootnode" - -exit_if_fails ../local_testnet/bootnode.sh &> /dev/null & - -exit_if_fails ../local_testnet/el_bootnode.sh &> /dev/null & - -# wait for the bootnode to start -sleep 10 +function get_service_status() { + local service_name=$1 + kurtosis service inspect $ENCLAVE_NAME $service_name | grep Status | cut -d':' -f2 | xargs +} -echo "Starting local execution nodes" +function run_command_without_exit() { + local command=$1 + set +e + eval "$command" + local exit_code=$? 
+ set -e + echo $exit_code +} -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 6000 5000 4000 $genesis_file &> geth.log & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 6100 5100 4100 $genesis_file &> /dev/null & -exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 6200 5200 4200 $genesis_file &> /dev/null & +# Start local testnet +$SCRIPT_DIR/../local_testnet/start_local_testnet.sh -e $ENCLAVE_NAME -b false -c -n $NETWORK_PARAMS_FILE -sleep 20 +# Immediately stop node 4 (as we only need the node 4 validator keys generated for later use) +kurtosis service stop $ENCLAVE_NAME cl-4-lighthouse-geth el-4-geth-lighthouse vc-4-geth-lighthouse > /dev/null -exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 8000 7000 9000 http://localhost:4000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 8100 7100 9100 http://localhost:4100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 8200 7200 9200 http://localhost:4200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & +# Get the http port to get the config +BN1_HTTP_ADDRESS=`kurtosis port print $ENCLAVE_NAME cl-1-lighthouse-geth http` -echo "Starting local validator clients" +# Get the genesis time and genesis delay +MIN_GENESIS_TIME=`curl -s $BN1_HTTP_ADDRESS/eth/v1/config/spec | jq '.data.MIN_GENESIS_TIME|tonumber'` +GENESIS_DELAY=`curl -s $BN1_HTTP_ADDRESS/eth/v1/config/spec | jq '.data.GENESIS_DELAY|tonumber'` -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:9000 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:9100 &> /dev/null & -exit_if_fails ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_3 http://localhost:9200 &> /dev/null & +CURRENT_TIME=`date +%s` +# Note: doppelganger protection can only be started post epoch 0 +echo "Waiting until next epoch before starting the next validator client..." +DELAY=$(( $SECONDS_PER_SLOT * 32 + $GENESIS_DELAY + $MIN_GENESIS_TIME - $CURRENT_TIME)) +sleep $DELAY -echo "Waiting an epoch before starting the next validator client" -sleep $(( $SECONDS_PER_SLOT * 32 )) +# Use BN2 for the next validator client +bn_2_url=$(kurtosis service inspect $ENCLAVE_NAME cl-2-lighthouse-geth | grep 'enr-address' | cut -d'=' -f2) +bn_2_port=4000 if [[ "$BEHAVIOR" == "failure" ]]; then - echo "Starting the doppelganger validator client" + echo "Starting the doppelganger validator client." # Use same keys as keys from VC1 and connect to BN2 # This process should not last longer than 2 epochs - timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_1_doppelganger http://localhost:9100 - DOPPELGANGER_EXIT=$? - - echo "Shutting down" - - # Cleanup - killall geth - killall lighthouse - killall bootnode - - echo "Done" - - # We expect to find a doppelganger, exit with success error code if doppelganger was found - # and failure if no doppelganger was found. 
- if [[ $DOPPELGANGER_EXIT -eq 1 ]]; then - exit 0 + vc_1_range_start=0 + vc_1_range_end=$(($KEYS_PER_NODE - 1)) + vc_1_keys_artifact_id="1-lighthouse-geth-$vc_1_range_start-$vc_1_range_end-0" + service_name=vc-1-doppelganger + + kurtosis service add \ + --files /validator_keys:$vc_1_keys_artifact_id,/testnet:el_cl_genesis_data \ + $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ + vc \ + --debug-level debug \ + --testnet-dir=/testnet \ + --validators-dir=/validator_keys/keys \ + --secrets-dir=/validator_keys/secrets \ + --init-slashing-protection \ + --beacon-nodes=http://$bn_2_url:$bn_2_port \ + --enable-doppelganger-protection \ + --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + + # Check if doppelganger VC has stopped and exited. Exit code 1 means the check timed out and VC is still running. + check_exit_cmd="until [ \$(get_service_status $service_name) != 'RUNNING' ]; do sleep 1; done" + doppelganger_exit=$(run_command_without_exit "timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) bash -c \"$check_exit_cmd\"") + + if [[ $doppelganger_exit -eq 1 ]]; then + echo "Test failed: expected doppelganger but VC is still running. Check the logs for details." + exit_and_dump_logs 1 else - exit 1 + echo "Test passed: doppelganger found and VC process stopped successfully." + exit_and_dump_logs 0 fi fi if [[ "$BEHAVIOR" == "success" ]]; then - echo "Starting the last validator client" + echo "Starting the last validator client." - ../local_testnet/validator_client.sh $HOME/.lighthouse/local-testnet/node_4 http://localhost:9100 & - DOPPELGANGER_FAILURE=0 + vc_4_range_start=$(($KEYS_PER_NODE * 3)) + vc_4_range_end=$(($KEYS_PER_NODE * 4 - 1)) + vc_4_keys_artifact_id="4-lighthouse-geth-$vc_4_range_start-$vc_4_range_end-0" + service_name=vc-4 + + kurtosis service add \ + --files /validator_keys:$vc_4_keys_artifact_id,/testnet:el_cl_genesis_data \ + $ENCLAVE_NAME $service_name $LH_IMAGE_NAME -- lighthouse \ + vc \ + --debug-level debug \ + --testnet-dir=/testnet \ + --validators-dir=/validator_keys/keys \ + --secrets-dir=/validator_keys/secrets \ + --init-slashing-protection \ + --beacon-nodes=http://$bn_2_url:$bn_2_port \ + --enable-doppelganger-protection \ + --suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 + + doppelganger_failure=0 # Sleep three epochs, then make sure all validators were active in epoch 2. Use # `is_previous_epoch_target_attester` from epoch 3 for a complete view of epoch 2 inclusion. @@ -104,20 +132,27 @@ if [[ "$BEHAVIOR" == "success" ]]; then echo "Waiting three epochs..." sleep $(( $SECONDS_PER_SLOT * 32 * 3 )) - PREVIOUS_DIR=$(pwd) - cd $HOME/.lighthouse/local-testnet/node_4/validators + # Get VC4 validator keys + keys_path=$SCRIPT_DIR/$ENCLAVE_NAME/node_4/validators + rm -rf $keys_path && mkdir -p $keys_path + kurtosis files download $ENCLAVE_NAME $vc_4_keys_artifact_id $keys_path + cd $keys_path/keys + for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:9100/lighthouse/validator_inclusion/3/$val | jq | grep -q '"is_previous_epoch_target_attester": false' - IS_ATTESTER=$? - if [[ $IS_ATTESTER -eq 0 ]]; then + is_attester=$(run_command_without_exit "curl -s $BN1_HTTP_ADDRESS/lighthouse/validator_inclusion/3/$val | jq | grep -q '\"is_previous_epoch_target_attester\": false'") + if [[ $is_attester -eq 0 ]]; then echo "$val did not attest in epoch 2." else echo "ERROR! $val did attest in epoch 2." 
- DOPPELGANGER_FAILURE=1 + doppelganger_failure=1 fi done + if [[ $doppelganger_failure -eq 1 ]]; then + exit_and_dump_logs 1 + fi + # Sleep two epochs, then make sure all validators were active in epoch 4. Use # `is_previous_epoch_target_attester` from epoch 5 for a complete view of epoch 4 inclusion. # @@ -126,30 +161,18 @@ if [[ "$BEHAVIOR" == "success" ]]; then sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) for val in 0x*; do [[ -e $val ]] || continue - curl -s localhost:9100/lighthouse/validator_inclusion/5/$val | jq | grep -q '"is_previous_epoch_target_attester": true' - IS_ATTESTER=$? - if [[ $IS_ATTESTER -eq 0 ]]; then + is_attester=$(run_command_without_exit "curl -s $BN1_HTTP_ADDRESS/lighthouse/validator_inclusion/5/$val | jq | grep -q '\"is_previous_epoch_target_attester\": true'") + if [[ $is_attester -eq 0 ]]; then echo "$val attested in epoch 4." else echo "ERROR! $val did not attest in epoch 4." - DOPPELGANGER_FAILURE=1 + doppelganger_failure=1 fi done - echo "Shutting down" - - # Cleanup - cd $PREVIOUS_DIR - - killall geth - killall lighthouse - killall bootnode - - echo "Done" - - if [[ $DOPPELGANGER_FAILURE -eq 1 ]]; then - exit 1 + if [[ $doppelganger_failure -eq 1 ]]; then + exit_and_dump_logs 1 fi fi -exit 0 +exit_and_dump_logs 0 diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json deleted file mode 100644 index bfbc08c81e5..00000000000 --- a/scripts/tests/genesis.json +++ /dev/null @@ -1,856 +0,0 @@ -{ - "config": { - "chainId": 4242, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "mergeForkBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "terminalTotalDifficulty": 0, - "terminalTotalDifficultyPassed": true - }, - "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x6d6172697573766477000000" - }, - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b
50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b50516040805160208181019490945280820192909252805180830382018152606
09092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b604080516008808252818301909252606091602082018180
36833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - 
"0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0x9a4aa7d9C2F6386e5F24d790eB2FFB9fd543A170": { - "balance": "1000000000000000000000000000" - }, - "0x5E3141B900ac5f5608b0d057D10d45a0e4927cD9": { - "balance": "1000000000000000000000000000" - }, - "0x7cF5Dbc49F0904065664b5B6C0d69CaB55F33988": { - "balance": "1000000000000000000000000000" - }, - "0x8D12b071A6F3823A535D38C4a583a2FA1859e822": { - "balance": "1000000000000000000000000000" - }, - "0x3B575D3cda6b30736A38B031E0d245E646A21135": { - "balance": "1000000000000000000000000000" - }, - "0x53bDe6CF93461674F590E532006b4022dA57A724": { - "balance": 
"1000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x01", - "extraData": "", - "gasLimit": "0x400000", - "nonce": "0x1234", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "1662465600" -} diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml new file mode 100644 index 00000000000..21114df0e82 --- /dev/null +++ b/scripts/tests/network_params.yaml @@ -0,0 +1,16 @@ +# Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). +participants: + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 4 +network_params: + deneb_fork_epoch: 0 + seconds_per_slot: 3 + num_validator_keys_per_node: 20 +global_log_level: debug +snooper_enabled: false +additional_services: [] diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env deleted file mode 100644 index 4d8f9db64e4..00000000000 --- a/scripts/tests/vars.env +++ /dev/null @@ -1,66 +0,0 @@ -# Path to the geth binary -GETH_BINARY=geth -EL_BOOTNODE_BINARY=bootnode - -# Base directories for the validator keys and secrets -DATADIR=~/.lighthouse/local-testnet - -# Directory for the eth2 config -TESTNET_DIR=$DATADIR/testnet - -EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" - -# Hardcoded deposit contract -DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 - -GENESIS_FORK_VERSION=0x42424242 - -# Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=7a5c656343c3a66dcf75415958b500e8873f9dab0cd588e6cf0785b52a06dd34 - -VALIDATOR_COUNT=80 -GENESIS_VALIDATOR_COUNT=80 - -# Number of beacon_node instances that you intend to run -BN_COUNT=4 - -# Number of validator clients -VC_COUNT=$BN_COUNT - -# Number of seconds to delay to start genesis block. -# If started by a script this can be 0, if starting by hand -# use something like 180. 
-GENESIS_DELAY=0 - -# Port for P2P communication with bootnode -BOOTNODE_PORT=4242 - -# Network ID and Chain ID of local eth1 test network -CHAIN_ID=4242 - -# Hard fork configuration -ALTAIR_FORK_EPOCH=0 -BELLATRIX_FORK_EPOCH=0 -CAPELLA_FORK_EPOCH=0 -DENEB_FORK_EPOCH=0 -ELECTRA_FORK_EPOCH=18446744073709551615 - -TTD=0 - -# Spec version (mainnet or minimal) -SPEC_PRESET=mainnet - -# Seconds per Eth2 slot -SECONDS_PER_SLOT=3 - -# Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=1 - -# Proposer score boost percentage -PROPOSER_SCORE_BOOST=70 - -# Command line arguments for beacon node client -BN_ARGS="" - -# Enable doppelganger detection -VC_ARGS=" --enable-doppelganger-protection " diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 90fb54cd1ab..ad0bb00963f 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -8,11 +8,13 @@ edition = { workspace = true } default = ["lmdb"] mdbx = ["dep:mdbx"] lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] +redb = ["dep:redb"] portable = ["types/portable"] [dependencies] bincode = { workspace = true } byteorder = { workspace = true } +derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } @@ -25,17 +27,19 @@ rand = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } -sloggers = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } strum = { workspace = true } +ssz_types = { workspace = true } # MDBX is pinned at the last version with Windows and macOS support. -mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true } +mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +redb = { version = "2.1", optional = true } + [dev-dependencies] maplit = { workspace = true } rayon = { workspace = true } diff --git a/slasher/src/array.rs b/slasher/src/array.rs index b733b07c63f..77ddceb85fe 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -226,12 +226,12 @@ impl TargetArrayChunk for MinTargetChunk { ) -> Result, Error> { let min_target = self.chunk - .get_target(validator_index, attestation.data.source.epoch, config)?; - if attestation.data.target.epoch > min_target { + .get_target(validator_index, attestation.data().source.epoch, config)?; + if attestation.data().target.epoch > min_target { let existing_attestation = db.get_attestation_for_validator(txn, validator_index, min_target)?; - if attestation.data.source.epoch < existing_attestation.data.source.epoch { + if attestation.data().source.epoch < existing_attestation.data().source.epoch { Ok(AttesterSlashingStatus::SurroundsExisting(Box::new( existing_attestation, ))) @@ -329,12 +329,12 @@ impl TargetArrayChunk for MaxTargetChunk { ) -> Result, Error> { let max_target = self.chunk - .get_target(validator_index, attestation.data.source.epoch, config)?; - if attestation.data.target.epoch < max_target { + .get_target(validator_index, attestation.data().source.epoch, config)?; + if attestation.data().target.epoch < max_target { let existing_attestation = 
db.get_attestation_for_validator(txn, validator_index, max_target)?; - if existing_attestation.data.source.epoch < attestation.data.source.epoch { + if existing_attestation.data().source.epoch < attestation.data().source.epoch { Ok(AttesterSlashingStatus::SurroundedByExisting(Box::new( existing_attestation, ))) @@ -428,7 +428,7 @@ pub fn apply_attestation_for_validator( current_epoch: Epoch, config: &Config, ) -> Result, Error> { - let mut chunk_index = config.chunk_index(attestation.data.source.epoch); + let mut chunk_index = config.chunk_index(attestation.data().source.epoch); let mut current_chunk = get_chunk_for_update( db, txn, @@ -446,7 +446,7 @@ pub fn apply_attestation_for_validator( } let Some(mut start_epoch) = - T::first_start_epoch(attestation.data.source.epoch, current_epoch, config) + T::first_start_epoch(attestation.data().source.epoch, current_epoch, config) else { return Ok(slashing_status); }; @@ -465,7 +465,7 @@ pub fn apply_attestation_for_validator( chunk_index, validator_index, start_epoch, - attestation.data.target.epoch, + attestation.data().target.epoch, current_epoch, config, )?; @@ -492,7 +492,7 @@ pub fn update( let mut chunk_attestations = BTreeMap::new(); for attestation in batch { chunk_attestations - .entry(config.chunk_index(attestation.indexed.data.source.epoch)) + .entry(config.chunk_index(attestation.indexed.data().source.epoch)) .or_insert_with(Vec::new) .push(attestation); } diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs index 3d23932df9f..62a1bb09455 100644 --- a/slasher/src/attestation_queue.rs +++ b/slasher/src/attestation_queue.rs @@ -47,7 +47,8 @@ impl AttestationBatch { self.attestations.push(Arc::downgrade(&indexed_record)); let attestation_data_hash = indexed_record.record.attestation_data_hash; - for &validator_index in &indexed_record.indexed.attesting_indices { + + for &validator_index in indexed_record.indexed.attesting_indices_iter() { self.attesters .entry((validator_index, attestation_data_hash)) .and_modify(|existing_entry| { @@ -56,8 +57,8 @@ impl AttestationBatch { // smaller indexed attestation. Single-bit attestations will usually be removed // completely by this process, and aggregates will only be retained if they // are not redundant with respect to a larger aggregate seen in the same batch. 
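The `and_modify` logic above keeps, for each (validator index, attestation data hash) pair, only the indexed attestation with the most attesting indices, so single-bit attestations are dropped in favour of any larger aggregate seen in the same batch. A minimal, self-contained sketch of that dedup rule follows, using simplified stand-in types rather than the real `IndexedAttesterRecord` (the struct and field names below are illustrative only):

use std::collections::HashMap;

// Stand-in for an indexed attestation: the validators that attested, plus a
// hash identifying the attestation data they attested to.
#[derive(Clone, Debug)]
struct SimpleIndexedAttestation {
    attesting_indices: Vec<u64>,
    data_hash: u64,
}

// Keep, per (validator, data_hash), only the largest aggregate seen in the batch,
// mirroring the `and_modify` branch in the diff above.
fn dedup_batch(
    batch: &[SimpleIndexedAttestation],
) -> HashMap<(u64, u64), SimpleIndexedAttestation> {
    let mut attesters = HashMap::new();
    for att in batch {
        for &validator_index in &att.attesting_indices {
            attesters
                .entry((validator_index, att.data_hash))
                .and_modify(|existing: &mut SimpleIndexedAttestation| {
                    if existing.attesting_indices.len() < att.attesting_indices.len() {
                        *existing = att.clone();
                    }
                })
                .or_insert_with(|| att.clone());
        }
    }
    attesters
}

fn main() {
    let single = SimpleIndexedAttestation { attesting_indices: vec![1], data_hash: 7 };
    let aggregate = SimpleIndexedAttestation { attesting_indices: vec![1, 2, 3], data_hash: 7 };
    let kept = dedup_batch(&[single, aggregate]);
    // Validator 1's entry now points at the larger aggregate.
    assert_eq!(kept[&(1, 7)].attesting_indices.len(), 3);
}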
- if existing_entry.indexed.attesting_indices.len() - < indexed_record.indexed.attesting_indices.len() + if existing_entry.indexed.attesting_indices_len() + < indexed_record.indexed.attesting_indices_len() { *existing_entry = indexed_record.clone(); } diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 56fdcb809ff..1cd4ba7d4e0 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -80,18 +80,20 @@ impl IndexedAttesterRecord { #[derive(Debug, Clone, Encode, Decode, TreeHash)] struct IndexedAttestationHeader { - pub attesting_indices: VariableList, + pub attesting_indices: VariableList, pub data_root: Hash256, pub signature: AggregateSignature, } impl From> for AttesterRecord { fn from(indexed_attestation: IndexedAttestation) -> AttesterRecord { - let attestation_data_hash = indexed_attestation.data.tree_hash_root(); + let attestation_data_hash = indexed_attestation.data().tree_hash_root(); + let attesting_indices = + VariableList::new(indexed_attestation.attesting_indices_to_vec()).unwrap_or_default(); let header = IndexedAttestationHeader:: { - attesting_indices: indexed_attestation.attesting_indices, + attesting_indices, data_root: attestation_data_hash, - signature: indexed_attestation.signature, + signature: indexed_attestation.signature().clone(), }; let indexed_attestation_hash = header.tree_hash_root(); AttesterRecord { @@ -104,15 +106,15 @@ impl From> for AttesterRecord { #[cfg(test)] mod test { use super::*; - use crate::test_utils::indexed_att; + use crate::test_utils::indexed_att_electra; // Check correctness of fast hashing #[test] fn fast_hash() { let data = vec![ - indexed_att(vec![], 0, 0, 0), - indexed_att(vec![1, 2, 3], 12, 14, 1), - indexed_att(vec![4], 0, 5, u64::MAX), + indexed_att_electra(vec![], 0, 0, 0), + indexed_att_electra(vec![1, 2, 3], 12, 14, 1), + indexed_att_electra(vec![4], 0, 5, u64::MAX), ]; for att in data { assert_eq!( diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 4fd74343e76..33d68fa0e5d 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -11,20 +11,23 @@ pub const DEFAULT_VALIDATOR_CHUNK_SIZE: usize = 256; pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_SLOT_OFFSET: f64 = 10.5; -pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB +pub const DEFAULT_MAX_DB_SIZE: usize = 512 * 1024; // 512 GiB pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000); pub const DEFAULT_BROADCAST: bool = false; -#[cfg(all(feature = "mdbx", not(feature = "lmdb")))] +#[cfg(all(feature = "mdbx", not(any(feature = "lmdb", feature = "redb"))))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Mdbx; #[cfg(feature = "lmdb")] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Lmdb; -#[cfg(not(any(feature = "mdbx", feature = "lmdb")))] +#[cfg(all(feature = "redb", not(any(feature = "mdbx", feature = "lmdb"))))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Redb; +#[cfg(not(any(feature = "mdbx", feature = "lmdb", feature = "redb")))] pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Disabled; pub const MAX_HISTORY_LENGTH: usize = 1 << 16; pub const MEGABYTE: usize = 1 << 20; pub const MDBX_DATA_FILENAME: &str = "mdbx.dat"; +pub const REDB_DATA_FILENAME: &str = "slasher.redb"; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -64,6 +67,8 @@ pub enum DatabaseBackend { Mdbx, #[cfg(feature = "lmdb")] Lmdb, + #[cfg(feature 
= "redb")] + Redb, Disabled, } @@ -166,8 +171,7 @@ impl Config { validator_chunk_index: usize, ) -> impl Iterator + 'a { attestation - .attesting_indices - .iter() + .attesting_indices_iter() .filter(move |v| self.validator_chunk_index(**v) == validator_chunk_index) .copied() } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 49d2b00a4cd..4f4729a123f 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,6 +1,7 @@ pub mod interface; mod lmdb_impl; mod mdbx_impl; +mod redb_impl; use crate::{ metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, @@ -13,12 +14,15 @@ use parking_lot::Mutex; use serde::de::DeserializeOwned; use slog::{info, Logger}; use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; use std::sync::Arc; use tree_hash::TreeHash; use types::{ - Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot, + AggregateSignature, AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, + IndexedAttestationBase, IndexedAttestationElectra, ProposerSlashing, SignedBeaconBlockHeader, + Slot, VariableList, }; /// Current database schema version, to check compatibility of on-disk DB with software. @@ -69,6 +73,7 @@ pub struct SlasherDB { /// LRU cache mapping indexed attestation IDs to their attestation data roots. attestation_root_cache: Mutex>, pub(crate) config: Arc, + pub(crate) spec: Arc, _phantom: PhantomData, } @@ -235,6 +240,43 @@ impl AsRef<[u8]> for IndexedAttestationId { } } +/// Indexed attestation that abstracts over Phase0 and Electra variants by using a plain `Vec` for +/// the attesting indices. +/// +/// This allows us to avoid rewriting the entire indexed attestation database at Electra, which +/// saves a lot of execution time. The bytes that it encodes to are the same as the bytes that a +/// regular IndexedAttestation encodes to, because SSZ doesn't care about the length-bound. +#[derive(Debug, PartialEq, Decode, Encode)] +pub struct IndexedAttestationOnDisk { + attesting_indices: Vec, + data: AttestationData, + signature: AggregateSignature, +} + +impl IndexedAttestationOnDisk { + fn into_indexed_attestation( + self, + spec: &ChainSpec, + ) -> Result, Error> { + let fork_at_target_epoch = spec.fork_name_at_epoch(self.data.target.epoch); + if fork_at_target_epoch.electra_enabled() { + let attesting_indices = VariableList::new(self.attesting_indices)?; + Ok(IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices, + data: self.data, + signature: self.signature, + })) + } else { + let attesting_indices = VariableList::new(self.attesting_indices)?; + Ok(IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices, + data: self.data, + signature: self.signature, + })) + } + } +} + /// Bincode deserialization specialised to `Cow<[u8]>`. fn bincode_deserialize(bytes: Cow<[u8]>) -> Result { Ok(bincode::deserialize(bytes.borrow())?) @@ -245,7 +287,7 @@ fn ssz_decode(bytes: Cow<[u8]>) -> Result { } impl SlasherDB { - pub fn open(config: Arc, log: Logger) -> Result { + pub fn open(config: Arc, spec: Arc, log: Logger) -> Result { info!(log, "Opening slasher database"; "backend" => %config.backend); std::fs::create_dir_all(&config.database_path)?; @@ -268,6 +310,7 @@ impl SlasherDB { databases, attestation_root_cache, config, + spec, _phantom: PhantomData, }; @@ -438,7 +481,7 @@ impl SlasherDB { ) -> Result { // Look-up ID by hash. 
let id_key = IndexedAttestationIdKey::new( - indexed_attestation.data.target.epoch, + indexed_attestation.data().target.epoch, indexed_attestation_hash, ); @@ -447,8 +490,7 @@ impl SlasherDB { } // Store the new indexed attestation at the end of the current table. - let db = &self.databases.indexed_attestation_db; - let mut cursor = txn.cursor(db)?; + let mut cursor = txn.cursor(&self.databases.indexed_attestation_db)?; let indexed_att_id = match cursor.last_key()? { // First ID is 1 so that 0 can be used to represent `null` in `CompactAttesterRecord`. @@ -457,11 +499,11 @@ impl SlasherDB { }; let attestation_key = IndexedAttestationId::new(indexed_att_id); + // IndexedAttestationOnDisk and IndexedAttestation have compatible encodings. let data = indexed_attestation.as_ssz_bytes(); cursor.put(attestation_key.as_ref(), &data)?; drop(cursor); - // Update the (epoch, hash) to ID mapping. self.put_indexed_attestation_id(txn, &id_key, attestation_key)?; @@ -481,7 +523,8 @@ impl SlasherDB { .ok_or(Error::MissingIndexedAttestation { id: indexed_attestation_id.as_u64(), })?; - ssz_decode(bytes) + let indexed_attestation_on_disk: IndexedAttestationOnDisk = ssz_decode(bytes)?; + indexed_attestation_on_disk.into_indexed_attestation(&self.spec) } fn get_attestation_data_root( @@ -500,7 +543,7 @@ impl SlasherDB { // Otherwise, load the indexed attestation, compute the root and cache it. let indexed_attestation = self.get_indexed_attestation(txn, indexed_id)?; - let attestation_data_root = indexed_attestation.data.tree_hash_root(); + let attestation_data_root = indexed_attestation.data().tree_hash_root(); cache.put(indexed_id, attestation_data_root); @@ -536,7 +579,7 @@ impl SlasherDB { indexed_attestation_id: IndexedAttestationId, ) -> Result, Error> { // See if there's an existing attestation for this attester. - let target_epoch = attestation.data.target.epoch; + let target_epoch = attestation.data().target.epoch; let prev_max_target = self.get_attester_max_target(validator_index, txn)?; @@ -699,21 +742,17 @@ impl SlasherDB { return Ok(()); } - loop { - let (key_bytes, _) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; - - let (slot, _) = ProposerKey::parse(key_bytes)?; + let should_delete = |key: &[u8]| -> Result { + let mut should_delete = false; + let (slot, _) = ProposerKey::parse(Cow::from(key))?; if slot < min_slot { - cursor.delete_current()?; - - // End the loop if there is no next entry. - if cursor.next_key()?.is_none() { - break; - } - } else { - break; + should_delete = true; } - } + + Ok(should_delete) + }; + + cursor.delete_while(should_delete)?; Ok(()) } @@ -727,9 +766,6 @@ impl SlasherDB { .saturating_add(1u64) .saturating_sub(self.config.history_length as u64); - // Collect indexed attestation IDs to delete. - let mut indexed_attestation_ids = vec![]; - let mut cursor = txn.cursor(&self.databases.indexed_attestation_id_db)?; // Position cursor at first key, bailing out if the database is empty. @@ -737,27 +773,20 @@ impl SlasherDB { return Ok(()); } - loop { - let (key_bytes, value) = cursor - .get_current()? 
- .ok_or(Error::MissingIndexedAttestationIdKey)?; - - let (target_epoch, _) = IndexedAttestationIdKey::parse(key_bytes)?; - + let should_delete = |key: &[u8]| -> Result { + let (target_epoch, _) = IndexedAttestationIdKey::parse(Cow::from(key))?; if target_epoch < min_epoch { - indexed_attestation_ids.push(IndexedAttestationId::new( - IndexedAttestationId::parse(value)?, - )); + return Ok(true); + } - cursor.delete_current()?; + Ok(false) + }; - if cursor.next_key()?.is_none() { - break; - } - } else { - break; - } - } + let indexed_attestation_ids = cursor + .delete_while(should_delete)? + .into_iter() + .map(|id| IndexedAttestationId::parse(id).map(IndexedAttestationId::new)) + .collect::, Error>>()?; drop(cursor); // Delete the indexed attestations. @@ -771,3 +800,93 @@ impl SlasherDB { Ok(()) } } + +#[cfg(test)] +mod test { + use super::*; + use types::{Checkpoint, ForkName, MainnetEthSpec, Unsigned}; + + type E = MainnetEthSpec; + + fn indexed_attestation_on_disk_roundtrip_test( + spec: &ChainSpec, + make_attestation: fn( + Vec, + AttestationData, + AggregateSignature, + ) -> IndexedAttestation, + committee_len: u64, + ) { + let attestation_data = AttestationData { + slot: Slot::new(1000), + index: 0, + beacon_block_root: Hash256::repeat_byte(0xaa), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::repeat_byte(0xbb), + }, + target: Checkpoint { + epoch: Epoch::new(31), + root: Hash256::repeat_byte(0xcc), + }, + }; + + let attesting_indices = (0..committee_len).collect::>(); + let signature = AggregateSignature::infinity(); + + let fork_attestation = make_attestation( + attesting_indices.clone(), + attestation_data.clone(), + signature.clone(), + ); + + let on_disk = IndexedAttestationOnDisk { + attesting_indices, + data: attestation_data, + signature, + }; + let encoded = on_disk.as_ssz_bytes(); + assert_eq!(encoded, fork_attestation.as_ssz_bytes()); + + let decoded_on_disk = IndexedAttestationOnDisk::from_ssz_bytes(&encoded).unwrap(); + assert_eq!(decoded_on_disk, on_disk); + + let decoded = on_disk.into_indexed_attestation(spec).unwrap(); + assert_eq!(decoded, fork_attestation); + } + + /// Check that `IndexedAttestationOnDisk` and `IndexedAttestation` have compatible encodings. 
+ #[test] + fn indexed_attestation_on_disk_roundtrip_base() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let make_attestation = |attesting_indices, data, signature| { + IndexedAttestation::::Base(IndexedAttestationBase { + attesting_indices: VariableList::new(attesting_indices).unwrap(), + data, + signature, + }) + }; + indexed_attestation_on_disk_roundtrip_test( + &spec, + make_attestation, + ::MaxValidatorsPerCommittee::to_u64(), + ) + } + + #[test] + fn indexed_attestation_on_disk_roundtrip_electra() { + let spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + let make_attestation = |attesting_indices, data, signature| { + IndexedAttestation::::Electra(IndexedAttestationElectra { + attesting_indices: VariableList::new(attesting_indices).unwrap(), + data, + signature, + }) + }; + indexed_attestation_on_disk_roundtrip_test( + &spec, + make_attestation, + ::MaxValidatorsPerSlot::to_u64(), + ) + } +} diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs index 5bb920383c3..46cf9a4a0c3 100644 --- a/slasher/src/database/interface.rs +++ b/slasher/src/database/interface.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; use crate::database::lmdb_impl; #[cfg(feature = "mdbx")] use crate::database::mdbx_impl; +#[cfg(feature = "redb")] +use crate::database::redb_impl; #[derive(Debug)] pub enum Environment { @@ -14,6 +16,8 @@ pub enum Environment { Mdbx(mdbx_impl::Environment), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Environment), + #[cfg(feature = "redb")] + Redb(redb_impl::Environment), Disabled, } @@ -23,6 +27,8 @@ pub enum RwTransaction<'env> { Mdbx(mdbx_impl::RwTransaction<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::RwTransaction<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::RwTransaction<'env>), Disabled(PhantomData<&'env ()>), } @@ -32,6 +38,8 @@ pub enum Database<'env> { Mdbx(mdbx_impl::Database<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Database<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::Database<'env>), Disabled(PhantomData<&'env ()>), } @@ -54,6 +62,8 @@ pub enum Cursor<'env> { Mdbx(mdbx_impl::Cursor<'env>), #[cfg(feature = "lmdb")] Lmdb(lmdb_impl::Cursor<'env>), + #[cfg(feature = "redb")] + Redb(redb_impl::Cursor<'env>), Disabled(PhantomData<&'env ()>), } @@ -67,6 +77,8 @@ impl Environment { DatabaseBackend::Mdbx => mdbx_impl::Environment::new(config).map(Environment::Mdbx), #[cfg(feature = "lmdb")] DatabaseBackend::Lmdb => lmdb_impl::Environment::new(config).map(Environment::Lmdb), + #[cfg(feature = "redb")] + DatabaseBackend::Redb => redb_impl::Environment::new(config).map(Environment::Redb), DatabaseBackend::Disabled => Err(Error::SlasherDatabaseBackendDisabled), } } @@ -77,6 +89,8 @@ impl Environment { Self::Mdbx(env) => env.create_databases(), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.create_databases(), + #[cfg(feature = "redb")] + Self::Redb(env) => env.create_databases(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -87,6 +101,8 @@ impl Environment { Self::Mdbx(env) => env.begin_rw_txn().map(RwTransaction::Mdbx), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.begin_rw_txn().map(RwTransaction::Lmdb), + #[cfg(feature = "redb")] + Self::Redb(env) => env.begin_rw_txn().map(RwTransaction::Redb), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -98,6 +114,8 @@ impl Environment { Self::Mdbx(env) => env.filenames(config), #[cfg(feature = "lmdb")] Self::Lmdb(env) => env.filenames(config), + #[cfg(feature = "redb")] + Self::Redb(env) => env.filenames(config), _ => vec![], } } @@ -106,7 
+124,7 @@ impl Environment { impl<'env> RwTransaction<'env> { pub fn get + ?Sized>( &'env self, - db: &Database<'env>, + db: &'env Database, key: &K, ) -> Result>, Error> { match (self, db) { @@ -114,6 +132,8 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), Database::Mdbx(db)) => txn.get(db, key), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.get(db, key), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.get(db, key), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -129,6 +149,8 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), Database::Mdbx(db)) => txn.put(db, key, value), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.put(db, key, value), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.put(db, key, value), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -139,26 +161,32 @@ impl<'env> RwTransaction<'env> { (Self::Mdbx(txn), Database::Mdbx(db)) => txn.del(db, key), #[cfg(feature = "lmdb")] (Self::Lmdb(txn), Database::Lmdb(db)) => txn.del(db, key), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.del(db, key), _ => Err(Error::MismatchedDatabaseVariant), } } - pub fn cursor<'a>(&'a mut self, db: &Database) -> Result, Error> { - match (self, db) { + pub fn commit(self) -> Result<(), Error> { + match self { #[cfg(feature = "mdbx")] - (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), + Self::Mdbx(txn) => txn.commit(), #[cfg(feature = "lmdb")] - (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + Self::Lmdb(txn) => txn.commit(), + #[cfg(feature = "redb")] + Self::Redb(txn) => txn.commit(), _ => Err(Error::MismatchedDatabaseVariant), } } - pub fn commit(self) -> Result<(), Error> { - match self { + pub fn cursor<'a>(&'a mut self, db: &'a Database) -> Result, Error> { + match (self, db) { #[cfg(feature = "mdbx")] - Self::Mdbx(txn) => txn.commit(), + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), #[cfg(feature = "lmdb")] - Self::Lmdb(txn) => txn.commit(), + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + #[cfg(feature = "redb")] + (Self::Redb(txn), Database::Redb(db)) => txn.cursor(db).map(Cursor::Redb), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -172,6 +200,8 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.first_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.first_key(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.first_key(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -183,6 +213,8 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.last_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.last_key(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.last_key(), _ => Err(Error::MismatchedDatabaseVariant), } } @@ -193,37 +225,47 @@ impl<'env> Cursor<'env> { Cursor::Mdbx(cursor) => cursor.next_key(), #[cfg(feature = "lmdb")] Cursor::Lmdb(cursor) => cursor.next_key(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.next_key(), _ => Err(Error::MismatchedDatabaseVariant), } } - /// Get the key value pair at the current position. 
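The cursor methods here, together with the `delete_while` helper added just below, replace the manual get-current/delete-current/next-key loops previously used to prune old proposer and attestation data. A minimal sketch of that pattern over a plain `BTreeMap` stand-in (illustrative only, not the real LMDB/MDBX/redb cursor API):

use std::collections::BTreeMap;

// Walk keys in ascending order, delete while the predicate holds, and return
// the values of the deleted entries, as the new `delete_while` does for pruning.
fn delete_while<K: Ord + Clone, V>(
    map: &mut BTreeMap<K, V>,
    mut should_delete: impl FnMut(&K) -> bool,
) -> Vec<V> {
    let mut deleted = Vec::new();
    while let Some(key) = map.keys().next().cloned() {
        if !should_delete(&key) {
            break;
        }
        if let Some(value) = map.remove(&key) {
            deleted.push(value);
        }
    }
    deleted
}

fn main() {
    let mut proposers = BTreeMap::from([
        (1u64, "block at slot 1"),
        (2, "block at slot 2"),
        (9, "block at slot 9"),
    ]);
    // Prune everything below slot 5, as the slasher does once history expires.
    let pruned = delete_while(&mut proposers, |slot| *slot < 5);
    assert_eq!(pruned, vec!["block at slot 1", "block at slot 2"]);
    assert_eq!(proposers.len(), 1);
}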
- pub fn get_current(&mut self) -> Result, Error> { + pub fn delete_current(&mut self) -> Result<(), Error> { match self { #[cfg(feature = "mdbx")] - Cursor::Mdbx(cursor) => cursor.get_current(), + Cursor::Mdbx(cursor) => cursor.delete_current(), #[cfg(feature = "lmdb")] - Cursor::Lmdb(cursor) => cursor.get_current(), + Cursor::Lmdb(cursor) => cursor.delete_current(), + #[cfg(feature = "redb")] + Cursor::Redb(cursor) => cursor.delete_current(), _ => Err(Error::MismatchedDatabaseVariant), } } - pub fn delete_current(&mut self) -> Result<(), Error> { + pub fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { match self { #[cfg(feature = "mdbx")] - Cursor::Mdbx(cursor) => cursor.delete_current(), + Self::Mdbx(cursor) => cursor.put(key, value), #[cfg(feature = "lmdb")] - Cursor::Lmdb(cursor) => cursor.delete_current(), + Self::Lmdb(cursor) => cursor.put(key, value), + #[cfg(feature = "redb")] + Self::Redb(cursor) => cursor.put(key, value), _ => Err(Error::MismatchedDatabaseVariant), } } - pub fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + pub fn delete_while( + &mut self, + f: impl Fn(&[u8]) -> Result, + ) -> Result>, Error> { match self { #[cfg(feature = "mdbx")] - Self::Mdbx(cursor) => cursor.put(key, value), + Self::Mdbx(txn) => txn.delete_while(f), #[cfg(feature = "lmdb")] - Self::Lmdb(cursor) => cursor.put(key, value), + Self::Lmdb(txn) => txn.delete_while(f), + #[cfg(feature = "redb")] + Self::Redb(txn) => txn.delete_while(f), _ => Err(Error::MismatchedDatabaseVariant), } } diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs index 78deaf17676..20d89a36fb0 100644 --- a/slasher/src/database/lmdb_impl.rs +++ b/slasher/src/database/lmdb_impl.rs @@ -100,7 +100,7 @@ impl Environment { impl<'env> RwTransaction<'env> { pub fn get + ?Sized>( &'env self, - db: &Database<'env>, + db: &'env Database, key: &K, ) -> Result>, Error> { Ok(self.txn.get(db.db, key).optional()?.map(Cow::Borrowed)) @@ -182,6 +182,29 @@ impl<'env> Cursor<'env> { .put(&key, &value, RwTransaction::write_flags())?; Ok(()) } + + pub fn delete_while( + &mut self, + f: impl Fn(&[u8]) -> Result, + ) -> Result>, Error> { + let mut result = vec![]; + + loop { + let (key_bytes, value) = self.get_current()?.ok_or(Error::MissingKey)?; + + if f(&key_bytes)? { + result.push(value); + self.delete_current()?; + if self.next_key()?.is_none() { + break; + } + } else { + break; + } + } + + Ok(result) + } } /// Mix-in trait for loading values from LMDB that may or may not exist. diff --git a/slasher/src/database/mdbx_impl.rs b/slasher/src/database/mdbx_impl.rs index d25f17e7acf..e249de963f6 100644 --- a/slasher/src/database/mdbx_impl.rs +++ b/slasher/src/database/mdbx_impl.rs @@ -113,7 +113,7 @@ impl<'env> RwTransaction<'env> { pub fn get + ?Sized>( &'env self, - db: &Database<'env>, + db: &'env Database, key: &K, ) -> Result>, Error> { Ok(self.txn.get(&db.db, key.as_ref())?) @@ -183,4 +183,27 @@ impl<'env> Cursor<'env> { .put(key.as_ref(), value.as_ref(), RwTransaction::write_flags())?; Ok(()) } + + pub fn delete_while( + &mut self, + f: impl Fn(&[u8]) -> Result, + ) -> Result>, Error> { + let mut result = vec![]; + + loop { + let (key_bytes, value) = self.get_current()?.ok_or(Error::MissingKey)?; + + if f(&key_bytes)? 
{ + result.push(value); + self.delete_current()?; + if self.next_key()?.is_none() { + break; + } + } else { + break; + } + } + + Ok(result) + } } diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs new file mode 100644 index 00000000000..6c5b62a44fd --- /dev/null +++ b/slasher/src/database/redb_impl.rs @@ -0,0 +1,276 @@ +#![cfg(feature = "redb")] +use crate::{ + config::REDB_DATA_FILENAME, + database::{ + interface::{Key, OpenDatabases, Value}, + *, + }, + Config, Error, +}; +use derivative::Derivative; +use redb::{ReadableTable, TableDefinition}; +use std::{borrow::Cow, path::PathBuf}; + +#[derive(Debug)] +pub struct Environment { + _db_count: usize, + db: redb::Database, +} + +#[derive(Debug)] +pub struct Database<'env> { + table_name: String, + _phantom: PhantomData<&'env ()>, +} + +#[derive(Derivative)] +#[derivative(Debug)] +pub struct RwTransaction<'env> { + #[derivative(Debug = "ignore")] + txn: redb::WriteTransaction, + _phantom: PhantomData<&'env ()>, +} + +#[derive(Derivative)] +#[derivative(Debug)] +pub struct Cursor<'env> { + #[derivative(Debug = "ignore")] + txn: &'env redb::WriteTransaction, + db: &'env Database<'env>, + current_key: Option>, +} + +impl Environment { + pub fn new(config: &Config) -> Result { + let db_path = config.database_path.join(REDB_DATA_FILENAME); + let database = redb::Database::create(db_path)?; + + Ok(Environment { + _db_count: MAX_NUM_DBS, + db: database, + }) + } + + pub fn create_databases(&self) -> Result { + let indexed_attestation_db = self.create_table(INDEXED_ATTESTATION_DB)?; + let indexed_attestation_id_db = self.create_table(INDEXED_ATTESTATION_ID_DB)?; + let attesters_db = self.create_table(ATTESTERS_DB)?; + let attesters_max_targets_db = self.create_table(ATTESTERS_MAX_TARGETS_DB)?; + let min_targets_db = self.create_table(MIN_TARGETS_DB)?; + let max_targets_db = self.create_table(MAX_TARGETS_DB)?; + let current_epochs_db = self.create_table(CURRENT_EPOCHS_DB)?; + let proposers_db = self.create_table(PROPOSERS_DB)?; + let metadata_db = self.create_table(METADATA_DB)?; + + Ok(OpenDatabases { + indexed_attestation_db, + indexed_attestation_id_db, + attesters_db, + attesters_max_targets_db, + min_targets_db, + max_targets_db, + current_epochs_db, + proposers_db, + metadata_db, + }) + } + + pub fn create_table<'env>( + &'env self, + table_name: &'env str, + ) -> Result, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(table_name); + let tx = self.db.begin_write()?; + tx.open_table(table_definition)?; + tx.commit()?; + + Ok(crate::Database::Redb(Database { + table_name: table_name.to_string(), + _phantom: PhantomData, + })) + } + + pub fn filenames(&self, config: &Config) -> Vec { + vec![config.database_path.join(REDB_DATA_FILENAME)] + } + + pub fn begin_rw_txn(&self) -> Result { + let mut txn = self.db.begin_write()?; + txn.set_durability(redb::Durability::Eventual); + Ok(RwTransaction { + txn, + _phantom: PhantomData, + }) + } +} + +impl<'env> RwTransaction<'env> { + pub fn get + ?Sized>( + &'env self, + db: &'env Database, + key: &K, + ) -> Result>, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&db.table_name); + let table = self.txn.open_table(table_definition)?; + let result = table.get(key.as_ref())?; + if let Some(access_guard) = result { + let value = access_guard.value().to_vec(); + Ok(Some(Cow::from(value))) + } else { + Ok(None) + } + } + + pub fn put, V: AsRef<[u8]>>( + &mut self, + db: &Database, + key: K, + 
value: V, + ) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&db.table_name); + let mut table = self.txn.open_table(table_definition)?; + table.insert(key.as_ref(), value.as_ref())?; + + Ok(()) + } + + pub fn del>(&mut self, db: &Database, key: K) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&db.table_name); + let mut table = self.txn.open_table(table_definition)?; + table.remove(key.as_ref())?; + + Ok(()) + } + + pub fn commit(self) -> Result<(), Error> { + self.txn.commit()?; + Ok(()) + } + + pub fn cursor<'a>(&'a mut self, db: &'a Database) -> Result, Error> { + Ok(Cursor { + txn: &self.txn, + db, + current_key: None, + }) + } +} + +impl<'env> Cursor<'env> { + pub fn first_key(&mut self) -> Result, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let table = self.txn.open_table(table_definition)?; + let first = table + .iter()? + .next() + .map(|x| x.map(|(key, _)| key.value().to_vec())); + + if let Some(owned_key) = first { + let owned_key = owned_key?; + self.current_key = Some(Cow::from(owned_key)); + Ok(self.current_key.clone()) + } else { + Ok(None) + } + } + + pub fn last_key(&mut self) -> Result>, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let table = self.txn.open_table(table_definition)?; + let last = table + .iter()? + .next_back() + .map(|x| x.map(|(key, _)| key.value().to_vec())); + + if let Some(owned_key) = last { + let owned_key = owned_key?; + self.current_key = Some(Cow::from(owned_key)); + return Ok(self.current_key.clone()); + } + Ok(None) + } + + pub fn get_current(&self) -> Result, Value<'env>)>, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let table = self.txn.open_table(table_definition)?; + if let Some(key) = &self.current_key { + let result = table.get(key.as_ref())?; + + if let Some(access_guard) = result { + let value = access_guard.value().to_vec(); + return Ok(Some((key.clone(), Cow::from(value)))); + } + } + Ok(None) + } + + pub fn next_key(&mut self) -> Result>, Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let table = self.txn.open_table(table_definition)?; + if let Some(current_key) = &self.current_key { + let range: std::ops::RangeFrom<&[u8]> = current_key..; + + let next = table + .range(range)? 
+ .next() + .map(|x| x.map(|(key, _)| key.value().to_vec())); + + if let Some(owned_key) = next { + let owned_key = owned_key?; + self.current_key = Some(Cow::from(owned_key)); + return Ok(self.current_key.clone()); + } + } + Ok(None) + } + + pub fn delete_current(&self) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let mut table = self.txn.open_table(table_definition)?; + if let Some(key) = &self.current_key { + table.remove(key.as_ref())?; + } + Ok(()) + } + + pub fn delete_while( + &self, + f: impl Fn(&[u8]) -> Result, + ) -> Result>, Error> { + let mut deleted_values = vec![]; + if let Some(current_key) = &self.current_key { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + + let mut table = self.txn.open_table(table_definition)?; + + let deleted = + table.extract_from_if(current_key.as_ref().., |key, _| f(key).unwrap_or(false))?; + + deleted.for_each(|result| { + if let Ok(item) = result { + let value = item.1.value().to_vec(); + deleted_values.push(Cow::from(value)); + } + }) + }; + Ok(deleted_values) + } + + pub fn put, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(&self.db.table_name); + let mut table = self.txn.open_table(table_definition)?; + table.insert(key.as_ref(), value.as_ref())?; + + Ok(()) + } +} diff --git a/slasher/src/error.rs b/slasher/src/error.rs index b939c281e9f..b2e32f3dcd2 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -8,11 +8,14 @@ pub enum Error { DatabaseMdbxError(mdbx::Error), #[cfg(feature = "lmdb")] DatabaseLmdbError(lmdb::Error), + #[cfg(feature = "redb")] + DatabaseRedbError(redb::Error), SlasherDatabaseBackendDisabled, MismatchedDatabaseVariant, DatabaseIOError(io::Error), DatabasePermissionsError(filesystem::Error), SszDecodeError(ssz::DecodeError), + SszTypesError(ssz_types::Error), BincodeError(bincode::Error), ArithError(safe_arith::ArithError), ChunkIndexOutOfBounds(usize), @@ -66,6 +69,7 @@ pub enum Error { MissingIndexedAttestationId, MissingIndexedAttestationIdKey, InconsistentAttestationDataRoot, + MissingKey, } #[cfg(feature = "mdbx")] @@ -88,6 +92,41 @@ impl From for Error { } } +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TableError) -> Self { + Error::DatabaseRedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TransactionError) -> Self { + Error::DatabaseRedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::DatabaseError) -> Self { + Error::DatabaseRedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::StorageError) -> Self { + Error::DatabaseRedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::CommitError) -> Self { + Error::DatabaseRedbError(e.into()) + } +} + impl From for Error { fn from(e: io::Error) -> Self { Error::DatabaseIOError(e) @@ -100,6 +139,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszTypesError(e) + } +} + impl From for Error { fn from(e: bincode::Error) -> Self { Error::BincodeError(e) diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 45cbef84f21..d3a26337d6a 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -1,6 +1,6 @@ #![deny(missing_debug_implementations)] #![cfg_attr( - not(any(feature 
= "mdbx", feature = "lmdb")), + not(any(feature = "mdbx", feature = "lmdb", feature = "redb")), allow(unused, clippy::drop_non_drop) )] @@ -28,7 +28,8 @@ pub use database::{ }; pub use error::Error; -use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; +use types::{AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra}; +use types::{EthSpec, IndexedAttestation, ProposerSlashing}; #[derive(Debug, PartialEq)] pub enum AttesterSlashingStatus { @@ -59,14 +60,34 @@ impl AttesterSlashingStatus { match self { NotSlashable => None, AlreadyDoubleVoted => None, - DoubleVote(existing) | SurroundedByExisting(existing) => Some(AttesterSlashing { - attestation_1: *existing, - attestation_2: new_attestation.clone(), - }), - SurroundsExisting(existing) => Some(AttesterSlashing { - attestation_1: new_attestation.clone(), - attestation_2: *existing, - }), + DoubleVote(existing) | SurroundedByExisting(existing) => { + match (&*existing, new_attestation) { + (IndexedAttestation::Base(existing_att), IndexedAttestation::Base(new)) => { + Some(AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: existing_att.clone(), + attestation_2: new.clone(), + })) + } + // A slashing involving an electra attestation type must return an `AttesterSlashingElectra` type + (_, _) => Some(AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: existing.clone().to_electra(), + attestation_2: new_attestation.clone().to_electra(), + })), + } + } + SurroundsExisting(existing) => match (&*existing, new_attestation) { + (IndexedAttestation::Base(existing_att), IndexedAttestation::Base(new)) => { + Some(AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: new.clone(), + attestation_2: existing_att.clone(), + })) + } + // A slashing involving an electra attestation type must return an `AttesterSlashingElectra` type + (_, _) => Some(AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: new_attestation.clone().to_electra(), + attestation_2: existing.clone().to_electra(), + })), + }, } } } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 066c8d63d98..0bb7c9c3ffe 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -13,7 +13,8 @@ use slog::{debug, error, info, Logger}; use std::collections::HashSet; use std::sync::Arc; use types::{ - AttesterSlashing, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, + AttesterSlashing, ChainSpec, Epoch, EthSpec, IndexedAttestation, ProposerSlashing, + SignedBeaconBlockHeader, }; #[derive(Debug)] @@ -28,10 +29,10 @@ pub struct Slasher { } impl Slasher { - pub fn open(config: Config, log: Logger) -> Result { + pub fn open(config: Config, spec: Arc, log: Logger) -> Result { config.validate()?; let config = Arc::new(config); - let db = SlasherDB::open(config.clone(), log.clone())?; + let db = SlasherDB::open(config.clone(), spec, log.clone())?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); let attestation_queue = AttestationQueue::default(); @@ -299,7 +300,7 @@ impl Slasher { self.log, "Found double-vote slashing"; "validator_index" => validator_index, - "epoch" => slashing.attestation_1.data.target.epoch, + "epoch" => slashing.attestation_1().data().target.epoch, ); slashings.insert(slashing); } @@ -325,8 +326,8 @@ impl Slasher { for indexed_record in batch { let attestation = &indexed_record.indexed; - let target_epoch = attestation.data.target.epoch; - let source_epoch = attestation.data.source.epoch; + let 
target_epoch = attestation.data().target.epoch; + let source_epoch = attestation.data().source.epoch; if source_epoch > target_epoch || source_epoch + self.config.history_length as u64 <= current_epoch diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 0011df1ffb0..453d0e66670 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,18 +1,46 @@ use std::collections::HashSet; +use std::sync::Arc; use types::{ - AggregateSignature, AttestationData, AttesterSlashing, BeaconBlockHeader, Checkpoint, Epoch, - Hash256, IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot, + indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, + AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, + AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, + IndexedAttestation, MainnetEthSpec, Signature, SignedBeaconBlockHeader, Slot, }; pub type E = MainnetEthSpec; +pub fn indexed_att_electra( + attesting_indices: impl AsRef<[u64]>, + source_epoch: u64, + target_epoch: u64, + target_root: u64, +) -> IndexedAttestation { + IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: attesting_indices.as_ref().to_vec().into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(source_epoch), + root: Hash256::from_low_u64_be(0), + }, + target: Checkpoint { + epoch: Epoch::new(target_epoch), + root: Hash256::from_low_u64_be(target_root), + }, + }, + signature: AggregateSignature::empty(), + }) +} + pub fn indexed_att( attesting_indices: impl AsRef<[u64]>, source_epoch: u64, target_epoch: u64, target_root: u64, ) -> IndexedAttestation { - IndexedAttestation { + IndexedAttestation::Base(IndexedAttestationBase { attesting_indices: attesting_indices.as_ref().to_vec().into(), data: AttestationData { slot: Slot::new(0), @@ -28,16 +56,25 @@ pub fn indexed_att( }, }, signature: AggregateSignature::empty(), - } + }) } pub fn att_slashing( attestation_1: &IndexedAttestation, attestation_2: &IndexedAttestation, ) -> AttesterSlashing { - AttesterSlashing { - attestation_1: attestation_1.clone(), - attestation_2: attestation_2.clone(), + match (attestation_1, attestation_2) { + (IndexedAttestation::Base(att1), IndexedAttestation::Base(att2)) => { + AttesterSlashing::Base(AttesterSlashingBase { + attestation_1: att1.clone(), + attestation_2: att2.clone(), + }) + } + // A slashing involving an electra attestation type must return an electra AttesterSlashing type + (_, _) => AttesterSlashing::Electra(AttesterSlashingElectra { + attestation_1: attestation_1.clone().to_electra(), + attestation_2: attestation_2.clone().to_electra(), + }), } } @@ -59,14 +96,16 @@ pub fn slashed_validators_from_slashings(slashings: &HashSet slashings .iter() .flat_map(|slashing| { - let att1 = &slashing.attestation_1; - let att2 = &slashing.attestation_2; + let att1 = slashing.attestation_1(); + let att2 = slashing.attestation_2(); assert!( att1.is_double_vote(att2) || att1.is_surround_vote(att2), "invalid slashing: {:#?}", slashing ); - hashset_intersection(&att1.attesting_indices, &att2.attesting_indices) + let attesting_indices_1 = att1.attesting_indices_to_vec(); + let attesting_indices_2 = att2.attesting_indices_to_vec(); + hashset_intersection(&attesting_indices_1, &attesting_indices_2) }) .collect() } @@ -83,9 +122,11 @@ pub fn slashed_validators_from_attestations( } if att1.is_double_vote(att2) || 
att1.is_surround_vote(att2) { + let attesting_indices_1 = att1.attesting_indices_to_vec(); + let attesting_indices_2 = att2.attesting_indices_to_vec(); slashed_validators.extend(hashset_intersection( - &att1.attesting_indices, - &att2.attesting_indices, + &attesting_indices_1, + &attesting_indices_2, )); } } @@ -105,3 +146,7 @@ pub fn block(slot: u64, proposer_index: u64, block_root: u64) -> SignedBeaconBlo signature: Signature::empty(), } } + +pub fn chain_spec() -> Arc { + Arc::new(E::default_spec()) +} diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 40d9fa511c6..cc6e57d95d7 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,11 +1,14 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use maplit::hashset; use rayon::prelude::*; use slasher::{ config::DEFAULT_CHUNK_SIZE, - test_utils::{att_slashing, indexed_att, slashed_validators_from_slashings, E}, + test_utils::{ + att_slashing, chain_spec, indexed_att, indexed_att_electra, + slashed_validators_from_slashings, E, + }, Config, Slasher, }; use std::collections::HashSet; @@ -15,23 +18,35 @@ use types::{AttesterSlashing, Epoch, IndexedAttestation}; #[test] fn double_vote_single_val() { let v = vec![99]; - let att1 = indexed_att(&v, 0, 1, 0); - let att2 = indexed_att(&v, 0, 1, 1); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 1); - slasher_test_indiv(&attestations, &slashings, 1000); + for (att1, att2) in [ + (indexed_att(&v, 0, 1, 0), indexed_att(&v, 0, 1, 1)), + ( + indexed_att_electra(&v, 0, 1, 0), + indexed_att_electra(&v, 0, 1, 1), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); + } } #[test] fn double_vote_multi_vals() { let v = vec![0, 1, 2]; - let att1 = indexed_att(&v, 0, 1, 0); - let att2 = indexed_att(&v, 0, 1, 1); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 1); - slasher_test_indiv(&attestations, &slashings, 1000); + for (att1, att2) in [ + (indexed_att(&v, 0, 1, 0), indexed_att(&v, 0, 1, 1)), + ( + indexed_att_electra(&v, 0, 1, 0), + indexed_att_electra(&v, 0, 1, 1), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); + } } // A subset of validators double vote. 
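The mixed-fork pairs exercised in the surround tests further below follow directly from the rule stated in `att_slashing` and `AttesterSlashingStatus::into_slashing`: any pair that is not (Base, Base) is converted with `to_electra()` and reported as an Electra slashing. A minimal sketch of that case, assuming only the `test_utils` helpers introduced above:

```rust
use slasher::test_utils::{att_slashing, indexed_att, indexed_att_electra};
use types::AttesterSlashing;

// Hypothetical test name; the helpers are the ones defined in `test_utils` above.
fn mixed_fork_slashing_is_electra() {
    let validators = vec![0_u64];
    // A pre-Electra attestation surrounded by an Electra attestation.
    let existing = indexed_att(&validators, 1, 2, 0);
    let surrounding = indexed_att_electra(&validators, 0, 3, 0);
    // `att_slashing` falls through to the `to_electra()` arm for any non-(Base, Base) pair.
    let slashing = att_slashing(&surrounding, &existing);
    assert!(matches!(slashing, AttesterSlashing::Electra(_)));
}
```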
@@ -39,12 +54,18 @@ fn double_vote_multi_vals() { fn double_vote_some_vals() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; - let att1 = indexed_att(v1, 0, 1, 0); - let att2 = indexed_att(v2, 0, 1, 1); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 1); - slasher_test_indiv(&attestations, &slashings, 1000); + for (att1, att2) in [ + (indexed_att(&v1, 0, 1, 0), indexed_att(&v2, 0, 1, 1)), + ( + indexed_att_electra(&v1, 0, 1, 0), + indexed_att_electra(&v2, 0, 1, 1), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); + } } // A subset of validators double vote, others vote twice for the same thing. @@ -53,13 +74,23 @@ fn double_vote_some_vals_repeat() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; let v3 = vec![1, 3, 5]; - let att1 = indexed_att(v1, 0, 1, 0); - let att2 = indexed_att(v2, 0, 1, 1); - let att3 = indexed_att(v3, 0, 1, 0); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2, att3]; - slasher_test_indiv(&attestations, &slashings, 1); - slasher_test_indiv(&attestations, &slashings, 1000); + for (att1, att2, att3) in [ + ( + indexed_att(&v1, 0, 1, 0), + indexed_att(&v2, 0, 1, 1), + indexed_att(&v3, 0, 1, 0), + ), + ( + indexed_att_electra(&v1, 0, 1, 0), + indexed_att_electra(&v2, 0, 1, 1), + indexed_att_electra(&v3, 0, 1, 0), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2, att3]; + slasher_test_indiv(&attestations, &slashings, 1); + slasher_test_indiv(&attestations, &slashings, 1000); + } } // Nobody double votes, nobody gets slashed. @@ -67,11 +98,17 @@ fn double_vote_some_vals_repeat() { fn no_double_vote_same_target() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8]; - let att1 = indexed_att(v1, 0, 1, 0); - let att2 = indexed_att(v2, 0, 1, 0); - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &hashset! {}, 1); - slasher_test_indiv(&attestations, &hashset! {}, 1000); + for (att1, att2) in [ + (indexed_att(&v1, 0, 1, 0), indexed_att(&v2, 0, 1, 0)), + ( + indexed_att_electra(&v1, 0, 1, 0), + indexed_att_electra(&v2, 0, 1, 0), + ), + ] { + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_indiv(&attestations, &hashset! {}, 1000); + } } // Two groups votes for different things, no slashings. @@ -79,73 +116,133 @@ fn no_double_vote_same_target() { fn no_double_vote_distinct_vals() { let v1 = vec![0, 1, 2, 3]; let v2 = vec![4, 5, 6, 7]; - let att1 = indexed_att(v1, 0, 1, 0); - let att2 = indexed_att(v2, 0, 1, 1); - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &hashset! {}, 1); - slasher_test_indiv(&attestations, &hashset! {}, 1000); + for (att1, att2) in [ + (indexed_att(&v1, 0, 1, 0), indexed_att(&v2, 0, 1, 0)), + ( + indexed_att_electra(&v1, 0, 1, 0), + indexed_att_electra(&v2, 0, 1, 1), + ), + ] { + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_indiv(&attestations, &hashset! {}, 1000); + } } #[test] fn no_double_vote_repeated() { let v = vec![0, 1, 2, 3, 4]; - let att1 = indexed_att(v, 0, 1, 0); - let att2 = att1.clone(); - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &hashset! 
{}, 1); - slasher_test_batch(&attestations, &hashset! {}, 1); - parallel_slasher_test(&attestations, hashset! {}, 1); + for att1 in [indexed_att(&v, 0, 1, 0), indexed_att_electra(&v, 0, 1, 0)] { + let att2 = att1.clone(); + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &hashset! {}, 1); + slasher_test_batch(&attestations, &hashset! {}, 1); + parallel_slasher_test(&attestations, hashset! {}, 1); + } } #[test] fn surrounds_existing_single_val_single_chunk() { let v = vec![0]; - let att1 = indexed_att(&v, 1, 2, 0); - let att2 = indexed_att(&v, 0, 3, 0); - let slashings = hashset![att_slashing(&att2, &att1)]; - slasher_test_indiv(&[att1, att2], &slashings, 3); + for (att1, att2) in [ + (indexed_att(&v, 1, 2, 0), indexed_att(&v, 0, 3, 0)), + (indexed_att(&v, 1, 2, 0), indexed_att_electra(&v, 0, 3, 0)), + ( + indexed_att_electra(&v, 1, 2, 0), + indexed_att_electra(&v, 0, 3, 0), + ), + ] { + let slashings = hashset![att_slashing(&att2, &att1)]; + slasher_test_indiv(&[att1, att2], &slashings, 3); + } } #[test] fn surrounds_existing_multi_vals_single_chunk() { let validators = vec![0, 16, 1024, 300_000, 300_001]; - let att1 = indexed_att(validators.clone(), 1, 2, 0); - let att2 = indexed_att(validators, 0, 3, 0); - let slashings = hashset![att_slashing(&att2, &att1)]; - slasher_test_indiv(&[att1, att2], &slashings, 3); + for (att1, att2) in [ + ( + indexed_att(&validators, 1, 2, 0), + indexed_att(&validators, 0, 3, 0), + ), + ( + indexed_att(&validators, 1, 2, 0), + indexed_att_electra(&validators, 0, 3, 0), + ), + ( + indexed_att_electra(&validators, 1, 2, 0), + indexed_att_electra(&validators, 0, 3, 0), + ), + ] { + let slashings = hashset![att_slashing(&att2, &att1)]; + slasher_test_indiv(&[att1, att2], &slashings, 3); + } } #[test] fn surrounds_existing_many_chunks() { let v = vec![0]; let chunk_size = DEFAULT_CHUNK_SIZE as u64; - let att1 = indexed_att(&v, 3 * chunk_size, 3 * chunk_size + 1, 0); - let att2 = indexed_att(&v, 0, 3 * chunk_size + 2, 0); - let slashings = hashset![att_slashing(&att2, &att1)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); + for (att1, att2) in [ + ( + indexed_att(&v, 3 * chunk_size, 3 * chunk_size + 1, 0), + indexed_att(&v, 0, 3 * chunk_size + 2, 0), + ), + ( + indexed_att(&v, 3 * chunk_size, 3 * chunk_size + 1, 0), + indexed_att_electra(&v, 0, 3 * chunk_size + 2, 0), + ), + ( + indexed_att_electra(&v, 3 * chunk_size, 3 * chunk_size + 1, 0), + indexed_att_electra(&v, 0, 3 * chunk_size + 2, 0), + ), + ] { + let slashings = hashset![att_slashing(&att2, &att1)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); + } } #[test] fn surrounded_by_single_val_single_chunk() { let v = vec![0]; - let att1 = indexed_att(&v, 0, 15, 0); - let att2 = indexed_att(&v, 1, 14, 0); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 15); + for (att1, att2) in [ + (indexed_att(&v, 0, 15, 0), indexed_att(&v, 1, 14, 0)), + (indexed_att(&v, 0, 15, 0), indexed_att_electra(&v, 1, 14, 0)), + ( + indexed_att_electra(&v, 0, 15, 0), + indexed_att_electra(&v, 1, 14, 0), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 15); + } } #[test] fn surrounded_by_single_val_multi_chunk() { let v = vec![0]; let chunk_size = DEFAULT_CHUNK_SIZE as u64; - let att1 = indexed_att(&v, 
0, 3 * chunk_size, 0); - let att2 = indexed_att(&v, chunk_size, chunk_size + 1, 0); - let slashings = hashset![att_slashing(&att1, &att2)]; - let attestations = vec![att1, att2]; - slasher_test_indiv(&attestations, &slashings, 3 * chunk_size); - slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); + for (att1, att2) in [ + ( + indexed_att(&v, 0, 3 * chunk_size, 0), + indexed_att(&v, chunk_size, chunk_size + 1, 0), + ), + ( + indexed_att(&v, 0, 3 * chunk_size, 0), + indexed_att_electra(&v, chunk_size, chunk_size + 1, 0), + ), + ( + indexed_att_electra(&v, 0, 3 * chunk_size, 0), + indexed_att_electra(&v, chunk_size, chunk_size + 1, 0), + ), + ] { + let slashings = hashset![att_slashing(&att1, &att2)]; + let attestations = vec![att1, att2]; + slasher_test_indiv(&attestations, &slashings, 3 * chunk_size); + slasher_test_indiv(&attestations, &slashings, 4 * chunk_size); + } } // Process each attestation individually, and confirm that the slashings produced are as expected. @@ -174,7 +271,8 @@ fn slasher_test( ) { let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); - let slasher = Slasher::open(config, test_logger()).unwrap(); + let spec = chain_spec(); + let slasher = Slasher::open(config, spec, test_logger()).unwrap(); let current_epoch = Epoch::new(current_epoch); for (i, attestation) in attestations.iter().enumerate() { @@ -203,7 +301,8 @@ fn parallel_slasher_test( ) { let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); - let slasher = Slasher::open(config, test_logger()).unwrap(); + let spec = chain_spec(); + let slasher = Slasher::open(config, spec, test_logger()).unwrap(); let current_epoch = Epoch::new(current_epoch); attestations diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index 3b7b8ed583c..6d2a1f5176b 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,8 +1,8 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use slasher::{ - test_utils::{block as test_block, E}, + test_utils::{block as test_block, chain_spec, E}, Config, Slasher, }; use tempfile::tempdir; @@ -12,7 +12,8 @@ use types::{Epoch, EthSpec}; fn empty_pruning() { let tempdir = tempdir().unwrap(); let config = Config::new(tempdir.path().into()); - let slasher = Slasher::::open(config, test_logger()).unwrap(); + let spec = chain_spec(); + let slasher = Slasher::::open(config, spec, test_logger()).unwrap(); slasher.prune_database(Epoch::new(0)).unwrap(); } @@ -24,8 +25,9 @@ fn block_pruning() { let mut config = Config::new(tempdir.path().into()); config.chunk_size = 2; config.history_length = 2; + let spec = chain_spec(); - let slasher = Slasher::::open(config.clone(), test_logger()).unwrap(); + let slasher = Slasher::::open(config.clone(), spec, test_logger()).unwrap(); let current_epoch = Epoch::from(2 * config.history_length); // Pruning the empty database should be safe. 
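Every slasher test entry point below now builds the slasher the same way, passing the chain spec alongside the config. A minimal sketch of the new call shape, assuming the `chain_spec()` helper from `test_utils` and the `Slasher::open(config, spec, logger)` signature introduced above:

```rust
use logging::test_logger;
use slasher::{
    test_utils::{chain_spec, E},
    Config, Slasher,
};
use tempfile::tempdir;
use types::Epoch;

// Hypothetical helper; it only illustrates the updated constructor call.
fn open_slasher_with_spec() {
    let tempdir = tempdir().unwrap();
    let config = Config::new(tempdir.path().into());
    // The spec is now an Arc<ChainSpec> threaded down to the slasher database.
    let slasher = Slasher::<E>::open(config, chain_spec(), test_logger()).unwrap();
    slasher.prune_database(Epoch::new(0)).unwrap();
}
```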
diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index ce0e42df1d3..0aaaa63f65c 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,10 +1,10 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; use rand::prelude::*; use slasher::{ test_utils::{ - block, indexed_att, slashed_validators_from_attestations, + block, chain_spec, indexed_att, slashed_validators_from_attestations, slashed_validators_from_slashings, E, }, Config, Slasher, @@ -49,7 +49,9 @@ fn random_test(seed: u64, test_config: TestConfig) { config.chunk_size = 1 << chunk_size_exponent; config.history_length = 1 << rng.gen_range(chunk_size_exponent..chunk_size_exponent + 3); - let slasher = Slasher::::open(config.clone(), test_logger()).unwrap(); + let spec = chain_spec(); + + let slasher = Slasher::::open(config.clone(), spec, test_logger()).unwrap(); let validators = (0..num_validators as u64).collect::>(); diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index d2c876d3630..2ec56bc7d5d 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,7 +1,10 @@ -#![cfg(any(feature = "mdbx", feature = "lmdb"))] +#![cfg(any(feature = "mdbx", feature = "lmdb", feature = "redb"))] use logging::test_logger; -use slasher::{test_utils::indexed_att, Config, Slasher}; +use slasher::{ + test_utils::{chain_spec, indexed_att}, + Config, Slasher, +}; use tempfile::tempdir; use types::Epoch; @@ -9,11 +12,12 @@ use types::Epoch; fn attestation_pruning_empty_wrap_around() { let tempdir = tempdir().unwrap(); let mut config = Config::new(tempdir.path().into()); + let spec = chain_spec(); config.validator_chunk_size = 1; config.chunk_size = 16; config.history_length = 16; - let slasher = Slasher::open(config.clone(), test_logger()).unwrap(); + let slasher = Slasher::open(config.clone(), spec, test_logger()).unwrap(); let v = vec![0]; let history_length = config.history_length as u64; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index f3d00fa035c..fc4614f5d45 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -24,19 +24,16 @@ serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } eth2_network_config = { workspace = true } -ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -cached_tree_hash = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } types = { workspace = true } snap = { workspace = true } fs2 = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } fork_choice = { workspace = true } execution_layer = { workspace = true } logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 508c284275a..5dc3d2a0404 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.4.0-beta.6 +TESTS_TAG := v1.5.0-alpha.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 7629d61827f..e1a308f7a40 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -42,7 +42,15 @@ 
"bls12-381-tests/deserialization_G2", "bls12-381-tests/hash_to_G2", "tests/.*/eip6110", - "tests/.*/whisk" + "tests/.*/whisk", + "tests/.*/eip7594", + # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + "tests/.*/electra/ssz_static/LightClientUpdate", + "tests/.*/electra/ssz_static/LightClientFinalityUpdate", + "tests/.*/electra/ssz_static/LightClientBootstrap", + # TODO(electra) re-enable as DepositRequest when EF tests are updated + "tests/.*/electra/operations/deposit_receipt", + "tests/.*/electra/ssz_static/DepositReceipt" ] diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 6763edbe22b..8b253919805 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -2,7 +2,6 @@ use serde::Deserialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::fmt::Debug; -use tree_hash::TreeHash; use types::ForkName; /// Macro to wrap U128 and U256 so they deserialize correctly. @@ -49,12 +48,12 @@ uint_wrapper!(TestU256, ethereum_types::U256); /// Trait for types that can be used in SSZ static tests. pub trait SszStaticType: - serde::de::DeserializeOwned + Encode + TreeHash + Clone + PartialEq + Debug + Sync + serde::de::DeserializeOwned + Encode + Clone + PartialEq + Debug + Sync { } impl SszStaticType for T where - T: serde::de::DeserializeOwned + Encode + TreeHash + Clone + PartialEq + Debug + Sync + T: serde::de::DeserializeOwned + Encode + Clone + PartialEq + Debug + Sync { } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index c4c592e4cf2..dfd782a22b3 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -10,6 +10,9 @@ use state_processing::per_epoch_processing::capella::process_historical_summarie use state_processing::per_epoch_processing::effective_balance_updates::{ process_effective_balance_updates, process_effective_balance_updates_slow, }; +use state_processing::per_epoch_processing::single_pass::{ + process_epoch_single_pass, SinglePassConfig, +}; use state_processing::per_epoch_processing::{ altair, base, historical_roots_update::process_historical_roots_update, @@ -53,6 +56,10 @@ pub struct Slashings; #[derive(Debug)] pub struct Eth1DataReset; #[derive(Debug)] +pub struct PendingBalanceDeposits; +#[derive(Debug)] +pub struct PendingConsolidations; +#[derive(Debug)] pub struct EffectiveBalanceUpdates; #[derive(Debug)] pub struct SlashingsReset; @@ -79,6 +86,8 @@ type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(Eth1DataReset, "eth1_data_reset"); +type_name!(PendingBalanceDeposits, "pending_balance_deposits"); +type_name!(PendingConsolidations, "pending_consolidations"); type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); type_name!(RandaoMixesReset, "randao_mixes_reset"); @@ -178,6 +187,35 @@ impl EpochTransition for Eth1DataReset { } } +impl EpochTransition for PendingBalanceDeposits { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_epoch_single_pass( + state, + spec, + SinglePassConfig { + pending_balance_deposits: true, + ..SinglePassConfig::disable_all() + }, + ) + .map(|_| ()) + } +} + +impl EpochTransition for PendingConsolidations { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), 
EpochProcessingError> { + initialize_epoch_cache(state, spec)?; + process_epoch_single_pass( + state, + spec, + SinglePassConfig { + pending_consolidations: true, + ..SinglePassConfig::disable_all() + }, + ) + .map(|_| ()) + } +} + impl EpochTransition for EffectiveBalanceUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { if let BeaconState::Base(_) = state { @@ -304,24 +342,32 @@ impl> Case for EpochProcessing { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - match fork_name { - // No Altair tests for genesis fork. - ForkName::Base => { - T::name() != "sync_committee_updates" - && T::name() != "inactivity_updates" - && T::name() != "participation_flag_updates" - && T::name() != "historical_summaries_update" - } - // No phase0 tests for Altair and later. - ForkName::Altair | ForkName::Bellatrix => { - T::name() != "participation_record_updates" - && T::name() != "historical_summaries_update" - } - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - T::name() != "participation_record_updates" - && T::name() != "historical_roots_update" - } + if !fork_name.altair_enabled() + && (T::name() == "sync_committee_updates" + || T::name() == "inactivity_updates" + || T::name() == "participation_flag_updates") + { + return false; + } + + if fork_name.altair_enabled() && T::name() == "participation_record_updates" { + return false; + } + + if !fork_name.capella_enabled() && T::name() == "historical_summaries_update" { + return false; + } + + if fork_name.capella_enabled() && T::name() == "historical_roots_update" { + return false; + } + + if !fork_name.electra_enabled() + && (T::name() == "pending_consolidations" || T::name() == "pending_balance_deposits") + { + return false; } + true } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index f0749c3c7e4..2a2cc067e58 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -24,9 +24,9 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, - ExecutionBlockHash, Hash256, IndexedAttestation, KzgProof, ProposerPreparationData, - SignedBeaconBlock, Slot, Uint256, + Attestation, AttestationRef, AttesterSlashing, AttesterSlashingRef, BeaconBlock, BeaconState, + BlobSidecar, BlobsList, BlockImportSource, Checkpoint, ExecutionBlockHash, Hash256, + IndexedAttestation, KzgProof, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -180,12 +180,32 @@ impl LoadCase for ForkChoiceTest { }) } Step::Attestation { attestation } => { - ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) - .map(|attestation| Step::Attestation { attestation }) + if fork_name.electra_enabled() { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))).map( + |attestation| Step::Attestation { + attestation: Attestation::Electra(attestation), + }, + ) + } else { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))).map( + |attestation| Step::Attestation { + attestation: Attestation::Base(attestation), + }, + ) + } } Step::AttesterSlashing { attester_slashing } => { - ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) - .map(|attester_slashing| Step::AttesterSlashing { attester_slashing 
}) + if fork_name.electra_enabled() { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) + .map(|attester_slashing| Step::AttesterSlashing { + attester_slashing: AttesterSlashing::Electra(attester_slashing), + }) + } else { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) + .map(|attester_slashing| Step::AttesterSlashing { + attester_slashing: AttesterSlashing::Base(attester_slashing), + }) + } } Step::PowBlock { pow_block } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) @@ -249,7 +269,7 @@ impl Case for ForkChoiceTest { } => tester.process_block(block.clone(), blobs.clone(), proofs.clone(), *valid)?, Step::Attestation { attestation } => tester.process_attestation(attestation)?, Step::AttesterSlashing { attester_slashing } => { - tester.process_attester_slashing(attester_slashing) + tester.process_attester_slashing(attester_slashing.to_ref()) } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), Step::OnPayloadInfo { @@ -498,6 +518,7 @@ impl Tester { block_root, block.clone(), NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); @@ -574,11 +595,11 @@ impl Tester { } pub fn process_attestation(&self, attestation: &Attestation) -> Result<(), Error> { - let (indexed_attestation, _) = - obtain_indexed_attestation_and_committees_per_slot(&self.harness.chain, attestation) - .map_err(|e| { - Error::InternalError(format!("attestation indexing failed with {:?}", e)) - })?; + let (indexed_attestation, _) = obtain_indexed_attestation_and_committees_per_slot( + &self.harness.chain, + attestation.to_ref(), + ) + .map_err(|e| Error::InternalError(format!("attestation indexing failed with {:?}", e)))?; let verified_attestation: ManuallyVerifiedAttestation> = ManuallyVerifiedAttestation { attestation, @@ -591,7 +612,7 @@ impl Tester { .map_err(|e| Error::InternalError(format!("attestation import failed with {:?}", e))) } - pub fn process_attester_slashing(&self, attester_slashing: &AttesterSlashing) { + pub fn process_attester_slashing(&self, attester_slashing: AttesterSlashingRef) { self.harness .chain .canonical_head @@ -850,8 +871,8 @@ pub struct ManuallyVerifiedAttestation<'a, T: BeaconChainTypes> { } impl<'a, T: BeaconChainTypes> VerifiedAttestation for ManuallyVerifiedAttestation<'a, T> { - fn attestation(&self) -> &Attestation { - self.attestation + fn attestation(&self) -> AttestationRef { + self.attestation.to_ref() } fn indexed_attestation(&self) -> &IndexedAttestation { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 158f2334dc3..0af2c818271 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -6,6 +6,9 @@ use serde::Deserialize; use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; use state_processing::epoch_cache::initialize_epoch_cache; +use state_processing::per_block_processing::process_operations::{ + process_consolidations, process_deposit_requests, process_execution_layer_withdrawal_requests, +}; use state_processing::{ per_block_processing::{ errors::BlockProcessingError, @@ -21,8 +24,9 @@ use state_processing::{ use std::fmt::Debug; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconState, BlindedPayload, Deposit, - ExecutionPayload, FullPayload, 
ProposerSlashing, SignedBlsToExecutionChange, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, + BlindedPayload, Deposit, DepositRequest, ExecutionLayerWithdrawalRequest, ExecutionPayload, + FullPayload, ProposerSlashing, SignedBlsToExecutionChange, SignedConsolidation, SignedVoluntaryExit, SyncAggregate, }; @@ -78,8 +82,12 @@ impl Operation for Attestation { "attestation".into() } - fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + if fork_name < ForkName::Electra { + Ok(Self::Base(ssz_decode_file(path)?)) + } else { + Ok(Self::Electra(ssz_decode_file(path)?)) + } } fn apply_to( @@ -93,7 +101,7 @@ impl Operation for Attestation { match state { BeaconState::Base(_) => base::process_attestations( state, - &[self.clone()], + [self.clone().to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, spec, @@ -106,7 +114,7 @@ impl Operation for Attestation { initialize_progressive_balances_cache(state, spec)?; altair_deneb::process_attestation( state, - self, + self.to_ref(), 0, &mut ctxt, VerifySignatures::True, @@ -122,8 +130,15 @@ impl Operation for AttesterSlashing { "attester_slashing".into() } - fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { - ssz_decode_file(path) + fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { + Ok(match fork_name { + ForkName::Base + | ForkName::Altair + | ForkName::Bellatrix + | ForkName::Capella + | ForkName::Deneb => Self::Base(ssz_decode_file(path)?), + ForkName::Electra => Self::Electra(ssz_decode_file(path)?), + }) } fn apply_to( @@ -136,7 +151,7 @@ impl Operation for AttesterSlashing { initialize_progressive_balances_cache(state, spec)?; process_attester_slashings( state, - &[self.clone()], + [self.clone().to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, spec, @@ -283,7 +298,7 @@ impl Operation for BeaconBlockBody> { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base && fork_name != ForkName::Altair + fork_name.bellatrix_enabled() } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { @@ -292,6 +307,7 @@ impl Operation for BeaconBlockBody> { ForkName::Bellatrix => BeaconBlockBody::Bellatrix(<_>::from_ssz_bytes(bytes)?), ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), + ForkName::Electra => BeaconBlockBody::Electra(<_>::from_ssz_bytes(bytes)?), _ => panic!(), }) }) @@ -324,7 +340,7 @@ impl Operation for BeaconBlockBody> { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base && fork_name != ForkName::Altair + fork_name.bellatrix_enabled() } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { @@ -343,6 +359,10 @@ impl Operation for BeaconBlockBody> { let inner = >>::from_ssz_bytes(bytes)?; BeaconBlockBody::Deneb(inner.clone_as_blinded()) } + ForkName::Electra => { + let inner = >>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Electra(inner.clone_as_blinded()) + } _ => panic!(), }) }) @@ -376,9 +396,7 @@ impl Operation for WithdrawalsPayload { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base - && fork_name != ForkName::Altair - && fork_name != ForkName::Bellatrix + fork_name.capella_enabled() } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { @@ -410,9 +428,7 @@ impl 
Operation for SignedBlsToExecutionChange { } fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name != ForkName::Base - && fork_name != ForkName::Altair - && fork_name != ForkName::Bellatrix + fork_name.capella_enabled() } fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { @@ -429,6 +445,75 @@ impl Operation for SignedBlsToExecutionChange { } } +impl Operation for ExecutionLayerWithdrawalRequest { + fn handler_name() -> String { + "execution_layer_withdrawal_request".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.electra_enabled() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_execution_layer_withdrawal_requests(state, &[self.clone()], spec) + } +} + +impl Operation for DepositRequest { + fn handler_name() -> String { + "deposit_request".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.electra_enabled() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_deposit_requests(state, &[self.clone()], spec) + } +} + +impl Operation for SignedConsolidation { + fn handler_name() -> String { + "consolidation".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.electra_enabled() + } + + fn decode(path: &Path, _fork_name: ForkName, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _extra: &Operations, + ) -> Result<(), BlockProcessingError> { + process_consolidations(state, &[self.clone()], VerifySignatures::True, spec) + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 8de3e217f00..7933fc65c70 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -6,6 +6,7 @@ use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{log_file_access, snappy_decode_file, yaml_decode_file}; use serde::{de::Error as SerdeError, Deserialize, Deserializer}; use ssz_derive::{Decode, Encode}; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::typenum::*; use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; @@ -206,7 +207,7 @@ impl Case for SszGeneric { } } -fn ssz_generic_test(path: &Path) -> Result<(), Error> { +fn ssz_generic_test(path: &Path) -> Result<(), Error> { let meta_path = path.join("meta.yaml"); let meta: Option = if meta_path.is_file() { Some(yaml_decode_file(&meta_path)?) 
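Several of the handlers above replace chains of `fork_name != ForkName::X` comparisons with the `*_enabled()` helpers on `ForkName`. A minimal sketch of the equivalence, assuming `capella_enabled()` returns true for Capella and every later fork:

```rust
use types::ForkName;

// Hypothetical free function mirroring the rewrite applied to `WithdrawalsPayload`
// and `SignedBlsToExecutionChange` above.
fn withdrawals_enabled(fork_name: ForkName) -> bool {
    // Old form:
    //   fork_name != ForkName::Base
    //       && fork_name != ForkName::Altair
    //       && fork_name != ForkName::Bellatrix
    // New form, which also covers Deneb, Electra and any future fork:
    fork_name.capella_enabled()
}
```

The same pattern gates the new Electra-only operations (`ExecutionLayerWithdrawalRequest`, `DepositRequest`, `SignedConsolidation`) via `fork_name.electra_enabled()`.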
diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index 5f0ac3525c4..e17aa469bfc 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -101,7 +101,7 @@ pub fn check_tree_hash(expected_str: &str, actual_root: &[u8]) -> Result<(), Err compare_result::(&Ok(tree_hash_root), &Some(expected_root)) } -impl Case for SszStatic { +impl Case for SszStatic { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { check_serialization(&self.value, &self.serialized, T::from_ssz_bytes)?; check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; @@ -115,7 +115,6 @@ impl Case for SszStaticTHC> { check_serialization(&self.value, &self.serialized, |bytes| { BeaconState::from_ssz_bytes(bytes, spec) })?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?; let mut state = self.value.clone(); let cached_tree_hash_root = state.update_tree_hash_cache().unwrap(); diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 2d5ea4149ef..410a37e7682 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -20,9 +20,8 @@ pub trait Handler { // Add forks here to exclude them from EF spec testing. Helpful for adding future or // unspecified forks. - // TODO(electra): Enable Electra once spec tests are available. fn disabled_forks(&self) -> Vec { - vec![ForkName::Electra] + vec![] } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { @@ -229,6 +228,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Deneb]) } + pub fn electra_only() -> Self { + Self::for_forks(vec![ForkName::Electra]) + } + pub fn altair_and_later() -> Self { Self::for_forks(ForkName::list_all()[1..].to_vec()) } @@ -240,6 +243,18 @@ impl SszStaticHandler { pub fn capella_and_later() -> Self { Self::for_forks(ForkName::list_all()[3..].to_vec()) } + + pub fn deneb_and_later() -> Self { + Self::for_forks(ForkName::list_all()[4..].to_vec()) + } + + pub fn electra_and_later() -> Self { + Self::for_forks(ForkName::list_all()[5..].to_vec()) + } + + pub fn pre_electra() -> Self { + Self::for_forks(ForkName::list_all()[0..5].to_vec()) + } } /// Handler for SSZ types that implement `CachedTreeHash`. @@ -254,7 +269,7 @@ pub struct SszStaticWithSpecHandler(PhantomData<(T, E)>); impl Handler for SszStaticHandler where - T: cases::SszStaticType + ssz::Decode + TypeName, + T: cases::SszStaticType + tree_hash::TreeHash + ssz::Decode + TypeName, E: TypeName, { type Case = cases::SszStatic; @@ -569,7 +584,7 @@ impl Handler for ForkChoiceHandler { // No FCU override tests prior to bellatrix. if self.handler_name == "should_override_forkchoice_update" - && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + && !fork_name.bellatrix_enabled() { return false; } @@ -605,9 +620,7 @@ impl Handler for OptimisticSyncHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - fork_name != ForkName::Base - && fork_name != ForkName::Altair - && cfg!(not(feature = "fake_crypto")) + fork_name.bellatrix_enabled() && cfg!(not(feature = "fake_crypto")) } } @@ -790,13 +803,12 @@ impl Handler for MerkleProofValidityHandler { "single_merkle_proof".into() } - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - fork_name != ForkName::Base - // Test is skipped due to some changes in the Capella light client - // spec. 
- // - // https://github.com/sigp/lighthouse/issues/4022 - && fork_name != ForkName::Capella && fork_name != ForkName::Deneb + fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { + // Test is skipped due to some changes in the Capella light client + // spec. + // + // https://github.com/sigp/lighthouse/issues/4022 + false } } @@ -821,10 +833,7 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { // Enabled in Deneb - fork_name != ForkName::Base - && fork_name != ForkName::Altair - && fork_name != ForkName::Bellatrix - && fork_name != ForkName::Capella + fork_name == ForkName::Deneb } } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5ab2b4b7b43..e55551be701 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -3,8 +3,8 @@ pub use cases::WithdrawalsPayload; pub use cases::{ Case, EffectiveBalanceUpdates, Eth1DataReset, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, JustificationAndFinalization, ParticipationFlagUpdates, - ParticipationRecordUpdates, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, - SlashingsReset, SyncCommitteeUpdates, + ParticipationRecordUpdates, PendingBalanceDeposits, PendingConsolidations, RandaoMixesReset, + RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 49ebbe81909..d6ef873ead4 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -37,11 +37,16 @@ macro_rules! type_name_generic { type_name!(MinimalEthSpec, "minimal"); type_name!(MainnetEthSpec, "mainnet"); - type_name_generic!(AggregateAndProof); +type_name_generic!(AggregateAndProofBase, "AggregateAndProof"); +type_name_generic!(AggregateAndProofElectra, "AggregateAndProof"); type_name_generic!(Attestation); +type_name_generic!(AttestationBase, "Attestation"); +type_name_generic!(AttestationElectra, "Attestation"); type_name!(AttestationData); type_name_generic!(AttesterSlashing); +type_name_generic!(AttesterSlashingBase, "AttesterSlashing"); +type_name_generic!(AttesterSlashingElectra, "AttesterSlashing"); type_name_generic!(BeaconBlock); type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); @@ -49,34 +54,43 @@ type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyBellatrix, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyDeneb, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyElectra, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(BlobIdentifier); type_name_generic!(BlobSidecar); type_name!(Checkpoint); +type_name!(Consolidation); type_name_generic!(ContributionAndProof); type_name!(Deposit); type_name!(DepositData); type_name!(DepositMessage); +type_name!(DepositRequest); type_name!(Eth1Data); +type_name!(ExecutionLayerWithdrawalRequest); type_name_generic!(ExecutionPayload); type_name_generic!(ExecutionPayloadBellatrix, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); type_name_generic!(ExecutionPayloadDeneb, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadElectra, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); 
type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); type_name_generic!(IndexedAttestation); +type_name_generic!(IndexedAttestationBase, "IndexedAttestation"); +type_name_generic!(IndexedAttestationElectra, "IndexedAttestation"); type_name_generic!(LightClientBootstrap); type_name_generic!(LightClientBootstrapAltair, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapCapella, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapDeneb, "LightClientBootstrap"); +type_name_generic!(LightClientBootstrapElectra, "LightClientBootstrap"); type_name_generic!(LightClientFinalityUpdate); type_name_generic!(LightClientFinalityUpdateAltair, "LightClientFinalityUpdate"); type_name_generic!( @@ -84,10 +98,15 @@ type_name_generic!( "LightClientFinalityUpdate" ); type_name_generic!(LightClientFinalityUpdateDeneb, "LightClientFinalityUpdate"); +type_name_generic!( + LightClientFinalityUpdateElectra, + "LightClientFinalityUpdate" +); type_name_generic!(LightClientHeader); -type_name_generic!(LightClientHeaderDeneb, "LightClientHeader"); -type_name_generic!(LightClientHeaderCapella, "LightClientHeader"); type_name_generic!(LightClientHeaderAltair, "LightClientHeader"); +type_name_generic!(LightClientHeaderCapella, "LightClientHeader"); +type_name_generic!(LightClientHeaderDeneb, "LightClientHeader"); +type_name_generic!(LightClientHeaderElectra, "LightClientHeader"); type_name_generic!(LightClientOptimisticUpdate); type_name_generic!( LightClientOptimisticUpdateAltair, @@ -101,15 +120,26 @@ type_name_generic!( LightClientOptimisticUpdateDeneb, "LightClientOptimisticUpdate" ); +type_name_generic!( + LightClientOptimisticUpdateElectra, + "LightClientOptimisticUpdate" +); type_name_generic!(LightClientUpdate); type_name_generic!(LightClientUpdateAltair, "LightClientUpdate"); type_name_generic!(LightClientUpdateCapella, "LightClientUpdate"); type_name_generic!(LightClientUpdateDeneb, "LightClientUpdate"); +type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); type_name_generic!(PendingAttestation); +type_name!(PendingConsolidation); +type_name!(PendingPartialWithdrawal); +type_name!(PendingBalanceDeposit); type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); +type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); +type_name_generic!(SignedAggregateAndProofElectra, "SignedAggregateAndProof"); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); +type_name!(SignedConsolidation); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 5226c7ac2b0..10a57a6b45e 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; -use types::{MainnetEthSpec, MinimalEthSpec, *}; +use types::{ExecutionLayerWithdrawalRequest, MainnetEthSpec, MinimalEthSpec, *}; // Check that the hand-computed multiplications on EthSpec are correctly computed. 
// This test lives here because one is most likely to muck these up during a spec update. @@ -14,6 +14,10 @@ fn check_typenum_values() { E::SlotsPerEth1VotingPeriod::to_u64(), E::EpochsPerEth1VotingPeriod::to_u64() * E::SlotsPerEpoch::to_u64() ); + assert_eq!( + E::MaxValidatorsPerSlot::to_u64(), + E::MaxCommitteesPerSlot::to_u64() * E::MaxValidatorsPerCommittee::to_u64() + ); } #[test] @@ -88,6 +92,27 @@ fn operations_withdrawals() { OperationsHandler::>::default().run(); } +#[test] +fn operations_execution_layer_withdrawal_reqeusts() { + OperationsHandler::::default().run(); + OperationsHandler::::default().run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn operations_deposit_requests() { + //TODO(electra): re-enable mainnet once they update the name for this + // OperationsHandler::::default().run(); + // OperationsHandler::::default().run(); +} + +#[test] +fn operations_consolidations() { + OperationsHandler::::default().run(); + //TODO(electra): re-enable mainnet once they make tests for this + //OperationsHandler::::default().run(); +} + #[test] fn operations_bls_to_execution_change() { OperationsHandler::::default().run(); @@ -217,12 +242,13 @@ mod ssz_static { use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::blob_sidecar::BlobIdentifier; use types::historical_summary::HistoricalSummary; - use types::{LightClientBootstrapAltair, *}; + use types::{ + AttesterSlashingBase, AttesterSlashingElectra, Consolidation, + ExecutionLayerWithdrawalRequest, LightClientBootstrapAltair, PendingBalanceDeposit, + PendingPartialWithdrawal, *, + }; - ssz_static_test!(aggregate_and_proof, AggregateAndProof<_>); - ssz_static_test!(attestation, Attestation<_>); ssz_static_test!(attestation_data, AttestationData); - ssz_static_test!(attester_slashing, AttesterSlashing<_>); ssz_static_test!(beacon_block, SszStaticWithSpecHandler, BeaconBlock<_>); ssz_static_test!(beacon_block_header, BeaconBlockHeader); ssz_static_test!(beacon_state, SszStaticTHCHandler, BeaconState<_>); @@ -235,10 +261,8 @@ mod ssz_static { ssz_static_test!(fork, Fork); ssz_static_test!(fork_data, ForkData); ssz_static_test!(historical_batch, HistoricalBatch<_>); - ssz_static_test!(indexed_attestation, IndexedAttestation<_>); ssz_static_test!(pending_attestation, PendingAttestation<_>); ssz_static_test!(proposer_slashing, ProposerSlashing); - ssz_static_test!(signed_aggregate_and_proof, SignedAggregateAndProof<_>); ssz_static_test!( signed_beacon_block, SszStaticWithSpecHandler, @@ -249,6 +273,71 @@ mod ssz_static { ssz_static_test!(signing_data, SigningData); ssz_static_test!(validator, Validator); ssz_static_test!(voluntary_exit, VoluntaryExit); + + #[test] + fn attestation() { + SszStaticHandler::, MinimalEthSpec>::pre_electra().run(); + SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); + } + + #[test] + fn attester_slashing() { + SszStaticHandler::, MinimalEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MainnetEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); + } + + #[test] + fn indexed_attestation() { + SszStaticHandler::, MinimalEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MainnetEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, 
MainnetEthSpec>::electra_only() + .run(); + } + + #[test] + fn signed_aggregate_and_proof() { + SszStaticHandler::, MinimalEthSpec>::pre_electra( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::pre_electra( + ) + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); + } + + #[test] + fn aggregate_and_proof() { + SszStaticHandler::, MinimalEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MainnetEthSpec>::pre_electra() + .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); + } + // BeaconBlockBody has no internal indicator of which fork it is for, so we test it separately. #[test] fn beacon_block_body() { @@ -270,6 +359,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // Altair and later @@ -304,6 +397,11 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + // SszStaticHandler::, MinimalEthSpec>::electra_only() + // .run(); + // SszStaticHandler::, MainnetEthSpec>::electra_only() + // .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -329,35 +427,27 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientOptimisticUpdate has no internal indicator of which fork it is for, so we test it separately. #[test] fn light_client_optimistic_update() { - SszStaticHandler::, MinimalEthSpec>::altair_only( - ) - .run(); - SszStaticHandler::, MainnetEthSpec>::altair_only( - ) - .run(); - SszStaticHandler::, MinimalEthSpec>::bellatrix_only( - ) - .run(); - SszStaticHandler::, MainnetEthSpec>::bellatrix_only( - ) - .run(); - SszStaticHandler::, MinimalEthSpec>::capella_only( - ) - .run(); - SszStaticHandler::, MainnetEthSpec>::capella_only( - ) - .run(); - SszStaticHandler::, MinimalEthSpec>::deneb_only( - ) - .run(); - SszStaticHandler::, MainnetEthSpec>::deneb_only( - ) - .run(); + SszStaticHandler::, MinimalEthSpec>::altair_only().run(); + SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + SszStaticHandler::, MinimalEthSpec>::bellatrix_only().run(); + SszStaticHandler::, MainnetEthSpec>::bellatrix_only().run(); + SszStaticHandler::, MinimalEthSpec>::capella_only().run(); + SszStaticHandler::, MainnetEthSpec>::capella_only().run(); + SszStaticHandler::, MinimalEthSpec>::deneb_only().run(); + SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); + SszStaticHandler::, MinimalEthSpec>::electra_only().run(); + SszStaticHandler::, MainnetEthSpec>::electra_only().run(); } // LightClientFinalityUpdate has no internal indicator of which fork it is for, so we test it separately. 
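The per-fork constructors used throughout these SSZ static tests (`pre_electra`, `electra_only`, `deneb_and_later`, ...) are thin wrappers over slices of `ForkName::list_all()`, as added in `handler.rs` above. A minimal sketch of the ordering they assume:

```rust
use types::ForkName;

// Hypothetical check; it relies on `list_all()` returning forks in activation order
// (Base, Altair, Bellatrix, Capella, Deneb, Electra), which is what the
// `capella_and_later`/`deneb_and_later`/`electra_and_later` slices above assume.
fn fork_slices_partition_the_list() {
    let all = ForkName::list_all();
    let pre_electra = &all[0..5];      // Base ..= Deneb
    let electra_and_later = &all[5..]; // Electra and any future fork
    assert_eq!(pre_electra.len() + electra_and_later.len(), all.len());
}
```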
@@ -387,6 +477,13 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + // SszStaticHandler::, MinimalEthSpec>::electra_only( + // ) + // .run(); + // SszStaticHandler::, MainnetEthSpec>::electra_only( + // ) + // .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. @@ -410,6 +507,13 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + // SszStaticHandler::, MinimalEthSpec>::electra_only( + // ) + // .run(); + // SszStaticHandler::, MainnetEthSpec>::electra_only( + // ) + // .run(); } #[test] @@ -463,6 +567,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } #[test] @@ -479,6 +587,10 @@ mod ssz_static { ::deneb_only().run(); SszStaticHandler::, MainnetEthSpec> ::deneb_only().run(); + SszStaticHandler::, MinimalEthSpec> + ::electra_only().run(); + SszStaticHandler::, MainnetEthSpec> + ::electra_only().run(); } #[test] @@ -501,14 +613,14 @@ mod ssz_static { #[test] fn blob_sidecar() { - SszStaticHandler::, MinimalEthSpec>::deneb_only().run(); - SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); + SszStaticHandler::, MinimalEthSpec>::deneb_and_later().run(); + SszStaticHandler::, MainnetEthSpec>::deneb_and_later().run(); } #[test] fn blob_identifier() { - SszStaticHandler::::deneb_only().run(); - SszStaticHandler::::deneb_only().run(); + SszStaticHandler::::deneb_and_later().run(); + SszStaticHandler::::deneb_and_later().run(); } #[test] @@ -516,6 +628,51 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); SszStaticHandler::::capella_and_later().run(); } + + #[test] + fn consolidation() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + + // TODO(electra) re-enable when EF tests are updated + // #[test] + // fn deposit_request() { + // SszStaticHandler::::electra_and_later().run(); + // SszStaticHandler::::electra_and_later().run(); + // } + + #[test] + fn execution_layer_withdrawal_request() { + SszStaticHandler::::electra_and_later() + .run(); + SszStaticHandler::::electra_and_later() + .run(); + } + + #[test] + fn pending_balance_deposit() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + + #[test] + fn pending_consolidation() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + + #[test] + fn pending_partial_withdrawal() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + + #[test] + fn signed_consolidation() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } } #[test] @@ -558,6 +715,18 @@ fn epoch_processing_eth1_data_reset() { EpochProcessingHandler::::default().run(); } +#[test] +fn epoch_processing_pending_balance_deposits() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + +#[test] +fn epoch_processing_pending_consolidations() { + EpochProcessingHandler::::default().run(); + EpochProcessingHandler::::default().run(); +} + #[test] fn epoch_processing_effective_balance_updates() { 
EpochProcessingHandler::::default().run(); diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 0063975ee19..55a71605940 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -95,7 +95,7 @@ impl DepositContract { .await .map_err(|e| { format!( - "Failed to deploy contract: {}. Is scripts/anvil_tests_node.sh running?.", + "Failed to deploy contract: {}. Is the RPC server running?.", e ) })?; diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 7f66658f0fa..43d24cd1237 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -10,7 +10,6 @@ serde_json = { workspace = true } task_executor = { workspace = true } tokio = { workspace = true } futures = { workspace = true } -environment = { workspace = true } execution_layer = { workspace = true } sensitive_url = { workspace = true } types = { workspace = true } diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index aad37c32bd1..c3b8651789f 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port; /// We've pinned the Nethermind version since our method of using the `master` branch to /// find the latest tag isn't working. It appears Nethermind don't always tag on `master`. /// We should fix this so we always pull the latest version of Nethermind. -const NETHERMIND_BRANCH: &str = "release/1.21.0"; +const NETHERMIND_BRANCH: &str = "release/1.27.0"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -70,11 +70,10 @@ impl NethermindEngine { .join("nethermind") .join("src") .join("Nethermind") - .join("Nethermind.Runner") + .join("artifacts") .join("bin") - .join("Release") - .join("net7.0") - .join("linux-x64") + .join("Nethermind.Runner") + .join("release") .join("nethermind") } } diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md index f97c3cff289..1dcf372dbdc 100644 --- a/testing/network_testing/README.md +++ b/testing/network_testing/README.md @@ -50,11 +50,11 @@ $ cargo build --release --bin lighthouse --features network/disable-backfill Once built, it can run via checkpoint sync on any network, making sure we point to our mock-el -Prater testnet: +Holesky testnet: ``` -$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url -https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +$ lighthouse --network holesky bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://holesky.checkpoint.sigp.io --execution-endpoint http://localhost:8551 ``` Mainnet: diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index d7ff7b3dd85..f8769b10e21 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -8,7 +8,6 @@ edition = { workspace = true } [dependencies] node_test_rig = { path = "../node_test_rig" } -eth1 = { workspace = true } execution_layer = { workspace = true } types = { workspace = true } parking_lot = { workspace = true } @@ -18,7 +17,5 @@ env_logger = { workspace = true } clap = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -ssz_types = { workspace = true } -ethereum-types = { workspace = 
true } eth2_network_config = { workspace = true } serde_json = { workspace = true } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 755bb71b430..f69d107e344 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -27,15 +27,32 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { - let node_count = value_t!(matches, "nodes", usize).expect("Missing nodes default"); - let proposer_nodes = - value_t!(matches, "proposer-nodes", usize).expect("Missing proposer-nodes default"); - let validators_per_node = value_t!(matches, "validators-per-node", usize) - .expect("Missing validators-per-node default"); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = matches.is_present("continue-after-checks"); + let node_count = matches + .get_one::("nodes") + .expect("missing nodes default") + .parse::() + .expect("missing nodes default"); + let proposer_nodes = matches + .get_one::("proposer-nodes") + .unwrap_or(&String::from("0")) + .parse::() + .unwrap_or(0); + println!("PROPOSER-NODES: {}", proposer_nodes); + let validators_per_node = matches + .get_one::("validators-per-node") + .expect("missing validators-per-node default") + .parse::() + .expect("missing validators-per-node default"); + let speed_up_factor = matches + .get_one::("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::() + .expect("missing speed-up-factor default"); + let log_level = matches + .get_one::("debug-level") + .expect("missing debug-level"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Basic Simulator:"); println!(" nodes: {}", node_count); @@ -64,7 +81,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 00af7e560ce..a82c8b85775 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -1,12 +1,12 @@ -use clap::{App, Arg, SubCommand}; +use clap::{crate_version, Arg, ArgAction, Command}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("simulator") +pub fn cli_app() -> Command { + Command::new("simulator") .version(crate_version!()) .author("Sigma Prime ") .about("Options for interacting with simulator") .subcommand( - SubCommand::with_name("basic-sim") + Command::new("basic-sim") .about( "Runs a Beacon Chain simulation with `n` beacon node and validator clients, \ each with `v` validators. 
\ @@ -16,55 +16,55 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { exit immediately.", ) .arg( - Arg::with_name("nodes") - .short("n") + Arg::new("nodes") + .short('n') .long("nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of beacon nodes"), ) .arg( - Arg::with_name("proposer-nodes") - .short("p") + Arg::new("proposer-nodes") + .short('p') .long("proposer-nodes") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of proposer-only beacon nodes"), ) .arg( - Arg::with_name("validators-per-node") - .short("v") + Arg::new("validators-per-node") + .short('v') .long("validators-per-node") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators"), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) .subcommand( - SubCommand::with_name("fallback-sim") + Command::new("fallback-sim") .about( "Runs a Beacon Chain simulation with `c` validator clients where each VC is \ connected to `b` beacon nodes with `v` validators. \ @@ -76,50 +76,50 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Otherwise, the simulation will exit and an error will be reported.", ) .arg( - Arg::with_name("vc-count") - .short("c") + Arg::new("vc-count") + .short('c') .long("vc-count") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Number of validator clients."), ) .arg( - Arg::with_name("bns-per-vc") - .short("b") + Arg::new("bns-per-vc") + .short('b') .long("bns-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("2") .help("Number of beacon nodes per validator client."), ) .arg( - Arg::with_name("validators-per-vc") - .short("v") + Arg::new("validators-per-vc") + .short('v') .long("validators-per-vc") - .takes_value(true) + .action(ArgAction::Set) .default_value("20") .help("Number of validators per client."), ) .arg( - Arg::with_name("speed-up-factor") - .short("s") + Arg::new("speed-up-factor") + .short('s') .long("speed-up-factor") - .takes_value(true) + .action(ArgAction::Set) .default_value("3") .help("Speed up factor. 
Please use a divisor of 12."), ) .arg( - Arg::with_name("debug-level") - .short("d") + Arg::new("debug-level") + .short('d') .long("debug-level") - .takes_value(true) + .action(ArgAction::Set) .default_value("debug") .help("Set the severity level of the logs."), ) .arg( - Arg::with_name("continue-after-checks") - .short("c") + Arg::new("continue-after-checks") + .short('c') .long("continue_after_checks") - .takes_value(false) + .action(ArgAction::SetTrue) .help("Continue after checks (default false)"), ), ) diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index c9deeba04d9..33f497f37ff 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -34,15 +34,36 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { - let vc_count = value_t!(matches, "vc-count", usize).expect("Missing validator-count default"); - let validators_per_vc = - value_t!(matches, "validators-per-vc", usize).expect("Missing validators-per-vc default"); - let bns_per_vc = value_t!(matches, "bns-per-vc", usize).expect("Missing bns-per-vc default"); + let vc_count = matches + .get_one::("vc-count") + .expect("missing vc-count default") + .parse::() + .expect("missing vc-count default"); + + let validators_per_vc = matches + .get_one::("validators-per-vc") + .expect("missing validators-per-vc default") + .parse::() + .expect("missing validators-per-vc default"); + + let bns_per_vc = matches + .get_one::("bns-per-vc") + .expect("missing bns-per-vc default") + .parse::() + .expect("missing bns-per-vc default"); + assert!(bns_per_vc > 1); - let speed_up_factor = - value_t!(matches, "speed-up-factor", u64).expect("Missing speed-up-factor default"); - let log_level = value_t!(matches, "debug-level", String).expect("Missing default log-level"); - let continue_after_checks = matches.is_present("continue-after-checks"); + let speed_up_factor = matches + .get_one::("speed-up-factor") + .expect("missing speed-up-factor default") + .parse::() + .expect("missing speed-up-factor default"); + + let log_level = matches + .get_one::("debug-level") + .expect("missing debug-level default"); + + let continue_after_checks = matches.get_flag("continue-after-checks"); println!("Fallback Simulator:"); println!(" vc-count: {}", vc_count); @@ -70,7 +91,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { .initialize_logger(LoggerConfig { path: None, debug_level: log_level.clone(), - logfile_debug_level: log_level, + logfile_debug_level: log_level.clone(), log_format: None, logfile_format: None, log_color: false, diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index d1a2d0dc672..a259ac11339 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -10,10 +10,6 @@ //! simulation uses `println` to communicate some info. It might be nice if the nodes logged to //! easy-to-find files and stdout only contained info from the simulation. //! 
- -#[macro_use] -extern crate clap; - mod basic_sim; mod checks; mod cli; @@ -34,14 +30,14 @@ fn main() { let matches = cli_app().get_matches(); match matches.subcommand() { - ("basic-sim", Some(matches)) => match basic_sim::run_basic_sim(matches) { + Some(("basic-sim", matches)) => match basic_sim::run_basic_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - ("fallback-sim", Some(matches)) => match fallback_sim::run_fallback_sim(matches) { + Some(("fallback-sim", matches)) => match fallback_sim::run_fallback_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 292e10d0542..4187844cecc 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -39,7 +39,7 @@ mod tests { use tempfile::{tempdir, TempDir}; use tokio::sync::OnceCell; use tokio::time::sleep; - use types::*; + use types::{attestation::AttestationBase, *}; use url::Url; use validator_client::{ initialized_validators::{ @@ -542,7 +542,7 @@ mod tests { /// Get a generic, arbitrary attestation for signing. fn get_attestation() -> Attestation { - Attestation { + Attestation::Base(AttestationBase { aggregation_bits: BitList::with_capacity(1).unwrap(), data: AttestationData { slot: <_>::default(), @@ -558,7 +558,7 @@ mod tests { }, }, signature: AggregateSignature::empty(), - } + }) } fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { @@ -778,28 +778,28 @@ mod tests { let first_attestation = || { let mut attestation = get_attestation(); - attestation.data.source.epoch = Epoch::new(1); - attestation.data.target.epoch = Epoch::new(4); + attestation.data_mut().source.epoch = Epoch::new(1); + attestation.data_mut().target.epoch = Epoch::new(4); attestation }; let double_vote_attestation = || { let mut attestation = first_attestation(); - attestation.data.beacon_block_root = Hash256::from_low_u64_be(1); + attestation.data_mut().beacon_block_root = Hash256::from_low_u64_be(1); attestation }; let surrounding_attestation = || { let mut attestation = first_attestation(); - attestation.data.source.epoch = Epoch::new(0); - attestation.data.target.epoch = Epoch::new(5); + attestation.data_mut().source.epoch = Epoch::new(0); + attestation.data_mut().target.epoch = Epoch::new(5); attestation }; let surrounded_attestation = || { let mut attestation = first_attestation(); - attestation.data.source.epoch = Epoch::new(2); - attestation.data.target.epoch = Epoch::new(3); + attestation.data_mut().source.epoch = Epoch::new(2); + attestation.data_mut().target.epoch = Epoch::new(3); attestation }; @@ -901,13 +901,14 @@ mod tests { } #[tokio::test] - async fn prater_base_types() { - test_base_types("prater", 4246).await + async fn mainnet_bellatrix_types() { + test_bellatrix_types("mainnet", 4244).await } #[tokio::test] - async fn prater_altair_types() { - test_altair_types("prater", 4247).await + async fn holesky_bellatrix_types() { + // web3signer does not support forks prior to Bellatrix on Holesky + test_bellatrix_types("holesky", 4247).await } #[tokio::test] diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index d3dffc3d02e..0df687abec5 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -61,3 +61,4 @@ sysinfo = { workspace = true } system_health = { path = 
"../common/system_health" } logging = { workspace = true } strum = { workspace = true } +fdlimit = "0.3.0" diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index ad5f21e5110..95a39c50e48 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -7,7 +7,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, @@ -16,7 +16,7 @@ pub struct InterchangeMetadata { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct InterchangeData { pub pubkey: PublicKeyBytes, pub signed_blocks: Vec, @@ -25,7 +25,7 @@ pub struct InterchangeData { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct SignedBlock { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, @@ -35,7 +35,7 @@ pub struct SignedBlock { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, @@ -46,7 +46,7 @@ pub struct SignedAttestation { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct Interchange { pub metadata: InterchangeMetadata, pub data: Vec, diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index d88bb93a0d5..d99647bc936 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -9,7 +9,7 @@ use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct MultiTestCase { pub name: String, pub genesis_validators_root: Hash256, @@ -17,7 +17,7 @@ pub struct MultiTestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct TestCase { pub should_succeed: bool, pub contains_slashable_data: bool, @@ -27,7 +27,7 @@ pub struct TestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct TestBlock { pub pubkey: PublicKeyBytes, pub slot: 
Slot, @@ -37,7 +37,7 @@ pub struct TestBlock { } #[derive(Debug, Clone, Deserialize, Serialize)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct TestAttestation { pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index c4fa32b611c..e5606d4042a 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -67,9 +67,9 @@ impl From for SigningRoot { } } -impl Into for SigningRoot { - fn into(self) -> Hash256 { - self.0 +impl From for Hash256 { + fn from(from: SigningRoot) -> Hash256 { + from.0 } } diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index b497abd7dde..04554786f6f 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -23,7 +23,7 @@ pub const POOL_SIZE: u32 = 1; #[cfg(not(test))] pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[cfg(test)] -pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(500); +pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(1); /// Supported version of the interchange format. pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 5; diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 1c6b60addb6..30fe508a2c2 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -14,10 +14,7 @@ use std::ops::Deref; use std::sync::Arc; use tokio::time::{sleep, sleep_until, Duration, Instant}; use tree_hash::TreeHash; -use types::{ - AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec, - Slot, -}; +use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; /// Builds an `AttestationService`. pub struct AttestationServiceBuilder { @@ -290,17 +287,21 @@ impl AttestationService { // Then download, sign and publish a `SignedAggregateAndProof` for each // validator that is elected to aggregate for this `slot` and // `committee_index`. - self.produce_and_publish_aggregates(&attestation_data, &validator_duties) - .await - .map_err(move |e| { - crit!( - log, - "Error during attestation routine"; - "error" => format!("{:?}", e), - "committee_index" => committee_index, - "slot" => slot.as_u64(), - ) - })?; + self.produce_and_publish_aggregates( + &attestation_data, + committee_index, + &validator_duties, + ) + .await + .map_err(move |e| { + crit!( + log, + "Error during attestation routine"; + "error" => format!("{:?}", e), + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ) + })?; } Ok(()) @@ -363,9 +364,7 @@ impl AttestationService { let attestation_data = attestation_data_ref; // Ensure that the attestation matches the duties. 
- #[allow(clippy::suspicious_operation_groupings)] - if duty.slot != attestation_data.slot || duty.committee_index != attestation_data.index - { + if !duty.match_attestation_data::(attestation_data, &self.context.eth2_config.spec) { crit!( log, "Inconsistent validator duties during signing"; @@ -378,10 +377,26 @@ impl AttestationService { return None; } - let mut attestation = Attestation { - aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(), - data: attestation_data.clone(), - signature: AggregateSignature::infinity(), + let mut attestation = match Attestation::::empty_for_signing( + duty.committee_index, + duty.committee_length as usize, + attestation_data.slot, + attestation_data.beacon_block_root, + attestation_data.source, + attestation_data.target, + &self.context.eth2_config.spec, + ) { + Ok(attestation) => attestation, + Err(err) => { + crit!( + log, + "Invalid validator duties during signing"; + "validator" => ?duty.pubkey, + "duty" => ?duty, + "err" => ?err, + ); + return None; + } }; match self @@ -434,6 +449,11 @@ impl AttestationService { warn!(log, "No attestations were published"); return Ok(None); } + let fork_name = self + .context + .eth2_config + .spec + .fork_name_at_slot::(attestation_data.slot); // Post the attestations to the BN. match self @@ -447,9 +467,15 @@ impl AttestationService { &metrics::ATTESTATION_SERVICE_TIMES, &[metrics::ATTESTATIONS_HTTP_POST], ); - beacon_node - .post_beacon_pool_attestations(attestations) - .await + if fork_name.electra_enabled() { + beacon_node + .post_beacon_pool_attestations_v2(attestations, fork_name) + .await + } else { + beacon_node + .post_beacon_pool_attestations_v1(attestations) + .await + } }, ) .await @@ -493,6 +519,7 @@ impl AttestationService { async fn produce_and_publish_aggregates( &self, attestation_data: &AttestationData, + committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], ) -> Result<(), String> { let log = self.context.log(); @@ -505,6 +532,12 @@ impl AttestationService { return Ok(()); } + let fork_name = self + .context + .eth2_config + .spec + .fork_name_at_slot::(attestation_data.slot); + let aggregated_attestation = &self .beacon_nodes .first_success( @@ -515,17 +548,36 @@ impl AttestationService { &metrics::ATTESTATION_SERVICE_TIMES, &[metrics::AGGREGATES_HTTP_GET], ); - beacon_node - .get_validator_aggregate_attestation( - attestation_data.slot, - attestation_data.tree_hash_root(), - ) - .await - .map_err(|e| { - format!("Failed to produce an aggregate attestation: {:?}", e) - })? - .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) - .map(|result| result.data) + if fork_name.electra_enabled() { + beacon_node + .get_validator_aggregate_attestation_v2( + attestation_data.slot, + attestation_data.tree_hash_root(), + committee_index, + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? + .ok_or_else(|| { + format!("No aggregate available for {:?}", attestation_data) + }) + .map(|result| result.data) + } else { + beacon_node + .get_validator_aggregate_attestation_v1( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? 
+ .ok_or_else(|| { + format!("No aggregate available for {:?}", attestation_data) + }) + .map(|result| result.data) + } }, ) .await @@ -536,10 +588,7 @@ impl AttestationService { let duty = &duty_and_proof.duty; let selection_proof = duty_and_proof.selection_proof.as_ref()?; - let slot = attestation_data.slot; - let committee_index = attestation_data.index; - - if duty.slot != slot || duty.committee_index != committee_index { + if !duty.match_attestation_data::(attestation_data, &self.context.eth2_config.spec) { crit!(log, "Inconsistent validator duties during signing"); return None; } @@ -596,38 +645,49 @@ impl AttestationService { &metrics::ATTESTATION_SERVICE_TIMES, &[metrics::AGGREGATES_HTTP_POST], ); - beacon_node - .post_validator_aggregate_and_proof(signed_aggregate_and_proofs_slice) - .await + if fork_name.electra_enabled() { + beacon_node + .post_validator_aggregate_and_proof_v2( + signed_aggregate_and_proofs_slice, + fork_name, + ) + .await + } else { + beacon_node + .post_validator_aggregate_and_proof_v1( + signed_aggregate_and_proofs_slice, + ) + .await + } }, ) .await { Ok(()) => { for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = &signed_aggregate_and_proof.message.aggregate; + let attestation = signed_aggregate_and_proof.message().aggregate(); info!( log, "Successfully published attestation"; - "aggregator" => signed_aggregate_and_proof.message.aggregator_index, - "signatures" => attestation.aggregation_bits.num_set_bits(), - "head_block" => format!("{:?}", attestation.data.beacon_block_root), - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), + "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), + "signatures" => attestation.num_set_aggregation_bits(), + "head_block" => format!("{:?}", attestation.data().beacon_block_root), + "committee_index" => attestation.committee_index(), + "slot" => attestation.data().slot.as_u64(), "type" => "aggregated", ); } } Err(e) => { for signed_aggregate_and_proof in signed_aggregate_and_proofs { - let attestation = &signed_aggregate_and_proof.message.aggregate; + let attestation = &signed_aggregate_and_proof.message().aggregate(); crit!( log, "Failed to publish attestation"; "error" => %e, - "aggregator" => signed_aggregate_and_proof.message.aggregator_index, - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), + "aggregator" => signed_aggregate_and_proof.message().aggregator_index(), + "committee_index" => attestation.committee_index(), + "slot" => attestation.data().slot.as_u64(), "type" => "aggregated", ); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 508d69bf7cc..da4bcd1fc52 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -693,8 +693,8 @@ impl SignedBlock { } pub fn num_attestations(&self) -> usize { match self { - SignedBlock::Full(block) => block.signed_block().message().body().attestations().len(), - SignedBlock::Blinded(block) => block.message().body().attestations().len(), + SignedBlock::Full(block) => block.signed_block().message().body().attestations_len(), + SignedBlock::Blinded(block) => block.message().body().attestations_len(), } } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 78be3776a48..36927e4f337 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,34 +1,47 @@ -use clap::{App, Arg}; +use clap::{builder::ArgPredicate, Arg, 
ArgAction, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new("validator_client") - .visible_aliases(&["v", "vc", "validator"]) - .setting(clap::AppSettings::ColoredHelp) +pub fn cli_app() -> Command { + Command::new("validator_client") + .visible_aliases(["v", "vc", "validator"]) + .styles(get_color_style()) + .display_order(0) .about( "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) .arg( - Arg::with_name("beacon-nodes") + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER) + ) + .arg( + Arg::new("beacon-nodes") .long("beacon-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ Default is http://localhost:5052." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("proposer-nodes") + Arg::new("proposer-nodes") .long("proposer-nodes") .value_name("NETWORK_ADDRESSES") .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) // TODO remove this flag in a future release .arg( - Arg::with_name("disable-run-on-all") + Arg::new("disable-run-on-all") .long("disable-run-on-all") .value_name("DISABLE_RUN_ON_ALL") .help("DEPRECATED. Use --broadcast. \ @@ -36,10 +49,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and proposer preparation messages to all beacon nodes provided in the \ `--beacon-nodes flag`. This option changes that behaviour such that these \ api calls only go out to the first available and synced beacon node") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("broadcast") + Arg::new("broadcast") .long("broadcast") .value_name("API_TOPICS") .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ @@ -47,10 +62,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { sync-committee. Default (when flag is omitted) is to broadcast \ subscriptions only." ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("validators-dir") + Arg::new("validators-dir") .long("validators-dir") .alias("validator-dir") .value_name("VALIDATORS_DIR") @@ -59,11 +75,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { each validator along with the common slashing protection database \ and the validator_definitions.yml" ) - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("secrets-dir") + Arg::new("secrets-dir") .long("secrets-dir") .value_name("SECRETS_DIRECTORY") .help( @@ -72,11 +89,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { name is the 0x-prefixed hex representation of the validators voting public \ key. Defaults to ~/.lighthouse/{network}/secrets.", ) - .takes_value(true) + .action(ArgAction::Set) + .conflicts_with("datadir") + .display_order(0) ) .arg( - Arg::with_name("init-slashing-protection") + Arg::new("init-slashing-protection") .long("init-slashing-protection") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not require the slashing protection database to exist before \ running. 
You SHOULD NOT use this flag unless you're certain that a new \ @@ -84,75 +105,100 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { will have been initialized when you imported your validator keys. If you \ misplace your database and then run with this flag you risk being slashed." ) + .display_order(0) ) .arg( - Arg::with_name("disable-auto-discover") + Arg::new("disable-auto-discover") .long("disable-auto-discover") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help( "If present, do not attempt to discover new validators in the validators-dir. Validators \ will need to be manually added to the validator_definitions.yml file." ) + .display_order(0) ) .arg( - Arg::with_name("use-long-timeouts") + Arg::new("use-long-timeouts") .long("use-long-timeouts") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .help("If present, the validator client will use longer timeouts for requests \ made to the beacon node. This flag is generally not recommended, \ longer timeouts can cause missed duties when fallbacks are used.") + .display_order(0) ) .arg( - Arg::with_name("beacon-nodes-tls-certs") + Arg::new("beacon-nodes-tls-certs") .long("beacon-nodes-tls-certs") .value_name("CERTIFICATE-FILES") - .takes_value(true) + .action(ArgAction::Set) .help("Comma-separated paths to custom TLS certificates to use when connecting \ to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. Commas must only be used as a \ delimiter, and must not be part of the certificate path.") + .display_order(0) ) // This overwrites the graffiti configured in the beacon node. .arg( - Arg::with_name("graffiti") + Arg::new("graffiti") .long("graffiti") .help("Specify your custom graffiti to be included in blocks.") .value_name("GRAFFITI") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("graffiti-file") + Arg::new("graffiti-file") .long("graffiti-file") .help("Specify a graffiti file to load validator graffitis from.") .value_name("GRAFFITI-FILE") - .takes_value(true) + .action(ArgAction::Set) .conflicts_with("graffiti") + .display_order(0) ) .arg( - Arg::with_name("suggested-fee-recipient") + Arg::new("suggested-fee-recipient") .long("suggested-fee-recipient") .help("Once the merge has happened, this address will receive transaction fees \ from blocks proposed by this validator client. If a fee recipient is \ configured in the validator definitions it takes priority over this value.") .value_name("FEE-RECIPIENT") - .takes_value(true) + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("produce-block-v3") + Arg::new("produce-block-v3") .long("produce-block-v3") +<<<<<<< HEAD .help("This flag is deprecated and is no longer in use.") .takes_value(false) - ) - .arg( - Arg::with_name("distributed") +======= + .help("Enable block production via the block v3 endpoint for this validator client. \ + This should only be enabled when paired with a beacon node \ + that has this endpoint implemented. 
This flag will be enabled by default in \ + future.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) +>>>>>>> 8a32df756ddaa8831182c016311f25a3c26cf36f + ) + .arg( + Arg::new("distributed") .long("distributed") .help("Enables functionality required for running the validator in a distributed validator cluster.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* REST API related arguments */ .arg( - Arg::with_name("http") + Arg::new("http") .long("http") .help("Enable the RESTful HTTP API server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is @@ -162,7 +208,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { * must also be used in order to make it clear to the user that this is unsafe. */ .arg( - Arg::with_name("http-address") + Arg::new("http-address") .long("http-address") .requires("http") .value_name("ADDRESS") @@ -172,26 +218,31 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { `--unencrypted-http-transport` flag to ensure the user is aware of the \ risks involved. For access via the Internet, users should apply \ transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.") - .requires("unencrypted-http-transport"), + .requires("unencrypted-http-transport") + .display_order(0) ) .arg( - Arg::with_name("unencrypted-http-transport") - .long("unencrypted-http-transport") - .help("This is a safety flag to ensure that the user is aware that the http \ - transport is unencrypted and using a custom HTTP address is unsafe.") - .requires("http-address"), + Arg::new("unencrypted-http-transport") + .long("unencrypted-http-transport") + .help("This is a safety flag to ensure that the user is aware that the http \ + transport is unencrypted and using a custom HTTP address is unsafe.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .requires("http-address") + .display_order(0) ) .arg( - Arg::with_name("http-port") + Arg::new("http-port") .long("http-port") .requires("http") .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("http", None, "5062") - .takes_value(true), + .default_value_if("http", ArgPredicate::IsPresent, "5062") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-origin") + Arg::new("http-allow-origin") .long("http-allow-origin") .requires("http") .value_name("ORIGIN") @@ -199,10 +250,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5062).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("http-allow-keystore-export") + Arg::new("http-allow-keystore-export") .long("http-allow-keystore-export") .requires("http") .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ @@ -210,44 +262,52 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { consumers who have access to the API token. 
This method is useful for \ exporting validators, however it should be used with caution since it \ exposes private key data to authorized users.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("http-store-passwords-in-secrets-dir") + Arg::new("http-store-passwords-in-secrets-dir") .long("http-store-passwords-in-secrets-dir") .requires("http") .help("If present, any validators created via the HTTP will have keystore \ passwords stored in the secrets-dir rather than the validator \ definitions file.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* Prometheus metrics HTTP server related arguments */ .arg( - Arg::with_name("metrics") + Arg::new("metrics") .long("metrics") .help("Enable the Prometheus metrics HTTP server. Disabled by default.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("metrics-address") + Arg::new("metrics-address") .long("metrics-address") .requires("metrics") .value_name("ADDRESS") .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "127.0.0.1") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-port") + Arg::new("metrics-port") .long("metrics-port") .requires("metrics") .value_name("PORT") .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", None, "5064") - .takes_value(true), + .default_value_if("metrics", ArgPredicate::IsPresent, "5064") + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("metrics-allow-origin") + Arg::new("metrics-allow-origin") .long("metrics-allow-origin") .requires("metrics") .value_name("ORIGIN") @@ -255,22 +315,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Use * to allow any origin (not recommended in production). \ If no value is supplied, the CORS allowed origin is set to the listen \ address of this server (e.g., http://localhost:5064).") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-high-validator-count-metrics") + Arg::new("enable-high-validator-count-metrics") .long("enable-high-validator-count-metrics") .help("Enable per validator metrics for > 64 validators. \ Note: This flag is automatically enabled for <= 64 validators. \ Enabling this metric for higher validator counts will lead to higher volume \ of prometheus metrics being collected.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Explorer metrics */ .arg( - Arg::with_name("monitoring-endpoint") + Arg::new("monitoring-endpoint") .long("monitoring-endpoint") .value_name("ADDRESS") .help("Enables the monitoring service for sending system metrics to a remote endpoint. \ @@ -279,19 +342,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. 
Always use a HTTPS connection \ and never provide an untrusted URL.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("monitoring-endpoint-period") + Arg::new("monitoring-endpoint-period") .long("monitoring-endpoint-period") .value_name("SECONDS") .help("Defines how many seconds to wait between each message sent to \ the monitoring-endpoint. Default: 60s") .requires("monitoring-endpoint") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("enable-doppelganger-protection") + Arg::new("enable-doppelganger-protection") .long("enable-doppelganger-protection") .value_name("ENABLE_DOPPELGANGER_PROTECTION") .help("If this flag is set, Lighthouse will delay startup for three epochs and \ @@ -303,56 +368,71 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to avoid potentially committing a slashable offense. Use this flag in order to \ ENABLE this functionality, without this flag Lighthouse will begin attesting \ immediately.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-proposals") + Arg::new("builder-proposals") .long("builder-proposals") .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("builder-registration-timestamp-override") + Arg::new("builder-registration-timestamp-override") .long("builder-registration-timestamp-override") .alias("builder-registration-timestamp-override") .help("This flag takes a unix timestamp value that will be used to override the \ timestamp used in the builder api registration") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("gas-limit") + Arg::new("gas-limit") .long("gas-limit") .value_name("INTEGER") - .takes_value(true) + .action(ArgAction::Set) .help("The gas limit to be used in all builder proposals for all validators managed \ by this validator client. Note this will not necessarily be used if the gas limit \ set here moves too far from the previous block's gas limit. [default: 30,000,000]") - .requires("builder-proposals"), + .requires("builder-proposals") + .display_order(0) + ) + .arg( + Arg::new("disable-latency-measurement-service") + .long("disable-latency-measurement-service") + .help("Disables the service that periodically attempts to measure latency to BNs.") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("latency-measurement-service") + Arg::new("latency-measurement-service") .long("latency-measurement-service") - .value_name("BOOLEAN") - .help("Set to 'true' to enable a service that periodically attempts to measure latency to BNs. \ - Set to 'false' to disable.") - .default_value("true") - .takes_value(true), + .help("DEPRECATED") + .action(ArgAction::Set) + .help_heading(FLAG_HEADER) + .display_order(0) + .hide(true) ) .arg( - Arg::with_name("validator-registration-batch-size") + Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") .value_name("INTEGER") .help("Defines the number of validators per \ validator/register_validator request sent to the BN. 
This value \ can be reduced to avoid timeouts from builders.") .default_value("500") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("builder-boost-factor") + Arg::new("builder-boost-factor") .long("builder-boost-factor") .value_name("UINT64") .help("Defines the boost factor, \ @@ -360,17 +440,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { when choosing between a builder payload header and payload from \ the local execution node.") .conflicts_with("prefer-builder-proposals") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("prefer-builder-proposals") + Arg::new("prefer-builder-proposals") .long("prefer-builder-proposals") .help("If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.") - .takes_value(false), + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) .arg( - Arg::with_name("disable-slashing-protection-web3signer") + Arg::new("disable-slashing-protection-web3signer") .long("disable-slashing-protection-web3signer") .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \ reduce the I/O burden on the VC but is only safe if slashing protection \ @@ -378,26 +461,30 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ ENABLING WEB3SIGNER'S SLASHING PROTECTION.") - .takes_value(false) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0) ) /* * Experimental/development options. */ .arg( - Arg::with_name("web3-signer-keep-alive-timeout") + Arg::new("web3-signer-keep-alive-timeout") .long("web3-signer-keep-alive-timeout") .value_name("MILLIS") .default_value("20000") .help("Keep-alive timeout for each web3signer connection. Set to 'null' to never \ timeout") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) .arg( - Arg::with_name("web3-signer-max-idle-connections") + Arg::new("web3-signer-max-idle-connections") .long("web3-signer-max-idle-connections") .value_name("COUNT") .help("Maximum number of idle connections to maintain per web3signer host. 
Default \ is unlimited.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0) ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 0a9823f49ce..75b7cb16025 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -148,15 +148,15 @@ impl Config { .unwrap_or_else(|| PathBuf::from(".")); let (mut validator_dir, mut secrets_dir) = (None, None); - if cli_args.value_of("datadir").is_some() { + if cli_args.get_one::("datadir").is_some() { let base_dir: PathBuf = parse_required(cli_args, "datadir")?; validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); } - if cli_args.value_of("validators-dir").is_some() { + if cli_args.get_one::("validators-dir").is_some() { validator_dir = Some(parse_required(cli_args, "validators-dir")?); } - if cli_args.value_of("secrets-dir").is_some() { + if cli_args.get_one::("secrets-dir").is_some() { secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); } @@ -192,11 +192,11 @@ impl Config { .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); - config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); - config.use_long_timeouts = cli_args.is_present("use-long-timeouts"); + config.disable_auto_discover = cli_args.get_flag("disable-auto-discover"); + config.init_slashing_protection = cli_args.get_flag("init-slashing-protection"); + config.use_long_timeouts = cli_args.get_flag("use-long-timeouts"); - if let Some(graffiti_file_path) = cli_args.value_of("graffiti-file") { + if let Some(graffiti_file_path) = cli_args.get_one::("graffiti-file") { let mut graffiti_file = GraffitiFile::new(graffiti_file_path.into()); graffiti_file .read_graffiti_file() @@ -205,7 +205,7 @@ impl Config { info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path); } - if let Some(input_graffiti) = cli_args.value_of("graffiti") { + if let Some(input_graffiti) = cli_args.get_one::("graffiti") { let graffiti_bytes = input_graffiti.as_bytes(); if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { return Err(format!( @@ -234,11 +234,11 @@ impl Config { config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); } - if cli_args.is_present("distributed") { + if cli_args.get_flag("distributed") { config.distributed = true; } - if cli_args.is_present("disable-run-on-all") { + if cli_args.get_flag("disable-run-on-all") { warn!( log, "The --disable-run-on-all flag is deprecated"; @@ -246,7 +246,7 @@ impl Config { ); config.broadcast_topics = vec![]; } - if let Some(broadcast_topics) = cli_args.value_of("broadcast") { + if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') .filter(|t| *t != "none") @@ -278,12 +278,12 @@ impl Config { * Http API server */ - if cli_args.is_present("http") { + if cli_args.get_flag("http") { config.http_api.enabled = true; } - if let Some(address) = cli_args.value_of("http-address") { - if cli_args.is_present("unencrypted-http-transport") { + if let Some(address) = cli_args.get_one::("http-address") { + if cli_args.get_flag("unencrypted-http-transport") { config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; @@ -295,13 +295,13 @@ impl Config { } } - if let Some(port) = cli_args.value_of("http-port") { + if let Some(port) = cli_args.get_one::("http-port") { 
config.http_api.listen_port = port .parse::() .map_err(|_| "http-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -310,11 +310,11 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-allow-keystore-export") { + if cli_args.get_flag("http-allow-keystore-export") { config.http_api.allow_keystore_export = true; } - if cli_args.is_present("http-store-passwords-in-secrets-dir") { + if cli_args.get_flag("http-store-passwords-in-secrets-dir") { config.http_api.store_passwords_in_secrets_dir = true; } @@ -322,27 +322,27 @@ impl Config { * Prometheus metrics HTTP server */ - if cli_args.is_present("metrics") { + if cli_args.get_flag("metrics") { config.http_metrics.enabled = true; } - if cli_args.is_present("enable-high-validator-count-metrics") { + if cli_args.get_flag("enable-high-validator-count-metrics") { config.enable_high_validator_count_metrics = true; } - if let Some(address) = cli_args.value_of("metrics-address") { + if let Some(address) = cli_args.get_one::("metrics-address") { config.http_metrics.listen_addr = address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.value_of("metrics-port") { + if let Some(port) = cli_args.get_one::("metrics-port") { config.http_metrics.listen_port = port .parse::() .map_err(|_| "metrics-port is not a valid u16.")?; } - if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. 
hyper::header::HeaderValue::from_str(allow_origin) @@ -351,14 +351,14 @@ impl Config { config.http_metrics.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + if cli_args.get_flag(DISABLE_MALLOC_TUNING_FLAG) { config.http_metrics.allocator_metrics_enabled = false; } /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { let update_period_secs = clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; config.monitoring_api = Some(monitoring_api::Config { @@ -369,20 +369,20 @@ impl Config { }); } - if cli_args.is_present("enable-doppelganger-protection") { + if cli_args.get_flag("enable-doppelganger-protection") { config.enable_doppelganger_protection = true; } - if cli_args.is_present("builder-proposals") { + if cli_args.get_flag("builder-proposals") { config.builder_proposals = true; } - if cli_args.is_present("prefer-builder-proposals") { + if cli_args.get_flag("prefer-builder-proposals") { config.prefer_builder_proposals = true; } config.gas_limit = cli_args - .value_of("gas-limit") + .get_one::("gas-limit") .map(|gas_limit| { gas_limit .parse::() @@ -391,7 +391,7 @@ impl Config { .transpose()?; if let Some(registration_timestamp_override) = - cli_args.value_of("builder-registration-timestamp-override") + cli_args.get_one::("builder-registration-timestamp-override") { config.builder_registration_timestamp_override = Some( registration_timestamp_override @@ -403,7 +403,18 @@ impl Config { config.builder_boost_factor = parse_optional(cli_args, "builder-boost-factor")?; config.enable_latency_measurement_service = - parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true); + !cli_args.get_flag("disable-latency-measurement-service"); + + if cli_args + .get_one::("latency-measurement-service") + .is_some() + { + warn!( + log, + "latency-measurement-service flag"; + "note" => "deprecated flag has no effect and should be removed" + ); + } config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; @@ -412,7 +423,7 @@ impl Config { } config.enable_web3signer_slashing_protection = - if cli_args.is_present("disable-slashing-protection-web3signer") { + if cli_args.get_flag("disable-slashing-protection-web3signer") { warn!( log, "Slashing protection for remote keys disabled"; diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 442a950ddcd..9f93795e29f 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -1115,7 +1115,7 @@ mod test { ) // All validators should still be disabled. .assert_all_disabled() - // The states of all validators should be jammed with `u64::max_value()`. + // The states of all validators should be jammed with `u64::MAX`. 
.assert_all_states(&DoppelgangerState { next_check_epoch: starting_epoch + 1, remaining_epochs: u64::MAX, @@ -1347,7 +1347,7 @@ mod test { ) .assert_all_states(&DoppelgangerState { next_check_epoch: initial_epoch + 1, - remaining_epochs: u64::max_value(), + remaining_epochs: u64::MAX, }); } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 15e5c45e0d5..880f0eaa488 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -215,6 +215,8 @@ pub struct DutiesService { pub sync_duties: SyncDutiesMap, /// Provides the canonical list of locally-managed validators. pub validator_store: Arc>, + /// Maps unknown validator pubkeys to the next slot time when a poll should be conducted again. + pub unknown_validator_next_poll_slots: RwLock>, /// Tracks the current slot. pub slot_clock: T, /// Provides HTTP access to remote beacon nodes. @@ -489,6 +491,24 @@ async fn poll_validator_indices( .is_some(); if !is_known { + let current_slot_opt = duties_service.slot_clock.now(); + + if let Some(current_slot) = current_slot_opt { + let is_first_slot_of_epoch = current_slot % E::slots_per_epoch() == 0; + + // Query an unknown validator later if it was queried within the last epoch, or if + // the current slot is the first slot of an epoch. + let poll_later = duties_service + .unknown_validator_next_poll_slots + .read() + .get(&pubkey) + .map(|&poll_slot| poll_slot > current_slot || is_first_slot_of_epoch) + .unwrap_or(false); + if poll_later { + continue; + } + } + // Query the remote BN to resolve a pubkey to a validator index. let download_result = duties_service .beacon_nodes @@ -533,10 +553,23 @@ async fn poll_validator_indices( .initialized_validators() .write() .set_index(&pubkey, response.data.index); + + duties_service + .unknown_validator_next_poll_slots + .write() + .remove(&pubkey); } // This is not necessarily an error, it just means the validator is not yet known to // the beacon chain. Ok(None) => { + if let Some(current_slot) = current_slot_opt { + let next_poll_slot = current_slot.saturating_add(E::slots_per_epoch()); + duties_service + .unknown_validator_next_poll_slots + .write() + .insert(pubkey, next_poll_slot); + } + debug!( log, "Validator without index"; @@ -897,7 +930,7 @@ async fn poll_beacon_attesters_for_epoch( "Attester duties re-org"; "prior_dependent_root" => %prior_dependent_root, "dependent_root" => %dependent_root, - "msg" => "this may happen from time to time" + "note" => "this may happen from time to time" ) } *mut_value = (dependent_root, duty_and_proof); diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index e688792ddc1..32035caf473 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -1,85 +1,53 @@ -use eth2::lighthouse_vc::{PK_LEN, SECRET_PREFIX as PK_PREFIX}; use filesystem::create_with_600_perms; -use libsecp256k1::{Message, PublicKey, SecretKey}; -use rand::thread_rng; -use ring::digest::{digest, SHA256}; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; use std::fs; use std::path::{Path, PathBuf}; use warp::Filter; -/// The name of the file which stores the secret key. -/// -/// It is purposefully opaque to prevent users confusing it with the "secret" that they need to -/// share with API consumers (which is actually the public key). -pub const SK_FILENAME: &str = ".secp-sk"; - -/// Length of the raw secret key, in bytes. 
-pub const SK_LEN: usize = 32; - -/// The name of the file which stores the public key. -/// -/// For users, this public key is a "secret" that can be shared with API consumers to provide them -/// access to the API. We avoid calling it a "public" key to users, since they should not post this -/// value in a public forum. +/// The name of the file which stores the API token. pub const PK_FILENAME: &str = "api-token.txt"; -/// Contains a `secp256k1` keypair that is saved-to/loaded-from disk on instantiation. The keypair -/// is used for authorization/authentication for requests/responses on the HTTP API. +pub const PK_LEN: usize = 33; + +/// Contains a randomly generated string which is used for authorization of requests to the HTTP API. /// /// Provides convenience functions to ultimately provide: /// -/// - A signature across outgoing HTTP responses, applied to the `Signature` header. /// - Verification of proof-of-knowledge of the public key in `self` for incoming HTTP requests, /// via the `Authorization` header. /// /// The aforementioned scheme was first defined here: /// /// https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855 +/// +/// This scheme has since been tweaked to remove VC response signing and secp256k1 key generation. +/// https://github.com/sigp/lighthouse/issues/5423 pub struct ApiSecret { - pk: PublicKey, - sk: SecretKey, + pk: String, pk_path: PathBuf, } impl ApiSecret { - /// If both the secret and public keys are already on-disk, parse them and ensure they're both - /// from the same keypair. + /// If the public key is already on-disk, use it. /// - /// The provided `dir` is a directory containing two files, `SK_FILENAME` and `PK_FILENAME`. + /// The provided `dir` is a directory containing `PK_FILENAME`. /// - /// If either the secret or public key files are missing on disk, create a new keypair and + /// If the public key file is missing on disk, create a new key and /// write it to disk (over-writing any existing files). 
pub fn create_or_open>(dir: P) -> Result { - let sk_path = dir.as_ref().join(SK_FILENAME); let pk_path = dir.as_ref().join(PK_FILENAME); - if !(sk_path.exists() && pk_path.exists()) { - let sk = SecretKey::random(&mut thread_rng()); - let pk = PublicKey::from_secret_key(&sk); - - // Create and write the secret key to file with appropriate permissions - create_with_600_perms( - &sk_path, - serde_utils::hex::encode(sk.serialize()).as_bytes(), - ) - .map_err(|e| { - format!( - "Unable to create file with permissions for {:?}: {:?}", - sk_path, e - ) - })?; + if !pk_path.exists() { + let length = PK_LEN; + let pk: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect(); // Create and write the public key to file with appropriate permissions - create_with_600_perms( - &pk_path, - format!( - "{}{}", - PK_PREFIX, - serde_utils::hex::encode(&pk.serialize_compressed()[..]) - ) - .as_bytes(), - ) - .map_err(|e| { + create_with_600_perms(&pk_path, pk.to_string().as_bytes()).map_err(|e| { format!( "Unable to create file with permissions for {:?}: {:?}", pk_path, e @@ -87,78 +55,18 @@ impl ApiSecret { })?; } - let sk = fs::read(&sk_path) - .map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e)) - .and_then(|bytes| { - serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) - .map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME)) - }) - .and_then(|bytes| { - if bytes.len() == SK_LEN { - let mut array = [0; SK_LEN]; - array.copy_from_slice(&bytes); - SecretKey::parse(&array).map_err(|e| format!("invalid {}: {}", SK_FILENAME, e)) - } else { - Err(format!( - "{} expected {} bytes not {}", - SK_FILENAME, - SK_LEN, - bytes.len() - )) - } - })?; - let pk = fs::read(&pk_path) - .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e)) - .and_then(|bytes| { - let hex = - String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?; - if let Some(stripped) = hex.strip_prefix(PK_PREFIX) { - serde_utils::hex::decode(stripped) - .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME)) - } else { - Err(format!("unable to parse {}", SK_FILENAME)) - } - }) - .and_then(|bytes| { - if bytes.len() == PK_LEN { - let mut array = [0; PK_LEN]; - array.copy_from_slice(&bytes); - PublicKey::parse_compressed(&array) - .map_err(|e| format!("invalid {}: {}", PK_FILENAME, e)) - } else { - Err(format!( - "{} expected {} bytes not {}", - PK_FILENAME, - PK_LEN, - bytes.len() - )) - } - })?; + .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))? + .iter() + .map(|&c| char::from(c)) + .collect(); - // Ensure that the keys loaded from disk are indeed a pair. - if PublicKey::from_secret_key(&sk) != pk { - fs::remove_file(&sk_path) - .map_err(|e| format!("unable to remove {}: {}", SK_FILENAME, e))?; - fs::remove_file(&pk_path) - .map_err(|e| format!("unable to remove {}: {}", PK_FILENAME, e))?; - return Err(format!( - "{:?} does not match {:?} and the files have been deleted. Please try again.", - sk_path, pk_path - )); - } - - Ok(Self { pk, sk, pk_path }) - } - - /// Returns the public key of `self` as a 0x-prefixed hex string. - fn pubkey_string(&self) -> String { - serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) + Ok(Self { pk, pk_path }) } /// Returns the API token. 
pub fn api_token(&self) -> String { - format!("{}{}", PK_PREFIX, self.pubkey_string()) + self.pk.clone() } /// Returns the path for the API token file @@ -196,16 +104,4 @@ impl ApiSecret { .untuple_one() .boxed() } - - /// Returns a closure which produces a signature over some bytes using the secret key in - /// `self`. The signature is a 32-byte hash formatted as a 0x-prefixed string. - pub fn signer(&self) -> impl Fn(&[u8]) -> String + Clone { - let sk = self.sk; - move |input: &[u8]| -> String { - let message = - Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes"); - let (signature, _) = libsecp256k1::sign(&message, &sk); - serde_utils::hex::encode(signature.serialize_der().as_ref()) - } - } } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index a4480195e59..3d7cab8e5e0 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -45,15 +45,8 @@ use task_executor::TaskExecutor; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; -use warp::{ - http::{ - header::{HeaderValue, CONTENT_TYPE}, - response::Response, - StatusCode, - }, - sse::Event, - Filter, -}; +use warp::{sse::Event, Filter}; +use warp_utils::task::blocking_json_task; #[derive(Debug)] pub enum Error { @@ -176,9 +169,6 @@ pub fn serve( } }; - let signer = ctx.api_secret.signer(); - let signer = warp::any().map(move || signer.clone()); - let inner_validator_store = ctx.validator_store.clone(); let validator_store_filter = warp::any() .map(move || inner_validator_store.clone()) @@ -270,9 +260,8 @@ pub fn serve( let get_node_version = warp::path("lighthouse") .and(warp::path("version")) .and(warp::path::end()) - .and(signer.clone()) - .and_then(|signer| { - blocking_signed_json_task(signer, move || { + .then(|| { + blocking_json_task(move || { Ok(api_types::GenericResponse::from(api_types::VersionData { version: version_with_platform(), })) @@ -283,9 +272,8 @@ pub fn serve( let get_lighthouse_health = warp::path("lighthouse") .and(warp::path("health")) .and(warp::path::end()) - .and(signer.clone()) - .and_then(|signer| { - blocking_signed_json_task(signer, move || { + .then(|| { + blocking_json_task(move || { eth2::lighthouse::Health::observe() .map(api_types::GenericResponse::from) .map_err(warp_utils::reject::custom_bad_request) @@ -297,9 +285,8 @@ pub fn serve( .and(warp::path("spec")) .and(warp::path::end()) .and(spec_filter.clone()) - .and(signer.clone()) - .and_then(|spec: Arc<_>, signer| { - blocking_signed_json_task(signer, move || { + .then(|spec: Arc<_>| { + blocking_json_task(move || { let config = ConfigAndPreset::from_chain_spec::(&spec, None); Ok(api_types::GenericResponse::from(config)) }) @@ -310,9 +297,8 @@ pub fn serve( .and(warp::path("validators")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then(|validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then(|validator_store: Arc>| { + blocking_json_task(move || { let validators = validator_store .initialized_validators() .read() @@ -335,10 +321,9 @@ pub fn serve( .and(warp::path::param::()) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( - |validator_pubkey: PublicKey, validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then( + |validator_pubkey: PublicKey, validator_store: Arc>| { + 
blocking_json_task(move || { let validator = validator_store .initialized_validators() .read() @@ -370,9 +355,8 @@ pub fn serve( .and(system_info_filter) .and(app_start_filter) .and(validator_dir_filter.clone()) - .and(signer.clone()) - .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| { - blocking_signed_json_task(signer, move || { + .then(|sysinfo, app_start: std::time::Instant, val_dir| { + blocking_json_task(move || { let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_vc( sysinfo, val_dir, app_uptime, @@ -387,15 +371,13 @@ pub fn serve( .and(validator_store_filter.clone()) .and(graffiti_file_filter.clone()) .and(graffiti_flag_filter) - .and(signer.clone()) .and(log_filter.clone()) - .and_then( + .then( |validator_store: Arc>, graffiti_file: Option, graffiti_flag: Option, - signer, log| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { let mut result = HashMap::new(); for (key, graffiti_definition) in validator_store .initialized_validators() @@ -425,17 +407,15 @@ pub fn serve( .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter.clone()) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( move |body: Vec, validator_dir: PathBuf, secrets_dir: PathBuf, validator_store: Arc>, spec: Arc, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let (validators, mnemonic) = @@ -472,17 +452,15 @@ pub fn serve( .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) .and(spec_filter) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( move |body: api_types::CreateValidatorsMnemonicRequest, validator_dir: PathBuf, secrets_dir: PathBuf, validator_store: Arc>, spec: Arc, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); if let Some(handle) = task_executor.handle() { let mnemonic = @@ -521,16 +499,14 @@ pub fn serve( .and(validator_dir_filter.clone()) .and(secrets_dir_filter.clone()) .and(validator_store_filter.clone()) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( move |body: api_types::KeystoreValidatorsPostRequest, validator_dir: PathBuf, secrets_dir: PathBuf, validator_store: Arc>, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { // Check to ensure the password is correct. 
let keypair = body .keystore @@ -611,14 +587,12 @@ pub fn serve( .and(warp::path::end()) .and(warp::body::json()) .and(validator_store_filter.clone()) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( |body: Vec, validator_store: Arc>, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { if let Some(handle) = task_executor.handle() { let web3signers: Vec = body .into_iter() @@ -666,16 +640,14 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(graffiti_file_filter.clone()) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: Arc>, graffiti_file: Option, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { if body.graffiti.is_some() && graffiti_file.is_some() { return Err(warp_utils::reject::custom_bad_request( "Unable to update graffiti as the \"--graffiti-file\" flag is set" @@ -784,10 +756,9 @@ pub fn serve( // GET /lighthouse/auth let get_auth = warp::path("lighthouse").and(warp::path("auth").and(warp::path::end())); let get_auth = get_auth - .and(signer.clone()) .and(api_token_path_filter) - .and_then(|signer, token_path: PathBuf| { - blocking_signed_json_task(signer, move || { + .then(move |token_path: PathBuf| { + blocking_json_task(move || { Ok(AuthResponse { token_path: token_path.display().to_string(), }) @@ -799,23 +770,20 @@ pub fn serve( .and(warp::path("keystores")) .and(warp::path::end()) .and(warp::body::json()) - .and(signer.clone()) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) .and(log_filter.clone()) - .and_then( - move |request, signer, validator_store, task_executor, log| { - blocking_signed_json_task(signer, move || { - if allow_keystore_export { - keystores::export(request, validator_store, task_executor, log) - } else { - Err(warp_utils::reject::custom_bad_request( - "keystore export is disabled".to_string(), - )) - } - }) - }, - ); + .then(move |request, validator_store, task_executor, log| { + blocking_json_task(move || { + if allow_keystore_export { + keystores::export(request, validator_store, task_executor, log) + } else { + Err(warp_utils::reject::custom_bad_request( + "keystore export is disabled".to_string(), + )) + } + }) + }); // Standard key-manager endpoints. 
let eth_v1 = warp::path("eth").and(warp::path("v1")); @@ -829,10 +797,9 @@ pub fn serve( .and(warp::path("feerecipient")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( - |validator_pubkey: PublicKey, validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then( + |validator_pubkey: PublicKey, validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -869,13 +836,11 @@ pub fn serve( .and(warp::body::json()) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( + .then( |validator_pubkey: PublicKey, request: api_types::UpdateFeeRecipientRequest, - validator_store: Arc>, - signer| { - blocking_signed_json_task(signer, move || { + validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -909,10 +874,9 @@ pub fn serve( .and(warp::path("feerecipient")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( - |validator_pubkey: PublicKey, validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then( + |validator_pubkey: PublicKey, validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -946,10 +910,9 @@ pub fn serve( .and(warp::path("gas_limit")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( - |validator_pubkey: PublicKey, validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then( + |validator_pubkey: PublicKey, validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -978,13 +941,11 @@ pub fn serve( .and(warp::body::json()) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( + .then( |validator_pubkey: PublicKey, request: api_types::UpdateGasLimitRequest, - validator_store: Arc>, - signer| { - blocking_signed_json_task(signer, move || { + validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -1018,10 +979,9 @@ pub fn serve( .and(warp::path("gas_limit")) .and(warp::path::end()) .and(validator_store_filter.clone()) - .and(signer.clone()) - .and_then( - |validator_pubkey: PublicKey, validator_store: Arc>, signer| { - blocking_signed_json_task(signer, move || { + .then( + |validator_pubkey: PublicKey, validator_store: Arc>| { + blocking_json_task(move || { if validator_store .initialized_validators() .read() @@ -1058,17 +1018,15 @@ pub fn serve( .and(validator_store_filter.clone()) .and(slot_clock_filter) .and(log_filter.clone()) - .and(signer.clone()) .and(task_executor_filter.clone()) - .and_then( + .then( |pubkey: PublicKey, query: api_types::VoluntaryExitQuery, validator_store: Arc>, slot_clock: T, log, - signer, task_executor: TaskExecutor| { - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { if let Some(handle) = task_executor.handle() { let signed_voluntary_exit = handle.block_on(create_signed_voluntary_exit( @@ -1096,13 +1054,11 @@ pub fn serve( .and(warp::path::end()) .and(validator_store_filter.clone()) .and(graffiti_flag_filter) - .and(signer.clone()) - .and_then( + .then( |pubkey: PublicKey, validator_store: Arc>, - graffiti_flag: Option, - signer| { - blocking_signed_json_task(signer, move || { + graffiti_flag: Option| { + blocking_json_task(move || { let graffiti = 
get_graffiti(pubkey.clone(), validator_store, graffiti_flag)?; Ok(GenericResponse::from(GetGraffitiResponse { pubkey: pubkey.into(), @@ -1121,14 +1077,12 @@ pub fn serve( .and(warp::path::end()) .and(validator_store_filter.clone()) .and(graffiti_file_filter.clone()) - .and(signer.clone()) - .and_then( + .then( |pubkey: PublicKey, query: SetGraffitiRequest, validator_store: Arc>, - graffiti_file: Option, - signer| { - blocking_signed_json_task(signer, move || { + graffiti_file: Option| { + blocking_json_task(move || { if graffiti_file.is_some() { return Err(warp_utils::reject::invalid_auth( "Unable to update graffiti as the \"--graffiti-file\" flag is set" @@ -1149,13 +1103,11 @@ pub fn serve( .and(warp::path::end()) .and(validator_store_filter.clone()) .and(graffiti_file_filter.clone()) - .and(signer.clone()) - .and_then( + .then( |pubkey: PublicKey, validator_store: Arc>, - graffiti_file: Option, - signer| { - blocking_signed_json_task(signer, move || { + graffiti_file: Option| { + blocking_json_task(move || { if graffiti_file.is_some() { return Err(warp_utils::reject::invalid_auth( "Unable to delete graffiti as the \"--graffiti-file\" flag is set" @@ -1169,32 +1121,24 @@ pub fn serve( .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); // GET /eth/v1/keystores - let get_std_keystores = std_keystores - .and(signer.clone()) - .and(validator_store_filter.clone()) - .and_then(|signer, validator_store: Arc>| { - blocking_signed_json_task(signer, move || Ok(keystores::list(validator_store))) - }); + let get_std_keystores = std_keystores.and(validator_store_filter.clone()).then( + |validator_store: Arc>| { + blocking_json_task(move || Ok(keystores::list(validator_store))) + }, + ); // POST /eth/v1/keystores let post_std_keystores = std_keystores .and(warp::body::json()) - .and(signer.clone()) .and(validator_dir_filter) .and(secrets_dir_filter) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) .and(log_filter.clone()) - .and_then( - move |request, - signer, - validator_dir, - secrets_dir, - validator_store, - task_executor, - log| { + .then( + move |request, validator_dir, secrets_dir, validator_store, task_executor, log| { let secrets_dir = store_passwords_in_secrets_dir.then_some(secrets_dir); - blocking_signed_json_task(signer, move || { + blocking_json_task(move || { keystores::import( request, validator_dir, @@ -1210,33 +1154,30 @@ pub fn serve( // DELETE /eth/v1/keystores let delete_std_keystores = std_keystores .and(warp::body::json()) - .and(signer.clone()) .and(validator_store_filter.clone()) .and(task_executor_filter.clone()) .and(log_filter.clone()) - .and_then(|request, signer, validator_store, task_executor, log| { - blocking_signed_json_task(signer, move || { + .then(|request, validator_store, task_executor, log| { + blocking_json_task(move || { keystores::delete(request, validator_store, task_executor, log) }) }); // GET /eth/v1/remotekeys - let get_std_remotekeys = std_remotekeys - .and(signer.clone()) - .and(validator_store_filter.clone()) - .and_then(|signer, validator_store: Arc>| { - blocking_signed_json_task(signer, move || Ok(remotekeys::list(validator_store))) - }); + let get_std_remotekeys = std_remotekeys.and(validator_store_filter.clone()).then( + |validator_store: Arc>| { + blocking_json_task(move || Ok(remotekeys::list(validator_store))) + }, + ); // POST /eth/v1/remotekeys let post_std_remotekeys = std_remotekeys .and(warp::body::json()) - .and(signer.clone()) .and(validator_store_filter.clone()) 
.and(task_executor_filter.clone()) .and(log_filter.clone()) - .and_then(|request, signer, validator_store, task_executor, log| { - blocking_signed_json_task(signer, move || { + .then(|request, validator_store, task_executor, log| { + blocking_json_task(move || { remotekeys::import(request, validator_store, task_executor, log) }) }); @@ -1244,12 +1185,11 @@ pub fn serve( // DELETE /eth/v1/remotekeys let delete_std_remotekeys = std_remotekeys .and(warp::body::json()) - .and(signer) .and(validator_store_filter) .and(task_executor_filter) .and(log_filter.clone()) - .and_then(|request, signer, validator_store, task_executor, log| { - blocking_signed_json_task(signer, move || { + .then(|request, validator_store, task_executor, log| { + blocking_json_task(move || { remotekeys::delete(request, validator_store, task_executor, log) }) }); @@ -1369,42 +1309,3 @@ pub fn serve( Ok((listening_socket, server)) } - -/// Executes `func` in blocking tokio task (i.e., where long-running tasks are permitted). -/// JSON-encodes the return value of `func`, using the `signer` function to produce a signature of -/// those bytes. -pub async fn blocking_signed_json_task( - signer: S, - func: F, -) -> Result -where - S: Fn(&[u8]) -> String, - F: FnOnce() -> Result + Send + 'static, - T: Serialize + Send + 'static, -{ - warp_utils::task::blocking_task(func) - .await - .map(|func_output| { - let mut response = match serde_json::to_vec(&func_output) { - Ok(body) => { - let mut res = Response::new(body); - res.headers_mut() - .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - res - } - Err(_) => Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - .body(vec![]) - .expect("can produce simple response from static values"), - }; - - let body: &Vec = response.body(); - let signature = signer(body); - let header_value = - HeaderValue::from_str(&signature).expect("hash can be encoded as header"); - - response.headers_mut().append("Signature", header_value); - - response - }) -} diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index fe58393bb8d..b6923d1c788 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -13,7 +13,7 @@ use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; -use types::Address; +use types::{attestation::AttestationBase, Address}; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -1094,7 +1094,7 @@ async fn generic_migration_test( // Sign attestations on VC1. for (validator_index, mut attestation) in first_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let current_epoch = attestation.data.target.epoch; + let current_epoch = attestation.data().target.epoch; tester1 .validator_store .sign_attestation(public_key, 0, &mut attestation, current_epoch) @@ -1170,7 +1170,7 @@ async fn generic_migration_test( // Sign attestations on the second VC. 
for (validator_index, mut attestation, should_succeed) in second_vc_attestations { let public_key = keystore_pubkey(&keystores[validator_index]); - let current_epoch = attestation.data.target.epoch; + let current_epoch = attestation.data().target.epoch; match tester2 .validator_store .sign_attestation(public_key, 0, &mut attestation, current_epoch) @@ -1236,7 +1236,7 @@ async fn delete_nonexistent_keystores() { } fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { - Attestation { + Attestation::Base(AttestationBase { aggregation_bits: BitList::with_capacity( ::MaxValidatorsPerCommittee::to_usize(), ) @@ -1253,7 +1253,7 @@ fn make_attestation(source_epoch: u64, target_epoch: u64) -> Attestation { ..AttestationData::default() }, signature: AggregateSignature::empty(), - } + }) } #[tokio::test] diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 268c25cdf7d..729ff62ee30 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -47,7 +47,7 @@ use notifier::spawn_notifier; use parking_lot::RwLock; use preparation_service::{PreparationService, PreparationServiceBuilder}; use reqwest::Certificate; -use slog::{error, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; use std::fs::File; @@ -109,7 +109,7 @@ impl ProductionValidatorClient { /// and attestation production. pub async fn new_from_cli( context: RuntimeContext, - cli_args: &ArgMatches<'_>, + cli_args: &ArgMatches, ) -> Result { let config = Config::from_cli(cli_args, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; @@ -121,6 +121,27 @@ impl ProductionValidatorClient { pub async fn new(context: RuntimeContext, config: Config) -> Result { let log = context.log().clone(); + // Attempt to raise soft fd limit. The behavior is OS specific: + // `linux` - raise soft fd limit to hard + // `macos` - raise soft fd limit to `min(kernel limit, hard fd limit)` + // `windows` & rest - noop + match fdlimit::raise_fd_limit().map_err(|e| format!("Unable to raise fd limit: {}", e))? 
{ + fdlimit::Outcome::LimitRaised { from, to } => { + debug!( + log, + "Raised soft open file descriptor resource limit"; + "old_limit" => from, + "new_limit" => to + ); + } + fdlimit::Outcome::Unsupported => { + debug!( + log, + "Raising soft open file descriptor resource limit is not supported" + ); + } + }; + info!( log, "Starting validator client"; @@ -455,6 +476,7 @@ impl ProductionValidatorClient { slot_clock: slot_clock.clone(), beacon_nodes: beacon_nodes.clone(), validator_store: validator_store.clone(), + unknown_validator_next_poll_slots: <_>::default(), spec: context.eth2_config.spec.clone(), context: duties_context, enable_high_validator_count_metrics: config.enable_high_validator_count_metrics, diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 7aabc7d5abb..474f9f47609 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -477,7 +477,7 @@ impl PreparationService { for batch in signed.chunks(self.validator_registration_batch_size) { match self .beacon_nodes - .first_success( + .broadcast( RequireSynced::No, OfflineOnFailure::No, |beacon_node| async move { diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index fe520e11f5f..d89c9b82292 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -7,7 +7,7 @@ use crate::http_metrics::metrics; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; -use reqwest::Client; +use reqwest::{header::ACCEPT, Client}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; @@ -38,7 +38,7 @@ pub enum SignableMessage<'a, E: EthSpec, Payload: AbstractExecPayload = FullP RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), AttestationData(&'a AttestationData), - SignedAggregateAndProof(&'a AggregateAndProof), + SignedAggregateAndProof(AggregateAndProofRef<'a, E>), SelectionProof(Slot), SyncSelectionProof(&'a SyncAggregatorSelectionData), SyncCommitteeSignature { @@ -243,6 +243,7 @@ impl SigningMethod { // Request a signature from the Web3Signer instance via HTTP(S). let response: SigningResponse = http_client .post(signing_url.clone()) + .header(ACCEPT, "application/json") .json(&request) .send() .await diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 8ad37a1620a..86e7015ad35 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -43,7 +43,7 @@ pub enum Web3SignerObject<'a, E: EthSpec, Payload: AbstractExecPayload> { AggregationSlot { slot: Slot, }, - AggregateAndProof(&'a AggregateAndProof), + AggregateAndProof(AggregateAndProofRef<'a, E>), Attestation(&'a AttestationData), BeaconBlock { version: ForkName, diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index ba85a4f262f..17aab6f23b3 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -641,9 +641,9 @@ impl ValidatorStore { current_epoch: Epoch, ) -> Result<(), Error> { // Make sure the target epoch is not higher than the current epoch to avoid potential attacks. 
- if attestation.data.target.epoch > current_epoch { + if attestation.data().target.epoch > current_epoch { return Err(Error::GreaterThanCurrentEpoch { - epoch: attestation.data.target.epoch, + epoch: attestation.data().target.epoch, current_epoch, }); } @@ -652,7 +652,7 @@ impl ValidatorStore { let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; // Checking for slashing conditions. - let signing_epoch = attestation.data.target.epoch; + let signing_epoch = attestation.data().target.epoch; let signing_context = self.signing_context(Domain::BeaconAttester, signing_epoch); let domain_hash = signing_context.domain_hash(&self.spec); let slashing_status = if signing_method @@ -660,7 +660,7 @@ impl ValidatorStore { { self.slashing_protection.check_and_insert_attestation( &validator_pubkey, - &attestation.data, + attestation.data(), domain_hash, ) } else { @@ -672,7 +672,7 @@ impl ValidatorStore { Ok(Safe::Valid) => { let signature = signing_method .get_signature::>( - SignableMessage::AttestationData(&attestation.data), + SignableMessage::AttestationData(attestation.data()), signing_context, &self.spec, &self.task_executor, @@ -714,7 +714,7 @@ impl ValidatorStore { crit!( self.log, "Not signing slashable attestation"; - "attestation" => format!("{:?}", attestation.data), + "attestation" => format!("{:?}", attestation.data()), "error" => format!("{:?}", e) ); metrics::inc_counter_vec( @@ -792,19 +792,16 @@ impl ValidatorStore { aggregate: Attestation, selection_proof: SelectionProof, ) -> Result, Error> { - let signing_epoch = aggregate.data.target.epoch; + let signing_epoch = aggregate.data().target.epoch; let signing_context = self.signing_context(Domain::AggregateAndProof, signing_epoch); - let message = AggregateAndProof { - aggregator_index, - aggregate, - selection_proof: selection_proof.into(), - }; + let message = + AggregateAndProof::from_attestation(aggregator_index, aggregate, selection_proof); let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; let signature = signing_method .get_signature::>( - SignableMessage::SignedAggregateAndProof(&message), + SignableMessage::SignedAggregateAndProof(message.to_ref()), signing_context, &self.spec, &self.task_executor, @@ -813,7 +810,9 @@ impl ValidatorStore { metrics::inc_counter_vec(&metrics::SIGNED_AGGREGATES_TOTAL, &[metrics::SUCCESS]); - Ok(SignedAggregateAndProof { message, signature }) + Ok(SignedAggregateAndProof::from_aggregate_and_proof( + message, signature, + )) } /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 35af2b1ce73..ebcde6a8288 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,14 +6,12 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bls = { workspace = true } clap = { workspace = true } types = { workspace = true } environment = { workspace = true } eth2_network_config = { workspace = true } clap_utils = { workspace = true } eth2_wallet = { workspace = true } -eth2_keystore = { workspace = true } account_utils = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index cd19bd0ae3b..d53e92deb30 100644 --- a/validator_manager/src/create_validators.rs +++ 
b/validator_manager/src/create_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::std_types::KeystoreJsonStr, types::{StateId, ValidatorId}, @@ -35,8 +36,8 @@ pub const DEPOSITS_FILENAME: &str = "deposits.json"; const BEACON_NODE_HTTP_TIMEOUT: Duration = Duration::from_secs(2); -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Creates new validators from BIP-39 mnemonic. A JSON file will be created which \ contains all the validator keystores and other validator data. This file can then \ @@ -45,7 +46,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) .arg( - Arg::with_name(OUTPUT_PATH_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) .value_name("DIRECTORY") .help( @@ -53,10 +63,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { files will be created. The directory will be created if it does not exist.", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEPOSIT_GWEI_FLAG) + Arg::new(DEPOSIT_GWEI_FLAG) .long(DEPOSIT_GWEI_FLAG) .value_name("DEPOSIT_GWEI") .help( @@ -64,51 +75,60 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { required for an active validator (MAX_EFFECTIVE_BALANCE)", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FIRST_INDEX_FLAG) + Arg::new(FIRST_INDEX_FLAG) .long(FIRST_INDEX_FLAG) .value_name("FIRST_INDEX") .help("The first of consecutive key indexes you wish to create.") - .takes_value(true) + .action(ArgAction::Set) .required(false) - .default_value("0"), + .default_value("0") + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to create, regardless of how many already exist") .conflicts_with("at-most") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(MNEMONIC_FLAG) + Arg::new(MNEMONIC_FLAG) .long(MNEMONIC_FLAG) .value_name("MNEMONIC_PATH") .help("If present, the mnemonic will be read in from this file.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0) + .help_heading(FLAG_HEADER), ) .arg( - Arg::with_name(DISABLE_DEPOSITS_FLAG) + Arg::new(DISABLE_DEPOSITS_FLAG) .long(DISABLE_DEPOSITS_FLAG) .help( "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. 
\ Using this flag will save several seconds per validator if the \ user has an alternate strategy for submitting deposits.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) + Arg::new(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .long(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG) .help( "If present, the user will be prompted to enter the voting keystore \ @@ -116,10 +136,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { flag is not provided, a random password will be used. It is not \ necessary to keep backups of voting keystore passwords if the \ mnemonic is safely backed up.", - ), + ) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .display_order(0), ) .arg( - Arg::with_name(ETH1_WITHDRAWAL_ADDRESS_FLAG) + Arg::new(ETH1_WITHDRAWAL_ADDRESS_FLAG) .long(ETH1_WITHDRAWAL_ADDRESS_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -128,10 +151,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { with the mnemonic-derived withdrawal public key in EIP-2334 format.", ) .conflicts_with(DISABLE_DEPOSITS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -139,10 +163,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -150,21 +175,23 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BEACON_NODE_FLAG) + Arg::new(BEACON_NODE_FLAG) .long(BEACON_NODE_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -174,21 +201,24 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { prevent the same validator being created twice and therefore slashable \ conditions.", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FORCE_BLS_WITHDRAWAL_CREDENTIALS) - .takes_value(false) + Arg::new(FORCE_BLS_WITHDRAWAL_CREDENTIALS) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(FORCE_BLS_WITHDRAWAL_CREDENTIALS) .help( "If present, allows BLS withdrawal credentials rather than an execution \ address. 
This is not recommended.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -196,18 +226,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -242,10 +274,10 @@ impl CreateConfig { first_index: clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?, count: clap_utils::parse_required(matches, COUNT_FLAG)?, mnemonic_path: clap_utils::parse_optional(matches, MNEMONIC_FLAG)?, - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), - disable_deposits: matches.is_present(DISABLE_DEPOSITS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), + disable_deposits: matches.get_flag(DISABLE_DEPOSITS_FLAG), specify_voting_keystore_password: matches - .is_present(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), + .get_flag(SPECIFY_VOTING_KEYSTORE_PASSWORD_FLAG), eth1_withdrawal_address: clap_utils::parse_optional( matches, ETH1_WITHDRAWAL_ADDRESS_FLAG, @@ -259,7 +291,7 @@ impl CreateConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, bn_url: clap_utils::parse_optional(matches, BEACON_NODE_FLAG)?, - force_bls_withdrawal_credentials: matches.is_present(FORCE_BLS_WITHDRAWAL_CREDENTIALS), + force_bls_withdrawal_credentials: matches.get_flag(FORCE_BLS_WITHDRAWAL_CREDENTIALS), }) } } @@ -516,8 +548,8 @@ impl ValidatorsAndDeposits { } } -pub async fn cli_run<'a, E: EthSpec>( - matches: &'a ArgMatches<'a>, +pub async fn cli_run( + matches: &ArgMatches, spec: &ChainSpec, dump_config: DumpConfig, ) -> Result<(), String> { @@ -581,7 +613,7 @@ pub mod tests { type E = MainnetEthSpec; - const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.3.0"; + const TEST_VECTOR_DEPOSIT_CLI_VERSION: &str = "2.7.0"; fn junk_execution_address() -> Option
{ Some(Address::from_str("0x0f51bb10119727a7e5ea3538074fb341f56b09ad").unwrap()) @@ -933,12 +965,6 @@ pub mod tests { for deposit in &mut deposits { // Ensures we can match test vectors. deposit.deposit_cli_version = TEST_VECTOR_DEPOSIT_CLI_VERSION.to_string(); - - // We use "prater" and the vectors use "goerli" now. The two names refer to the same - // network so there should be no issue here. - if deposit.network_name == "prater" { - deposit.network_name = "goerli".to_string(); - } } deposits }; diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 4b924189f20..f193e8d0fbd 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; @@ -13,15 +14,24 @@ pub const VC_TOKEN_FLAG: &str = "vc-token"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) .arg( - Arg::with_name(VALIDATORS_FILE_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) .value_name("PATH_TO_JSON_FILE") .help( @@ -30,10 +40,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { \"validators.json\".", ) .required(true) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_URL_FLAG) + Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -43,18 +54,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VC_TOKEN_FLAG) + Arg::new(VC_TOKEN_FLAG) .long(VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(IGNORE_DUPLICATES_FLAG) - .takes_value(false) + Arg::new(IGNORE_DUPLICATES_FLAG) + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .long(IGNORE_DUPLICATES_FLAG) .help( "If present, ignore any validators which already exist on the VC. \ @@ -63,7 +77,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { slashable conditions, it might be an indicator that something is amiss. 
\ Users should also be careful to avoid submitting duplicate deposits for \ validators that already exist on the VC.", - ), + ) + .display_order(0), ) } @@ -81,15 +96,12 @@ impl ImportConfig { validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, - ignore_duplicates: matches.is_present(IGNORE_DUPLICATES_FLAG), + ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? { Ok(()) diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index a9991d3272c..222dd7076de 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -1,5 +1,5 @@ -use clap::App; -use clap::ArgMatches; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::{get_color_style, FLAG_HEADER}; use common::write_to_json_file; use environment::Environment; use serde::Serialize; @@ -38,17 +38,28 @@ impl DumpConfig { } } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .visible_aliases(&["vm", "validator-manager", CMD]) +pub fn cli_app() -> Command { + Command::new(CMD) + .visible_aliases(["vm", "validator-manager"]) + .display_order(0) + .styles(get_color_style()) .about("Utilities for managing a Lighthouse validator client via the HTTP API.") + .arg( + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. -pub fn run<'a, E: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment) -> Result<(), String> { +pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), String> { let context = env.core_context(); let spec = context.eth2_config.spec; let dump_config = clap_utils::parse_optional(matches, DUMP_CONFIGS_FLAG)? @@ -63,20 +74,20 @@ pub fn run<'a, E: EthSpec>(matches: &'a ArgMatches<'a>, env: Environment) -> .block_on_dangerous( async { match matches.subcommand() { - (create_validators::CMD, Some(matches)) => { + Some((create_validators::CMD, matches)) => { create_validators::cli_run::(matches, &spec, dump_config).await } - (import_validators::CMD, Some(matches)) => { + Some((import_validators::CMD, matches)) => { import_validators::cli_run(matches, dump_config).await } - (move_validators::CMD, Some(matches)) => { + Some((move_validators::CMD, matches)) => { move_validators::cli_run(matches, dump_config).await } - ("", _) => Err("No command supplied. See --help.".to_string()), - (unknown, _) => Err(format!( + Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD )), + _ => Err("No command supplied. 
See --help.".to_string()), } }, "validator_manager", diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 5826f2756be..d2149d742c1 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -1,7 +1,8 @@ use super::common::*; use crate::DumpConfig; use account_utils::{read_password_from_user, ZeroizeString}; -use clap::{App, Arg, ArgMatches}; +use clap::{Arg, ArgAction, ArgMatches, Command}; +use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -66,8 +67,8 @@ impl PasswordSource { } } -pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) +pub fn cli_app() -> Command { + Command::new(CMD) .about( "Uploads validators to a validator client using the HTTP API. The validators \ are defined in a JSON file which can be generated using the \"create-validators\" \ @@ -75,7 +76,16 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { file system (i.e., not Web3Signer validators).", ) .arg( - Arg::with_name(SRC_VC_URL_FLAG) + Arg::new("help") + .long("help") + .short('h') + .help("Prints help information") + .action(ArgAction::HelpLong) + .display_order(0) + .help_heading(FLAG_HEADER), + ) + .arg( + Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -85,17 +95,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(SRC_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(SRC_VC_TOKEN_FLAG) + Arg::new(SRC_VC_TOKEN_FLAG) .long(SRC_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the source validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_URL_FLAG) + Arg::new(DEST_VC_URL_FLAG) .long(DEST_VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( @@ -105,35 +117,39 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .required(true) .requires(DEST_VC_TOKEN_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(DEST_VC_TOKEN_FLAG) + Arg::new(DEST_VC_TOKEN_FLAG) .long(DEST_VC_TOKEN_FLAG) .value_name("PATH") .help("The file containing a token required by the destination validator client.") - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(VALIDATORS_FLAG) + Arg::new(VALIDATORS_FLAG) .long(VALIDATORS_FLAG) .value_name("STRING") .help( "The validators to be moved. Either a list of 0x-prefixed \ validator pubkeys or the keyword \"all\".", ) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(COUNT_FLAG) + Arg::new(COUNT_FLAG) .long(COUNT_FLAG) .value_name("VALIDATOR_COUNT") .help("The number of validators to move.") .conflicts_with(VALIDATORS_FLAG) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(GAS_LIMIT_FLAG) + Arg::new(GAS_LIMIT_FLAG) .long(GAS_LIMIT_FLAG) .value_name("UINT64") .help( @@ -141,10 +157,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { to leave this as the default value by not specifying this flag.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(FEE_RECIPIENT_FLAG) + Arg::new(FEE_RECIPIENT_FLAG) .long(FEE_RECIPIENT_FLAG) .value_name("ETH1_ADDRESS") .help( @@ -152,30 +169,33 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { fee recipient. 
Omit this flag to use the default value from the VC.", ) .required(false) - .takes_value(true), + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(BUILDER_PROPOSALS_FLAG) + Arg::new(BUILDER_PROPOSALS_FLAG) .long(BUILDER_PROPOSALS_FLAG) .help( "When provided, all created validators will attempt to create \ blocks via builder rather than the local EL.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) .arg( - Arg::with_name(STDIN_INPUTS_FLAG) - .takes_value(false) - .hidden(cfg!(windows)) + Arg::new(STDIN_INPUTS_FLAG) + .action(ArgAction::SetTrue) + .hide(cfg!(windows)) .long(STDIN_INPUTS_FLAG) - .help("If present, read all user inputs from stdin instead of tty."), + .help("If present, read all user inputs from stdin instead of tty.") + .display_order(0), ) .arg( - Arg::with_name(BUILDER_BOOST_FACTOR_FLAG) + Arg::new(BUILDER_BOOST_FACTOR_FLAG) .long(BUILDER_BOOST_FACTOR_FLAG) - .takes_value(true) + .action(ArgAction::Set) .value_name("UINT64") .required(false) .help( @@ -183,18 +203,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { a percentage multiplier to apply to the builder's payload value \ when choosing between a builder payload header and payload from \ the local execution node.", - ), + ) + .display_order(0), ) .arg( - Arg::with_name(PREFER_BUILDER_PROPOSALS_FLAG) + Arg::new(PREFER_BUILDER_PROPOSALS_FLAG) .long(PREFER_BUILDER_PROPOSALS_FLAG) .help( "If this flag is set, Lighthouse will always prefer blocks \ constructed by builders, regardless of payload value.", ) .required(false) - .possible_values(&["true", "false"]) - .takes_value(true), + .value_parser(["true", "false"]) + .action(ArgAction::Set) + .display_order(0), ) } @@ -223,10 +245,10 @@ pub struct MoveConfig { impl MoveConfig { fn from_cli(matches: &ArgMatches) -> Result<Self, String> { let count_flag = clap_utils::parse_optional(matches, COUNT_FLAG)?; - let validators_flag = matches.value_of(VALIDATORS_FLAG); + let validators_flag = matches.get_one::<String>(VALIDATORS_FLAG); let validators = match (count_flag, validators_flag) { (Some(count), None) => Validators::Count(count), - (None, Some(string)) => match string { + (None, Some(string)) => match string.as_str() { "all" => Validators::All, pubkeys => pubkeys .split(',') @@ -257,16 +279,13 @@ impl MoveConfig { fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT_FLAG)?, gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT_FLAG)?, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || matches.is_present(STDIN_INPUTS_FLAG), + stdin_inputs: cfg!(windows) || matches.get_flag(STDIN_INPUTS_FLAG), }, }) } } -pub async fn cli_run<'a>( - matches: &'a ArgMatches<'a>, - dump_config: DumpConfig, -) -> Result<(), String> { +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = MoveConfig::from_cli(matches)?; if dump_config.should_exit_early(&config)? { Ok(()) diff --git a/validator_manager/test_vectors/generate.py b/validator_manager/test_vectors/generate.py index 722414de733..8bf7f5f52d3 100644 --- a/validator_manager/test_vectors/generate.py +++ b/validator_manager/test_vectors/generate.py @@ -1,10 +1,13 @@ # This script uses the `ethereum/staking-deposit-cli` tool to generate # deposit data files which are then used for testing by Lighthouse.
# -# To generate vectors, simply run this Python script: +# To generate vectors, run this Python script: # # `python generate.py` # +# This script was last run on Linux using Python v3.10.4. Python v3.11.0 was not working at time +# of writing due to dependency issues in `staking-deposit-cli`. You should probably use `pyenv` and +# `virtualenv`. import os import sys import shutil @@ -89,8 +92,7 @@ def sdc_generate(network, first_index, count, eth1_withdrawal_address=None): os.mkdir(output_dir) command = [ - '/bin/sh', - 'deposit.sh', + './deposit.sh', '--language', 'english', '--non_interactive', 'existing-mnemonic', @@ -114,10 +116,10 @@ def test_network(network): sdc_generate(network, first_index=99, count=2) sdc_generate(network, first_index=1024, count=3) sdc_generate(network, first_index=0, count=2, - eth1_withdrawal_address="0x0f51bb10119727a7e5ea3538074fb341f56b09ad") + eth1_withdrawal_address="0x0f51bb10119727a7e5eA3538074fb341F56B09Ad") setup() test_network("mainnet") -test_network("prater") +test_network("holesky") cleanup() diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json new file mode 100644 index 00000000000..6b343d087a3 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584111.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json new file mode 100644 index 00000000000..f70410746bd --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584114.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "846c83b1ec80038974ded0ef5b89d86c862a7bd4559c10528cd4bb6a48e71987f17a963bc6165a6f51c8b87474e64b450b549ce2d14a25bea3c86c241f3740f3d3edc3dc36fddbeadb1ec8969d7193da602270fea8dd31d3e64674aa2090b73d", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "cdfe14518026e99b9dfa8a029054349e37d4632ee2bbed7c2f5af19a01912368", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": 
"00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "997cff67c1675ecd2467ac050850ddec8b0488995abf363cee40cbe1461043acf4e68422e9731340437d566542e010cd186031dc0de30b2f56d19f3bb866e0fa9be31dd49ea27777f25ad786cc8587fb745598e5870647b6deeaab77fba4a9e4", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "8787f86d699426783983d03945a8ebe45b349118d28e8af528b9695887f98fac", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json new file mode 100644 index 00000000000..9b2678651f7 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584129.json @@ -0,0 +1 @@ +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8eed5bb34dec5fdee4a3e68a774143072af0ebdae26a9b24ea0601d516a5eeb18aa2ec804be3f05f8475f2e472ce91809d93b7586c3a90fc8a7bbb63ad1f762eee3df0dc0ea3d33dd8ba782e48de495b3bc76e280658c1406e11d07db659e69", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "74ead0279baa86ed7106268e4806484eaae26a8f1c42f693e4b3cb626c724b63", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "8d87cdd627ed169114c00653fd3167e2afc917010071bbbbddd60e331ed0d0d7273cb4a887efe63e7b840bac713420d907e9dac20df56e50e7346b59e3acfe56753234a34c7ab3d8c40ea00b447db005b4b780701a0a2416c4fdadbdb18bf174", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "978b04b76d0a56ff28beb8eb1859792e0967d0b51e4a31485d2078b8390954d2", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json new file mode 100644 index 00000000000..997260bb872 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584124.json @@ -0,0 +1 @@ +[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "818141f1f2fdba651f6a3de4ed43c774974b6cec82b3e6c3fa00569b6b67a88c37742d0033275dc98b4bbaac875e48b416b89cebfd1fe9996e2a29c0a2c512d1cedff558420a1a2b50cf5c743a622d85d941b896b00520b3e9a3eaf1f5eff12c", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": 
"9c9f6ed171b93a08f4e1bc46c0a7feace6466e3e213c6c2d567428c73e22e242", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b62103a32290ec8c710d48f3147895a2dddb25231c9ae38b8ca12bcaf30770a9fc632f4da6b3c5b7a43cfa6a9f096f5e13d26b2c68a42c1c86385aea268dcd2ad3cf766b3f01ee2ba19379ddae9c15830aac8acbef20accc82c734f4c40e5ffd", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "37b75d75086f4b980c85c021ca22343008d445061714cff41d63aea4dca49a5f", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "af2dc295084b4a3eff01a52fe5d42aa931509c24328d5304e59026d0957b55bc35e64802a8d64fdb4a9700bf12e1d6bb184eba01682d8413d86b737e63d3d79a16243d9c8e00115a202efc889ef7129861d8aa32bf8ec9ef5305eecce87b2eda", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "fd0c081818d2ce1bc54b7979e9b348bbbdb8fe5904694143bf4b355dcbbde692", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json new file mode 100644 index 00000000000..4fa3724c597 --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584117.json @@ -0,0 +1 @@ +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "b687aa7d55752f00a060c21fa9287485bab94c841d96b3516263fb384a812c92e60ef9fa2e09add9f55db71961fc051e0bb83d214b6f31d04ee59eaba3b43e27eadd2a64884c5d4125a1f5bd6e1d930e5a1e420c278c697d4af6ed3fcdac16cf", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "54dc56d2838ca70bac89ca92ae1f8d04945d3305ce8507b390756b646163387a", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json new file mode 100644 index 00000000000..7436b53f24b --- /dev/null +++ b/validator_manager/test_vectors/vectors/holesky_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584120.json @@ -0,0 +1 @@ +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"a59a2c510c5ce378b514f62550a7115cd6cfebaf73a5ba20c2cf21456a2d2c11d6e117b91d23743fc0361794cf7e5405030eb296926b526e8a2d68aa87569358e69d3884563a23770714730b6fab6ba639977d725a5ed4f29abe3ccc34575610", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "149a5dfbba87109dac65142cc067aed97c9579730488cfe16625be3ce4f753a6", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "966ae45b81402f1155ff313e48ca3a5346264dcc4bc9ee9e69994ee74368852d9d27c1684752735feba6c21042ad366b13f12c6e772c453518900435d87e2d743e1818e7471cf3574598e3b085c4527f643efe679841ddf8a480cac12b2c6e08", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "f44dac412ae36929a84f64d5f7f91cada908a8f9e837fc70628f58804591798d", "fork_version": "01017000", "network_name": "holesky", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json index 31c00c57f24..d9ba926d1c3 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803666.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_1_eth1_false/validator_keys/deposit_data-1715584089.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git 
a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json index 2880b7724cf..f1ea4c6ad32 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803669.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_false/validator_keys/deposit_data-1715584092.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "8ac88247c1b431a2d1eb2c5f00e7b8467bc21d6dc267f1af9ef727a12e32b4299e3b289ae5734a328b3202478dd746a80bf9e15a2217240dca1fc1b91a6b7ff7a0f5830d9a2610c1c30f19912346271357c21bd9af35a74097ebbdda2ddaf491", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "807a20b2801eabfd9065c1b74ed6ae3e991a1ab770e4eaf268f30b37cfd2cbd7", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "84b9fc8f260a1488c4c9a438f875edfa2bac964d651b2bc886d8442829b13f89752e807c8ca9bae9d50b1b506d3a64730015dd7f91e271ff9c1757d1996dcf6082fe5205cf6329fa2b6be303c21b66d75be608757a123da6ee4a4f14c01716d7", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": 
"cd991ea8ff32e6b3940aed43b476c720fc1abd3040893b77a8a3efb306320d4c", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json rename to validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json index da92a1d0d94..5741f23d8fd 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803684.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_0_count_2_eth1_true/validator_keys/deposit_data-1715584107.json @@ -1 +1 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "a8461b58a5a5a0573c4af37da6ee4ba63e35894cffad6797d4a2c80f8f2c79d2c30c0de0299d8edde76e0c3f3e6d4f1e03cc377969f56d8760717d6e86f9316da9375573ce7bb87a8520daedb13c49284377f7a4f64a70aa2ca44b1581d47e20", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "d26d642a880ff8a109260fe69681840f6e1868c8c1cd2163a1db5a094e8db03a", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": 
"93a398c09143203beb94c9223c7e18f36e5ea36090875284b222c2fcb16982e6f2e26f27ca9d30e3c6f6b5ad44857fc50f531925f4736810712f68a9d7a9c0eb664a851180f3b7d2e44a35717d43b3d3e4fd555354fa1dfa92f451870f36084d", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7c7617a2c11870ec49e975b3691b9f822d63938df38555161e23aa245b150c66", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json similarity index 93% rename from validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json rename to validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json index 9cc01dc0df7..9b9556cf9da 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803679.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_1024_count_3_eth1_false/validator_keys/deposit_data-1715584103.json @@ -1 +1 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": 
"92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "a0a96851892b257c032284928641021e58e0bcd277c3da5a2c41bcce6633d144781e4761261138277b5a8cf0ead59cce073e5a3bbc4704a37abf8cd1e290dc52e56cb0c334303945ebbb79be453c8177937e44e08f980679f1a2997fe58d2d86", "deposit_message_root": "5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "2bedaf48f8315d8631defc97c1c4c05a8152e2dc3fe779fc8e800dd67bd839a2", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "b469179ad8ba9d6ad71b99a3c7ae662d9b77cca3ee53b20ab2eb20beee31874ad47224e94e75578fa6ecd30c1d40a0b300053817f934169d84425691edf13216445fbc6dd9b0953ad3af20c834fba63c1f50c0b0f92dd8bf383cd2cc8e0431f1", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "69862477671957ab0b3f1167c5cd550c107132a0079eb70eaa4bc5c5fe06b5a0", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "a8b05626657ce5b1801e0824aaeb21de2e1a11bc16cad6100ac911bcb873aaf7e7282f1f8465df4aaea998a1a4e1645f075e7e65f8c6b8688b0162f86be2128541f91fc9feb628bcab3b4afec1f7aeccaba04aaa54dc17c738233d360f94b97e", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "34ef32901d793cd9a0a3d93e7ee40e7be9abe6fb26f0b49a86b8ff29dc649930", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json rename to validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json index 3a971d0959a..84140f53feb 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803672.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_12_count_1_eth1_false/validator_keys/deposit_data-1715584095.json @@ -1 +1 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": 
"246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "a57299cde3c2ea8dc17ad3ce5a38a5f6de69d198599150dc4df02624ba1d8672440d02c0d27c3dc3b8c9f86c679571ab14c798426acd9b059895f1f5887bdee805fb4e31bd8f93ec9e78403c23d7924f23eae6af056154f35fee03bf9ffe0e98", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "246619823b45d80f53a30404542ec4be447d4e268cc0afcdf480e6a846d58411", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json similarity index 90% rename from validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json rename to validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json index 2efa5c4ec8c..3205390a434 100644 --- a/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803675.json +++ b/validator_manager/test_vectors/vectors/mainnet_first_99_count_2_eth1_false/validator_keys/deposit_data-1715584098.json @@ -1 +1 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": "8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.3.0"}] \ No newline at end of file +[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 32000000000, "signature": 
"8ca8a6f30b4346d7b9912e3dcd820652bc472511f89d91fd102acfb0c8df1cfc7a2629f44170727e126e88f2847fe5c9081b13fb0838a2b2343a95cabf16f57708fc0cf846bc5307209ae976c34500cc826ff48ab64169d8bebec99dded5dd1d", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "c0c6cd40b43ea0fe7fcc284de9acd9c1bd001bb88c059c155393af22a6c85d46", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8c0784645c611b4f514a6519b737f2d02df3eba0e04cd30efebffcca769af8cc599ce28e4421cefe665ec31d3c34e44c174e0cca4891d8196796085e712459b45e411efecd07cf3258f1d6309a07a6dd52a0ae186e6184d37bf11cee36ec84e8", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "c57790b77ef97318d4ec7b97ea07ea458d08209ba372bfe76171e2ece22d6130", "fork_version": "00000000", "network_name": "mainnet", "deposit_cli_version": "2.7.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json b/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json deleted file mode 100644 index c736d75b7e9..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_1_eth1_false/validator_keys/deposit_data-1660803687.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json deleted file mode 100644 index e86500d14f2..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_false/validator_keys/deposit_data-1660803690.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0049b6188ed20314309f617dd4030b8ddfac3c6e65759a03c226a13b2fe4cc72", "amount": 32000000000, "signature": "a940e0142ad9b56a1310326137347d1ada275b31b3748af4accc63bd189573376615be8e8ae047766c6d10864e54b2e7098177598edf3a043eb560bbdf1a1c12588375a054d1323a0900e2286d0993cde9675e5b74523e6e8e03715cc96b3ce5", "deposit_message_root": "a9bc1d21cc009d9b10782a07213e37592c0d235463ed0117dec755758da90d51", "deposit_data_root": "28484efb20c961a1354689a556d4c352fe9deb24684efdb32d22e1af17e2a45d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, 
{"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "00ad3748cbd1adc855c2bdab431f7e755a21663f4f6447ac888e5855c588af5a", "amount": 32000000000, "signature": "87b4b4e9c923aa9e1687219e9df0e838956ee6e15b7ab18142467430d00940dc7aa243c9996e85125dfe72d9dbdb00a30a36e16a2003ee0c86f29c9f5d74f12bfe5b7f62693dbf5187a093555ae8d6b48acd075788549c4b6a249b397af24cd0", "deposit_message_root": "c5271aba974c802ff5b02b11fa33b545d7f430ff3b85c0f9eeef4cd59d83abf3", "deposit_data_root": "ea80b639356a03f6f58e4acbe881fabefc9d8b93375a6aa7e530c77d7e45d3e4", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json b/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json deleted file mode 100644 index c79ae5a4fc0..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_0_count_2_eth1_true/validator_keys/deposit_data-1660803705.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "88b6b3a9b391fa5593e8bce8d06102df1a56248368086929709fbb4a8570dc6a560febeef8159b19789e9c1fd13572f0", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "ab32595d8201c2b4e8173aece9151fdc15f4d2ad36008462d0416598ddbf0f37ed0877f06d284a9669e73dbc0885bd2207fe64385e95a4488dc2bcb2c324d5c20da3248a6244463583dfbba8db20805765421e59cb56b0bc3ee6d24a9218216d", "deposit_message_root": "62967565d11471da4af7769911926cd1826124048036b25616216f99bc320f13", "deposit_data_root": "b4df3a3a26dd5f6eb32999d8a7051a7d1a8573a16553d4b45ee706a0d59c1066", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a33ab9d93fb53c4f027944aaa11a13be0c150b7cc2e379d85d1ed4db38d178b4e4ebeae05832158b8c746c1961da00ce", "withdrawal_credentials": "0100000000000000000000000f51bb10119727a7e5ea3538074fb341f56b09ad", "amount": 32000000000, "signature": "9655e195eda5517efe6f36bcebd45250c889a4177d7bf5fcd59598d2d03f37f038b5ee2ec079a30a8382ea42f351943f08a6f006bab9c2130db2742bd7315c8ad5aa1f03a0801c26d4c9efdef71c4c59c449c7f9b21fa62600ab8f5f1e2b938a", "deposit_message_root": "ce110433298ffb78d827d67dcc13655344a139cb7e3ce10b341937c0a76b25b7", "deposit_data_root": "7661474fba11bfb453274f62df022cab3c0b6f4a58af4400f6bce83c9cb5fcb8", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json b/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json deleted file mode 100644 index 136dc38554c..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_1024_count_3_eth1_false/validator_keys/deposit_data-1660803701.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "92ca8dddba4ae7ada6584c377fc53fb978ad9d5ee8db585b18e226c27682b326b3c68e10f5d99a453e233268c144e0ef", "withdrawal_credentials": "00dd4f8bfd1a48be288c2af8bb7315f6198900b5b3f56df010420d5328e682cb", "amount": 32000000000, "signature": "b5dae79ce8f3d7326b46f93182981c5f3d64257a457f038caa78ec8e5cc25a9fdac52c7beb221ab2a3205404131366ad18e1e13801393b3d486819e8cca96128bf1244884a91d05dced092c74bc1e7259788f30dd3432df15f3d2f629645f345", "deposit_message_root": 
"5421d9177b4d035e6525506509ab702c5f458c53458dad437097b37cb8209b43", "deposit_data_root": "94213d76aba9e6a434589d4939dd3764e0832df78f66d30db22a760c14ba1b89", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "86474cd2874663445ef0ee02aca81b2b942a383fd4c7085fa675388e26c67afc0fef44a8666d46f571723e349ae4a0cb", "withdrawal_credentials": "001c31aa161ed1d3c481c1ee8f3ad1853217296a15877917fe3c2f680580ac01", "amount": 32000000000, "signature": "816f38a321c4f84ad5187eda58f6d9c1fd1e81c860ed1722bdb76b920fdd430a1e814b9bb893837ae3b38ad738684fbf1795fa687f617c52121472b1ac8d2e34e5c1127186233a8833ffb54c509d9e52cb7242c6c6a65b5e496296b3caa90d89", "deposit_message_root": "279271f7065c83868c37021c32c014516b21e6188fb2cee4e8543c5d38427698", "deposit_data_root": "7ad1d059d69794680a1deef5e72c33827f0c449a5f0917095821c0343572789d", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "997e27aa262238beb01464434694a466321b5270297bdfdb944b65a3b6617b6ce2613628ac35a8f4cf2e9b4b55c46ef8", "withdrawal_credentials": "0097fffee9cf9fd91a6fa89af90e73f1cb8b8a043e742afaeb2e57b83b0845fe", "amount": 32000000000, "signature": "95d20c35484dea6b2a0bd7c2da2d2e810d7829e14c03657b2524adfc2111aa5ed95908ecb975ff75ff742c68ce8df417016c048959b0f807675430f6d981478e26d48e594e0830a0406da9817f8a1ecb94bd8be1f9281eeb5e952a82173c72bb", "deposit_message_root": "187e177721bfdd8ea13cb52c8de2dead29164a0e093efb640457a0e6ac918191", "deposit_data_root": "83abfb2a166f7af708526a9bdd2767c4be3cd231c9bc4e2f047a80df88a2860c", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json b/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json deleted file mode 100644 index ccd2ece0699..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_12_count_1_eth1_false/validator_keys/deposit_data-1660803693.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "8b181759a027c09a409ef24f6b35db213982c2474e2017f3851d76b1c4e560a4238072f67a0c22cb667f940da4ea9ec9", "withdrawal_credentials": "00cbec90e8570679f565bd4645f73a078981067a705564283e61c93c81707842", "amount": 32000000000, "signature": "8f75836ceb390dd4fc8c16bc4be52ca09b9c5aa0ab5bc16dcfdb344787b29ddfd76d877b0a2330bc8e904b233397c6bd124845d1b868e4951cb6daacea023c986bdf0c6ac28d73f65681d941ea96623bc23acc7c84dcfc1304686240d9171cfc", "deposit_message_root": "fcdf3d94740766299a95b3e477e64abadff6ab8978400578f241c93eb367b938", "deposit_data_root": "3011f5cac32f13e86ecc061e89ed6675c27a46ab6ecb1ec6f6e5f133ae1d0287", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json b/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json deleted file mode 100644 index 2ab5908307b..00000000000 --- a/validator_manager/test_vectors/vectors/prater_first_99_count_2_eth1_false/validator_keys/deposit_data-1660803696.json +++ /dev/null @@ -1 +0,0 @@ -[{"pubkey": "a57a4ed429e415b862cc758e75c93936e3f6339640d0763b969ba133a82c03717827fbdd8ec42fc862ed50e3b5b528dc", "withdrawal_credentials": "00864081ef2f5aec1aa667872615e25027f1fdc256a4948b6318cf75a8d635a3", "amount": 
32000000000, "signature": "a7706e102bfb0b986a5c8050044f7e221919463149771a92c3ca46ff7d4564867db48eaf89b5237fed8db2cdb9c9c057099d0982bbdb3fbfcbe0ab7259ad3f31f7713692b78ee25e6251982e7081d049804632b70b8a24d8c3e59b624a0bd221", "deposit_message_root": "c08d0ecd085bc0f50c35f1b34d8b8937b2b9c8a172a9808de70f8d448c526f07", "deposit_data_root": "8a26fbee0c3a99fe090af1fce68afc525b4e7efa70df72abaa91f29148b2f672", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}, {"pubkey": "a2801622bc391724989004b5de78cb85746f85a303572691ecc945d9f5c61ec512127e58482e0dfcb4de77be3294ab01", "withdrawal_credentials": "00edff674c66a7f58285554e700183aeee5e740691de8087f7ce4d81f3597108", "amount": 32000000000, "signature": "8b7aa5b0e97d15ec8c2281b919fde9e064f6ac064b163445ea99441ab063f9d10534bfde861b5606021ae46614ff075e0c2305ce5a6cbcc9f0bc8e7df1a177c4d969a5ed4ac062b0ea959bdac963fe206b73565a1a3937adcca736c6117c15f0", "deposit_message_root": "f5a530bee9698c2447961ecd210184fbb130bbb8e8916988d802d47e3b147842", "deposit_data_root": "d38575167a94b516455c5b7e36d24310a612fa0f4580446c5f9d45e4e94f0642", "fork_version": "00001020", "network_name": "goerli", "deposit_cli_version": "2.3.0"}] \ No newline at end of file diff --git a/watch/Cargo.toml b/watch/Cargo.toml index aaaf50aa40a..9e8da3b293b 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -15,6 +15,7 @@ path = "src/main.rs" [dependencies] clap = { workspace = true } +clap_utils = { workspace = true } log = { workspace = true } env_logger = { workspace = true } types = { workspace = true } @@ -30,9 +31,7 @@ url = { workspace = true } rand = { workspace = true } diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } diesel_migrations = { version = "2.0.0", features = ["postgres"] } -byteorder = { workspace = true } bls = { workspace = true } -hex = { workspace = true } r2d2 = { workspace = true } serde_yaml = { workspace = true } diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs index afa35c81b63..f0bc3f8ac86 100644 --- a/watch/src/blockprint/database.rs +++ b/watch/src/blockprint/database.rs @@ -33,6 +33,7 @@ pub struct WatchBlockprint { } #[derive(Debug, QueryableByName, diesel::FromSqlRow)] +#[allow(dead_code)] pub struct WatchValidatorBlockprint { #[diesel(sql_type = Integer)] pub proposer_index: i32, diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs index 532776f425a..319090c6565 100644 --- a/watch/src/blockprint/mod.rs +++ b/watch/src/blockprint/mod.rs @@ -24,6 +24,7 @@ pub use server::blockprint_routes; const TIMEOUT: Duration = Duration::from_secs(50); #[derive(Debug)] +#[allow(dead_code)] pub enum Error { Reqwest(reqwest::Error), Url(url::ParseError), diff --git a/watch/src/cli.rs b/watch/src/cli.rs index 97dc2172933..b7179efe5d4 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,28 +1,29 @@ use crate::{config::Config, logger, server, updater}; -use clap::{App, Arg}; +use clap::{Arg, ArgAction, Command}; +use clap_utils::get_color_style; pub const SERVE: &str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; pub const CONFIG: &str = "config"; -fn run_updater<'a, 'b>() -> App<'a, 'b> { - App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +fn run_updater() -> Command { + Command::new(RUN_UPDATER).styles(get_color_style()) } -fn serve<'a, 'b>() -> App<'a, 'b> { - App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +fn serve() -> Command { + Command::new(SERVE).styles(get_color_style()) } -pub fn app<'a, 'b>() -> App<'a, 'b> { - 
App::new("beacon_watch_daemon") +pub fn app() -> Command { + Command::new("beacon_watch_daemon") .author("Sigma Prime ") - .setting(clap::AppSettings::ColoredHelp) + .styles(get_color_style()) .arg( - Arg::with_name(CONFIG) + Arg::new(CONFIG) .long(CONFIG) .value_name("PATH_TO_CONFIG") .help("Path to configuration file") - .takes_value(true) + .action(ArgAction::Set) .global(true), ) .subcommand(run_updater()) @@ -32,7 +33,7 @@ pub fn app<'a, 'b>() -> App<'a, 'b> { pub async fn run() -> Result<(), String> { let matches = app().get_matches(); - let config = match matches.value_of(CONFIG) { + let config = match matches.get_one::(CONFIG) { Some(path) => Config::load_from_file(path.to_string())?, None => Config::default(), }; @@ -40,10 +41,10 @@ pub async fn run() -> Result<(), String> { logger::init_logger(&config.log_level); match matches.subcommand() { - (RUN_UPDATER, Some(_)) => updater::run_updater(config) + Some((RUN_UPDATER, _)) => updater::run_updater(config) .await .map_err(|e| format!("Failure: {:?}", e)), - (SERVE, Some(_)) => server::serve(config) + Some((SERVE, _)) => server::serve(config) .await .map_err(|e| format!("Failure: {:?}", e)), _ => Err("Unsupported subcommand. See --help".into()), diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs index 315fcbc8358..b31583c6299 100644 --- a/watch/src/database/mod.rs +++ b/watch/src/database/mod.rs @@ -141,7 +141,7 @@ pub fn insert_beacon_block( let parent_root = WatchHash::from_hash(block.parent_root()); let proposer_index = block_message.proposer_index() as i32; let graffiti = block_message.body().graffiti().as_utf8_lossy(); - let attestation_count = block_message.body().attestations().len() as i32; + let attestation_count = block_message.body().attestations_len() as i32; let full_payload = block_message.execution_payload().ok(); diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs index 0db3df2a0d0..e2c8f0f42ac 100644 --- a/watch/src/server/error.rs +++ b/watch/src/server/error.rs @@ -6,6 +6,7 @@ use serde_json::json; use std::io::Error as IoError; #[derive(Debug)] +#[allow(dead_code)] pub enum Error { Axum(AxumError), Hyper(HyperError), diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index 25dd242aab6..08036db9510 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -31,7 +31,7 @@ pub async fn serve(config: FullConfig) -> Result<(), Error> { ) })?; - let server = start_server(&config, slots_per_epoch as u64, db)?; + let (_addr, server) = start_server(&config, slots_per_epoch as u64, db)?; server.await?; @@ -58,7 +58,13 @@ pub fn start_server( config: &FullConfig, slots_per_epoch: u64, pool: PgPool, -) -> Result> + 'static, Error> { +) -> Result< + ( + SocketAddr, + impl Future> + 'static, + ), + Error, +> { let mut routes = Router::new() .route("/v1/slots", get(handler::get_slots_by_range)) .route("/v1/slots/:slot", get(handler::get_slot)) @@ -106,11 +112,15 @@ pub fn start_server( let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); let listener = TcpListener::bind(addr)?; listener.set_nonblocking(true)?; + + // Read the socket address (it may be different from `addr` if listening on port 0). + let socket_addr = listener.local_addr()?; + let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); info!("HTTP server listening on {}", addr); - Ok(serve.into_future()) + Ok((socket_addr, serve.into_future())) } // The default route indicating that no available routes matched the request. 
diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs index 74091c8f217..13c83bcf010 100644 --- a/watch/src/updater/error.rs +++ b/watch/src/updater/error.rs @@ -5,6 +5,7 @@ use eth2::{Error as Eth2Error, SensitiveError}; use std::fmt; #[derive(Debug)] +#[allow(dead_code)] pub enum Error { BeaconChain(BeaconChainError), Eth2(Eth2Error), diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs index a0bfc0b9a46..3ee32560ad7 100644 --- a/watch/src/updater/handler.rs +++ b/watch/src/updater/handler.rs @@ -9,6 +9,7 @@ use eth2::{ }; use log::{debug, error, info, warn}; use std::collections::HashSet; +use std::marker::PhantomData; use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; use crate::updater::{get_beacon_block, get_header, get_validators}; @@ -47,7 +48,7 @@ pub struct UpdateHandler { pub blockprint: Option, pub config: Config, pub slots_per_epoch: u64, - pub spec: WatchSpec, + pub _phantom: PhantomData, } impl UpdateHandler { @@ -84,7 +85,7 @@ impl UpdateHandler { blockprint, config: config.updater, slots_per_epoch: spec.slots_per_epoch(), - spec, + _phantom: PhantomData, }) } diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index 0e29e7f0cd8..5461508edd8 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -14,7 +14,6 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use std::collections::HashMap; use std::env; -use std::net::SocketAddr; use std::time::Duration; use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; use tokio::{runtime, task::JoinHandle}; @@ -154,7 +153,7 @@ impl TesterBuilder { * Create a watch configuration */ let database_port = unused_tcp4_port().expect("Unable to find unused port."); - let server_port = unused_tcp4_port().expect("Unable to find unused port."); + let server_port = 0; let config = Config { database: DatabaseConfig { dbname: random_dbname(), @@ -187,14 +186,9 @@ impl TesterBuilder { /* * Spawn a Watch HTTP API. */ - let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); + let (addr, watch_server) = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); tokio::spawn(watch_server); - let addr = SocketAddr::new( - self.config.server.listen_addr, - self.config.server.listen_port, - ); - /* * Create a HTTP client to talk to the watch HTTP API. */