diff --git a/.env b/.env index 9ba09028ef..8121b24844 100644 --- a/.env +++ b/.env @@ -33,7 +33,7 @@ ESPRESSO_SEQUENCER4_API_PORT=24004 ESPRESSO_SEQUENCER_URL=http://sequencer0:${ESPRESSO_SEQUENCER_API_PORT} ESPRESSO_SEQUENCER_MAX_CONNECTIONS=25 ESPRESSO_SEQUENCER_STORAGE_PATH=/store/sequencer -ESPRESSO_SEQUENCER_GENESIS_FILE=/genesis/demo.toml +ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml ESPRESSO_SEQUENCER_L1_PORT=8545 ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL=100ms ESPRESSO_SEQUENCER_L1_WS_PORT=8546 @@ -57,10 +57,10 @@ ESPRESSO_BUILDER_ETH_ACCOUNT_INDEX=8 ESPRESSO_DEPLOYER_ACCOUNT_INDEX=9 # Contracts -ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0x0c8e79f3534b00d9a3d4a856b665bf4ebc22f2ba +ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0xe1da8919f262ee86f9be05059c9280142cf23f48 ESPRESSO_SEQUENCER_LIGHTCLIENT_ADDRESS=$ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS ESPRESSO_SEQUENCER_PERMISSIONED_PROVER=0x14dc79964da2c08b23698b3d3cc7ca32193d9955 -ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS=0x8ce361602b935680e8dec218b820ff5056beb7af +ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS=0xb19b36b1456e65e3a6d514d3f715f204bd59f431 # Example sequencer demo private keys ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0=BLS_SIGNING_KEY~lNDh4Pn-pTAyzyprOAFdXHwhrKhEwqwtMtkD3CZF4x3o diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b8a58a1756..3737805559 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -94,8 +94,8 @@ jobs: - name: Build Bins run: | - cargo build --locked --profile test --bins - cargo build --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target + cargo build --features "fee,pos,marketplace" --locked --profile test --bins + cargo build --features "fee,pos,marketplace" --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target timeout-minutes: 60 - name: Upload archive to workflow @@ -193,11 +193,14 @@ jobs: needs: [build-test-bins, build-test-artifacts-postgres] strategy: 
matrix: - version: [02,99] + version: [02, 03, 99] include: - version: 02 compose: "-f process-compose.yaml -D" - + - version: 03 + env: + ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos.toml + compose: "-f process-compose.yaml -D" - version: 99 compose: "-f process-compose.yaml -f process-compose-mp.yml -D" fail-fast: false @@ -235,7 +238,7 @@ jobs: NEXTEST_PROFILE: integration INTEGRATION_TEST_SEQUENCER_VERSION: ${{ matrix.version }} run: | - cargo nextest run --archive-file nextest-archive-postgres.tar.zst --verbose --no-fail-fast --nocapture \ + cargo nextest run --archive-file nextest-archive-postgres.tar.zst --verbose --no-fail-fast \ --workspace-remap $PWD $(if [ "${{ matrix.version }}" == "2" ]; then echo " smoke"; fi) timeout-minutes: 10 diff --git a/.gitignore b/.gitignore index 17dd3e5e9a..0645c7ceda 100644 --- a/.gitignore +++ b/.gitignore @@ -56,4 +56,7 @@ contracts/broadcast/*/11155111/ docs/ # Autogen files -.vscode/ \ No newline at end of file +.vscode/ + +# initial stake table +data/initial_stake_table.toml \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 0331e3eb43..44f4170e75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2495,7 +2495,9 @@ dependencies = [ "espresso-types", "ethers", "futures", + "hotshot-types", "jf-merkle-tree", + "serde_json", "surf-disco", "tokio", "tracing", @@ -3726,6 +3728,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", + "indexmap 2.7.1", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", @@ -3752,6 +3755,7 @@ dependencies = [ "tracing", "url", "vbs", + "vec1", ] [[package]] @@ -11158,9 +11162,13 @@ dependencies = [ "espresso-types", "ethers", "futures", + "hotshot-types", "reqwest 0.12.12", + "sequencer-utils", + "serde", "surf-disco", "tokio", + "tracing", "vbs", ] diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 1ff329399e..771e5308eb 100644 --- a/builder/src/bin/permissionless-builder.rs +++ 
b/builder/src/bin/permissionless-builder.rs @@ -2,10 +2,7 @@ use std::{num::NonZeroUsize, path::PathBuf, time::Duration}; use builder::non_permissioned::{build_instance_state, BuilderConfig}; use clap::Parser; -use espresso_types::{ - eth_signature_key::EthKeyPair, parse_duration, FeeVersion, MarketplaceVersion, - SequencerVersions, V0_0, -}; +use espresso_types::{eth_signature_key::EthKeyPair, parse_duration, SequencerVersions}; use futures::future::pending; use hotshot::traits::ValidatedState; use hotshot_types::{ @@ -120,12 +117,25 @@ async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { - run::>(genesis, opt).await + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { + run::>( + genesis, opt + ) + .await + } + (espresso_types::EpochVersion::VERSION, _) => { + run::>( + genesis, opt + // Specifying V0_0 disables upgrades + ) + .await } - (FeeVersion::VERSION, _) => run::>(genesis, opt).await, - (MarketplaceVersion::VERSION, _) => { - run::>(genesis, opt).await + // TODO change `fee` to `pos` + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
diff --git a/builder/src/lib.rs b/builder/src/lib.rs index 534773df39..c36e5d2034 100755 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -138,7 +138,7 @@ pub mod testing { start_voting_time: 0, stop_proposing_time: 0, stop_voting_time: 0, - epoch_height: 0, + epoch_height: 150, }; Self { diff --git a/client/Cargo.toml b/client/Cargo.toml index 85db2033cc..fb65f0ce4b 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -10,7 +10,9 @@ anyhow = { workspace = true } espresso-types = { path = "../types" } ethers = { workspace = true } futures = { workspace = true } +hotshot-types = { workspace = true } jf-merkle-tree = { workspace = true } +serde_json = { workspace = true } surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/client/src/lib.rs b/client/src/lib.rs index a9d5cc995f..2370201787 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,7 +1,8 @@ use anyhow::Context; -use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header}; +use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey, PublicNetworkConfig}; use ethers::types::Address; use futures::{stream::BoxStream, StreamExt}; +use hotshot_types::stake_table::StakeTableEntry; use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Node}, MerkleTreeScheme, @@ -51,7 +52,7 @@ impl SequencerClient { height: u64, ) -> anyhow::Result>> { self.0 - .socket(&format!("availability/stream/headers/{height}")) + .socket(&format!("v0/availability/stream/headers/{height}")) .subscribe::
() .await .context("subscribing to Espresso headers") @@ -119,6 +120,38 @@ impl SequencerClient { let balance = proof.elem().copied().unwrap_or(0.into()); Ok(balance) } + + pub async fn current_epoch(&self) -> anyhow::Result> { + self.0 + .get::>("node/current_epoch") + .send() + .await + .context("getting epoch value") + } + + pub async fn stake_table(&self, epoch: u64) -> anyhow::Result>> { + self.0 + .get::<_>(&format!("node/stake-table/{epoch}")) + .send() + .await + .context("getting stake table") + } + + pub async fn da_members(&self, epoch: u64) -> anyhow::Result>> { + self.0 + .get::<_>(&format!("node/stake-table/da/{epoch}")) + .send() + .await + .context("getting da stake table") + } + + pub async fn config(&self) -> anyhow::Result { + self.0 + .get::("config/hotshot") + .send() + .await + .context("getting hotshot config") + } } #[cfg(test)] diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml new file mode 100644 index 0000000000..d909148ef6 --- /dev/null +++ b/data/genesis/demo-pos-base.toml @@ -0,0 +1,21 @@ +base_version = "0.3" +upgrade_version = "0.3" +epoch_height = 10 + +[stake_table] +capacity = 10 + +[chain_config] +chain_id = 999999999 +max_block_size = '1mb' +base_fee = '1 wei' +fee_recipient = "0x0000000000000000000000000000000000000000" +fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" +stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" + +[header] +timestamp = "1970-01-01T00:00:00Z" + +[l1_finalized] +number = 0 + \ No newline at end of file diff --git a/data/genesis/demo-epoch.toml b/data/genesis/demo-pos.toml similarity index 75% rename from data/genesis/demo-epoch.toml rename to data/genesis/demo-pos.toml index 5b351243f5..6414cab0d2 100644 --- a/data/genesis/demo-epoch.toml +++ b/data/genesis/demo-pos.toml @@ -1,12 +1,13 @@ base_version = "0.2" upgrade_version = "0.3" +epoch_height = 10 [stake_table] capacity = 10 [chain_config] chain_id = 999999999 -base_fee = '0 wei' +base_fee = '1 
wei' max_block_size = '1mb' fee_recipient = '0x0000000000000000000000000000000000000000' fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' @@ -18,9 +19,9 @@ timestamp = "1970-01-01T00:00:00Z" number = 0 [[upgrade]] -version = "0.99" +version = "0.3" start_proposing_view = 10 -stop_proposing_view = 60 +stop_proposing_view = 50 [upgrade.epoch] [upgrade.epoch.chain_config] @@ -28,6 +29,5 @@ chain_id = 999999999 max_block_size = '1mb' base_fee = '1 wei' fee_recipient = "0x0000000000000000000000000000000000000000" -bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" -stake_table_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" \ No newline at end of file +stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" \ No newline at end of file diff --git a/data/initial_stake_table.toml b/data/initial_stake_table.toml deleted file mode 100644 index 59384a6509..0000000000 --- a/data/initial_stake_table.toml +++ /dev/null @@ -1,30 +0,0 @@ -[[public_keys]] -stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" -state_ver_key = "SCHNORR_VER_KEY~ibJCbfPOhDoURqiGLe683TDJ_KOLQCx8_Hdq43dOviSuL6WJJ_2mARKO3xA2k5zpXE3iiq4_z7mzvA-V1VXvIWw" -da = true -stake = 1 - -[[public_keys]] -stake_table_key = "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR" -state_ver_key = "SCHNORR_VER_KEY~lNCMqH5qLthH5OXxW_Z25tLXJUqmzzhsuQ6oVuaPWhtRPmgIKSqcBoJTaEbmGZL2VfTyQNguaoQL4U_4tCA_HmI" -da = true -stake = 1 - -[[public_keys]] -stake_table_key = "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4" 
-state_ver_key = "SCHNORR_VER_KEY~nkFKzpLhJAafJ3LBkY_0h9OzxSyTu95Z029EUFPO4QNkeUo6DHQGTTVjxmprTA5H8jRSn73i0slJvig6dZ5kLX4" -da = true -stake = 1 - -[[public_keys]] -stake_table_key = "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL" -state_ver_key = "SCHNORR_VER_KEY~NwYhzlWarlZHxTNvChWuf74O3fP7zIt5NdC7V8gV6w2W92JOBDkrNmKQeMGxMUke-G5HHxUjHlZEWr1m1xLjEaI" -da = false -stake = 1 - - -[[public_keys]] -stake_table_key = "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7" -state_ver_key = "SCHNORR_VER_KEY~qMfMj1c1hRVTnugvz3MKNnVC5JA9jvZcV3ZCLL_J4Ap-u0i6ulGWveTk3OOelZj2-kd_WD5ojtYGWV1jHx9wCaA" -da = true -stake = 1 \ No newline at end of file diff --git a/data/v3/messages.bin b/data/v3/messages.bin index b07e615a29..ba47afc646 100644 Binary files a/data/v3/messages.bin and b/data/v3/messages.bin differ diff --git a/data/v3/messages.json b/data/v3/messages.json index fcf77f9592..eed2d4964f 100644 --- a/data/v3/messages.json +++ b/data/v3/messages.json @@ -18,7 +18,8 @@ "chain_id": "35353", "fee_contract": null, "fee_recipient": "0x0000000000000000000000000000000000000000", - "max_block_size": "30720" + "max_block_size": "30720", + "stake_table_contract": null } } }, diff --git a/docker-compose.yaml b/docker-compose.yaml index 22e29f0e34..2e6a892065 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -15,11 +15,10 @@ services: - "./geth-config/genesis-default.json:/genesis.json" - "./geth-config/test-jwt-secret.txt:/config/test-jwt-secret.txt" - deploy-sequencer-contracts: + deploy-fee-contract: image: ghcr.io/espressosystems/espresso-sequencer/deploy:main - command: deploy --only fee-contract,permissioned-stake-table + command: deploy --only fee-contract environment: - - 
ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=/data/initial_stake_table.toml - ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS - ESPRESSO_SEQUENCER_L1_PROVIDER - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL @@ -27,12 +26,24 @@ services: - RUST_LOG - RUST_LOG_FORMAT - ASYNC_STD_THREAD_COUNT - volumes: - - "./data/initial_stake_table.toml:/data/initial_stake_table.toml" depends_on: demo-l1-network: condition: service_healthy + deploy-stake-table-contract: + image: ghcr.io/espressosystems/espresso-sequencer/deploy:main + command: deploy --only permissioned-stake-table + environment: + - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL + - ESPRESSO_DEPLOYER_ACCOUNT_INDEX + - RUST_LOG + - RUST_LOG_FORMAT + - ASYNC_STD_THREAD_COUNT + depends_on: + deploy-fee-contract: + condition: service_completed_successfully + deploy-prover-contracts: image: ghcr.io/espressosystems/espresso-sequencer/deploy:main command: deploy --use-mock-contract --only light-client @@ -54,7 +65,9 @@ services: sequencer0: condition: service_healthy # Make sure this doesn't start until the other contracts have been deployed, since we use the same mnemonic. 
- deploy-sequencer-contracts: + deploy-stake-table-contract: + condition: service_completed_successfully + deploy-fee-contract: condition: service_completed_successfully fund-builder: @@ -72,7 +85,7 @@ services: - RUST_LOG - RUST_LOG_FORMAT depends_on: - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer1: condition: service_healthy @@ -220,6 +233,19 @@ services: deploy-prover-contracts: condition: service_completed_successfully + + update-permissioned-stake-table: + image: ghcr.io/espressosystems/espresso-sequencer/update-permissioned-stake-table:main + environment: + - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS + - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_ETH_MNEMONIC + - ESPRESSO_SEQUENCER_L1_PROVIDER + depends_on: + deploy-prover-contracts: + condition: service_completed_successfully + sequencer0: + condition: service_healthy sequencer0: image: ghcr.io/espressosystems/espresso-sequencer/sequencer:main ports: @@ -278,7 +304,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer1: @@ -337,7 +363,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer2: @@ -388,7 +414,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer3: @@ -440,7 +466,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer4: @@ -491,7 +517,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - 
deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully submit-transactions-public: diff --git a/flake.nix b/flake.nix index 47b0a13bba..c4e74ad5fe 100644 --- a/flake.nix +++ b/flake.nix @@ -323,4 +323,4 @@ ]; }); }); -} +} \ No newline at end of file diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs index d22c341858..aeb907d9c7 100644 --- a/hotshot-query-service/examples/simple-server.rs +++ b/hotshot-query-service/examples/simple-server.rs @@ -216,7 +216,7 @@ async fn init_consensus( stop_proposing_time: 0, start_voting_time: 0, stop_voting_time: 0, - epoch_height: 0, + epoch_height: 150, }; let nodes = join_all(priv_keys.into_iter().zip(data_sources).enumerate().map( diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs index 4e04ac0a86..20516711b9 100644 --- a/hotshot-query-service/src/testing/consensus.rs +++ b/hotshot-query-service/src/testing/consensus.rs @@ -148,7 +148,7 @@ impl MockNetwork { stop_proposing_time: 0, start_voting_time: 0, stop_voting_time: 0, - epoch_height: 0, + epoch_height: 150, }; update_config(&mut config); diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs index 6e4e73d671..22c24c85e5 100644 --- a/hotshot-task-impls/src/quorum_proposal/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs @@ -407,7 +407,7 @@ impl ProposalDependencyHandle { .await .drb_seeds_and_results .results - .get(epoch_val) + .get(&(*epoch_val + 1)) .copied() } else { None diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index 3f68744b60..6704993296 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -138,7 +138,7 @@ impl_u64_wrapper!(EpochNumber); impl EpochNumber { /// Create a genesis number (1) #[allow(dead_code)] - fn genesis() -> Self { + pub fn genesis() -> Self { Self(1) } } diff 
--git a/justfile b/justfile index 8ecf0302c4..6816aed8ab 100644 --- a/justfile +++ b/justfile @@ -24,17 +24,23 @@ lint: cargo clippy --workspace --features testing --all-targets -- -D warnings cargo clippy --workspace --all-targets --manifest-path sequencer-sqlite/Cargo.toml -- -D warnings -build profile="test": +build profile="test" features="": #!/usr/bin/env bash set -euxo pipefail # Use the same target dir for both `build` invocations export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} - cargo build --profile {{profile}} - cargo build --profile {{profile}} --manifest-path ./sequencer-sqlite/Cargo.toml + cargo build --profile {{profile}} {{features}} + cargo build --profile {{profile}} --manifest-path ./sequencer-sqlite/Cargo.toml {{features}} demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} +demo-native-pos *args: (build "test" "--features fee,pos") + ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos.toml scripts/demo-native -f process-compose.yaml {{args}} + +demo-native-pos-base *args: build + ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos-base.toml scripts/demo-native -f process-compose.yaml {{args}} + demo-native-benchmark: cargo build --release --features benchmarking scripts/demo-native @@ -77,10 +83,15 @@ test-all: test-integration: @echo 'NOTE that demo-native must be running for this test to succeed.' INTEGRATION_TEST_SEQUENCER_VERSION=2 cargo nextest run --all-features --nocapture --profile integration smoke + test-integration-mp: @echo 'NOTE that demo-native-mp must be running for this test to succeed.' INTEGRATION_TEST_SEQUENCER_VERSION=99 cargo nextest run --all-features --nocapture --profile integration +test-integration-pos: + @echo 'NOTE that demo-native-pos must be running for this test to succeed.' 
+ INTEGRATION_TEST_SEQUENCER_VERSION=3 cargo nextest run --all-features --nocapture --profile integration smoke + clippy: @echo 'features: "embedded-db"' cargo clippy --workspace --features embedded-db --all-targets -- -D warnings diff --git a/marketplace-builder/src/bin/marketplace-builder.rs b/marketplace-builder/src/bin/marketplace-builder.rs index c23cf6a1b3..4091a41dce 100644 --- a/marketplace-builder/src/bin/marketplace-builder.rs +++ b/marketplace-builder/src/bin/marketplace-builder.rs @@ -2,8 +2,7 @@ use std::{num::NonZeroUsize, path::PathBuf, time::Duration}; use clap::Parser; use espresso_types::{ - eth_signature_key::EthKeyPair, parse_duration, FeeAmount, FeeVersion, MarketplaceVersion, - NamespaceId, SequencerVersions, V0_0, + eth_signature_key::EthKeyPair, parse_duration, FeeAmount, NamespaceId, SequencerVersions, }; use futures::future::pending; use hotshot::helpers::initialize_logging; @@ -127,12 +126,24 @@ async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { - run::>(genesis, opt).await + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } - (FeeVersion::VERSION, _) => run::>(genesis, opt).await, - (MarketplaceVersion::VERSION, _) => { - run::>(genesis, opt).await + (espresso_types::EpochVersion::VERSION, _) => { + run::>( + genesis, opt + // Specifying V0_0 disables upgrades + ) + .await + } + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
diff --git a/process-compose.yaml b/process-compose.yaml index b19578b95b..6a704feffd 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -5,8 +5,7 @@ environment: - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT - - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml - - ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo.toml + - ESPRESSO_BUILDER_GENESIS_FILE=$ESPRESSO_SEQUENCER_GENESIS_FILE - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/ @@ -28,7 +27,9 @@ processes: command: unset ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS ESPRESSO_SEQUENCER_FEE_CONTRACT_PROXY_ADDRESS - && deploy --only fee-contract,permissioned-stake-table + && deploy --only fee-contract + && unset ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS + && deploy --only permissioned-stake-table namespace: setup depends_on: demo-l1-network: @@ -111,6 +112,21 @@ processes: condition: process_healthy deploy-prover-contracts: condition: process_completed + + update-permissioned-stake-table: + command: update-permissioned-stake-table + environment: + - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_ETH_MNEMONIC + - ESPRESSO_SEQUENCER_L1_PROVIDER + depends_on: + deploy-prover-contracts: + condition: process_completed + sequencer0: + condition: process_healthy + + sequencer0: command: sequencer -- storage-sql -- http -- query -- submit -- hotshot-events -- config diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 7cec522929..62b192f64a 100644 --- a/sequencer-sqlite/Cargo.lock +++ 
b/sequencer-sqlite/Cargo.lock @@ -2386,7 +2386,9 @@ dependencies = [ "espresso-types", "ethers", "futures", + "hotshot-types", "jf-merkle-tree", + "serde_json", "surf-disco", "tokio", "tracing", @@ -3552,6 +3554,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", + "indexmap 2.7.1", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", @@ -3577,6 +3580,7 @@ dependencies = [ "tracing", "url", "vbs", + "vec1", ] [[package]] diff --git a/sequencer-sqlite/Cargo.toml b/sequencer-sqlite/Cargo.toml index 5c6c1e6d58..8ada2b8dde 100644 --- a/sequencer-sqlite/Cargo.toml +++ b/sequencer-sqlite/Cargo.toml @@ -7,7 +7,10 @@ version = "0.1.0" edition = "2021" [features] -default = ["embedded-db"] +fee = ["sequencer/fee"] +pos = ["sequencer/pos"] +marketplace = ["sequencer/marketplace"] +default = ["embedded-db", "pos"] sqlite-unbundled = ["sequencer/sqlite-unbundled"] embedded-db = ["sequencer/embedded-db"] diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 661385145e..69102a5d19 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -default = ["fee"] +default = ["pos"] testing = [ "hotshot-testing", "marketplace-builder-core", diff --git a/sequencer/api/node.toml b/sequencer/api/node.toml index ddcdaa7db6..1ad3d4a05c 100644 --- a/sequencer/api/node.toml +++ b/sequencer/api/node.toml @@ -2,7 +2,20 @@ PATH = ["stake-table/current"] DOC = "Get the stake table for the current epoch" +[route.da_members_current] +PATH = ["stake-table/da/current"] +DOC = "Get the stake table da members for the current epoch" + [route.stake_table] PATH = ["stake-table/:epoch_number"] ":epoch_number" = "Integer" DOC = "Get the stake table for the given epoch" + +[route.da_members] +PATH = ["stake-table/da/:epoch_number"] +":epoch_number" = "Integer" +DOC = "Get the stake table da members for the given epoch" + 
+[route.current_epoch] +PATH = ["current_epoch"] +DOC = "Get the current epoch" \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 750eae170e..c76207e192 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -7,8 +7,8 @@ use data_source::{CatchupDataSource, StakeTableDataSource, SubmitDataSource}; use derivative::Derivative; use espresso_types::{ retain_accounts, v0::traits::SequencerPersistence, v0_99::ChainConfig, AccountQueryData, - BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction, - ValidatedState, + BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, + PublicNetworkConfig, Transaction, ValidatedState, }; use futures::{ future::{BoxFuture, Future, FutureExt}, @@ -35,9 +35,7 @@ use jf_merkle_tree::MerkleTreeScheme; use std::pin::Pin; use std::sync::Arc; -use self::data_source::{ - HotShotConfigDataSource, NodeStateDataSource, PublicNetworkConfig, StateSignatureDataSource, -}; +use self::data_source::{HotShotConfigDataSource, NodeStateDataSource, StateSignatureDataSource}; use crate::{ catchup::CatchupStorage, context::Consensus, state_signature::StateSigner, SeqTypes, SequencerApiVersion, SequencerContext, @@ -171,12 +169,32 @@ impl, D: Sync, V: Versions, P: SequencerPersistence> self.as_ref().get_stake_table(epoch).await } + /// Get the stake table for a given epoch + async fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> Vec::SignatureKey>> { + self.as_ref().get_da_members(epoch).await + } + /// Get the stake table for the current epoch if not provided async fn get_stake_table_current( &self, ) -> Vec::SignatureKey>> { self.as_ref().get_stake_table_current().await } + + /// Get the stake table for the current epoch if not provided + async fn get_da_members_current( + &self, + ) -> Vec::SignatureKey>> { + self.as_ref().get_da_members_current().await + } + + /// Get the stake table for the current epoch if not provided + async fn 
get_current_epoch(&self) -> Option<::Epoch> { + self.as_ref().get_current_epoch().await + } } impl, V: Versions, P: SequencerPersistence> @@ -205,6 +223,33 @@ impl, V: Versions, P: SequencerPersistence> self.get_stake_table(epoch).await } + + async fn get_current_epoch(&self) -> Option<::Epoch> { + self.consensus().await.read().await.cur_epoch().await + } + + async fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> Vec::SignatureKey>> { + self.consensus() + .await + .read() + .await + .memberships + .read() + .await + .da_stake_table(epoch) + } + + /// Get the stake table for the current epoch if not provided + async fn get_da_members_current( + &self, + ) -> Vec::SignatureKey>> { + let epoch = self.consensus().await.read().await.cur_epoch().await; + + self.get_da_members(epoch).await + } } impl, V: Versions, P: SequencerPersistence> SubmitDataSource @@ -1558,15 +1603,18 @@ mod api_tests { #[cfg(test)] mod test { use committable::{Commitment, Committable}; - use std::{collections::BTreeMap, time::Duration}; + use std::{ + collections::{BTreeMap, HashSet}, + time::Duration, + }; use tokio::time::sleep; use espresso_types::{ traits::NullEventConsumer, v0_1::{UpgradeMode, ViewBasedUpgrade}, - BackoffParams, FeeAccount, FeeAmount, FeeVersion, Header, MarketplaceVersion, - MockSequencerVersions, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, - UpgradeType, ValidatedState, + BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions, + PublicHotShotConfig, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, + ValidatedState, }; use ethers::utils::Anvil; use futures::{ @@ -1596,8 +1644,7 @@ mod test { use vbs::version::{StaticVersion, StaticVersionType, Version}; use self::{ - data_source::{testing::TestableSequencerDataSource, PublicHotShotConfig}, - options::HotshotEvents, + data_source::testing::TestableSequencerDataSource, options::HotshotEvents, sql::DataSource as SqlDataSource, }; use super::*; @@ 
-1606,6 +1653,7 @@ mod test { persistence::no_storage, testing::{TestConfig, TestConfigBuilder}, }; + use espresso_types::EpochVersion; #[tokio::test(flavor = "multi_thread")] async fn test_healthcheck() { @@ -2125,72 +2173,42 @@ mod test { handle.abort(); } - #[tokio::test(flavor = "multi_thread")] - async fn test_fee_upgrade_view_based() { - setup_test(); - - let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions, StaticVersion<0, 2>>; - - let mode = UpgradeMode::View(ViewBasedUpgrade { - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 1, - stop_proposing_view: 10, - }); - - let upgrade_type = UpgradeType::Fee { - chain_config: ChainConfig { - max_block_size: 300.into(), - base_fee: 1.into(), - ..Default::default() - }, - }; - - upgrades.insert( - ::Upgrade::VERSION, - Upgrade { mode, upgrade_type }, - ); - test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_fee_upgrade_time_based() { - setup_test(); - - let now = OffsetDateTime::now_utc().unix_timestamp() as u64; - - let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions, StaticVersion<0, 2>>; - - let mode = UpgradeMode::Time(TimeBasedUpgrade { - start_proposing_time: Timestamp::from_integer(now).unwrap(), - stop_proposing_time: Timestamp::from_integer(now + 500).unwrap(), - start_voting_time: None, - stop_voting_time: None, - }); - - let upgrade_type = UpgradeType::Fee { - chain_config: ChainConfig { - max_block_size: 300.into(), - base_fee: 1.into(), - ..Default::default() - }, - }; - - upgrades.insert( - ::Upgrade::VERSION, - Upgrade { mode, upgrade_type }, - ); - test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; - } + // #[tokio::test(flavor = "multi_thread")] + // async fn test_pos_upgrade_view_based() { + // setup_test(); + + // let mut upgrades = std::collections::BTreeMap::new(); + // type 
MySequencerVersions = SequencerVersions; + + // let mode = UpgradeMode::View(ViewBasedUpgrade { + // start_voting_view: None, + // stop_voting_view: None, + // start_proposing_view: 1, + // stop_proposing_view: 10, + // }); + + // let upgrade_type = UpgradeType::Epoch { + // chain_config: ChainConfig { + // max_block_size: 500.into(), + // base_fee: 2.into(), + // stake_table_contract: Some(Default::default()), + // ..Default::default() + // }, + // }; + + // upgrades.insert( + // ::Upgrade::VERSION, + // Upgrade { mode, upgrade_type }, + // ); + // test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; + // } #[tokio::test(flavor = "multi_thread")] async fn test_marketplace_upgrade_view_based() { setup_test(); let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; + type MySequencerVersions = SequencerVersions; let mode = UpgradeMode::View(ViewBasedUpgrade { start_voting_view: None, @@ -2222,7 +2240,7 @@ mod test { let now = OffsetDateTime::now_utc().unix_timestamp() as u64; let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; + type MySequencerVersions = SequencerVersions; let mode = UpgradeMode::Time(TimeBasedUpgrade { start_proposing_time: Timestamp::from_integer(now).unwrap(), @@ -2339,7 +2357,7 @@ mod test { // ChainConfigs will eventually be resolved if let Some(configs) = configs { tracing::info!(?configs, "configs"); - if height > new_version_first_view { + if height > new_version_first_view + 10 { for config in configs { assert_eq!(config, chain_config_upgrade); } @@ -2571,6 +2589,7 @@ mod test { let mut receive_count = 0; loop { let event = subscribed_events.next().await.unwrap(); + dbg!(&event); tracing::info!( "Received event in hotshot event streaming Client 1: {:?}", event @@ -2583,4 +2602,73 @@ mod test { } assert_eq!(receive_count, total_count + 1); } + // TODO unfinished test. 
The idea is to observe that epochs and views
} = event.event { + dbg!("got decide"); + } + if views.contains(&total_count) { + tracing::info!("Client Received at least desired views, exiting loop"); + break; + } + if i > 100 { + // Timeout + panic!("Views are not progressing"); + } + i += 1; + } + assert!(views.contains(&total_count)); + } } diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index e3703d3874..fc4f8784ab 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -1,12 +1,11 @@ -use std::{num::NonZeroUsize, time::Duration}; - use anyhow::Context; use async_trait::async_trait; use committable::Commitment; use espresso_types::{ v0::traits::{PersistenceOptions, SequencerPersistence}, v0_99::ChainConfig, - FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction, + FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, PublicNetworkConfig, + Transaction, }; use futures::future::Future; use hotshot_query_service::{ @@ -16,21 +15,15 @@ use hotshot_query_service::{ node::NodeDataSource, status::StatusDataSource, }; +use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, - network::NetworkConfig, stake_table::StakeTableEntry, traits::{network::ConnectedNetwork, node_implementation::Versions}, - HotShotConfig, PeerConfig, ValidatorConfig, -}; -use hotshot_types::{ - network::{BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig}, - traits::node_implementation::NodeType, }; -use serde::{Deserialize, Serialize}; + use tide_disco::Url; -use vec1::Vec1; use super::{ fs, @@ -127,6 +120,17 @@ pub(crate) trait StakeTableDataSource { fn get_stake_table_current( &self, ) -> impl Send + Future>>; + + fn get_current_epoch(&self) -> impl Send + Future>; + + fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> impl Send + Future::SignatureKey>>>; + /// Get the stake table for the current epoch if not provided + fn 
get_da_members_current( + &self, + ) -> impl Send + Future::SignatureKey>>>; } pub(crate) trait CatchupDataSource: Sync { @@ -187,245 +191,6 @@ pub(crate) trait CatchupDataSource: Sync { ) -> impl Send + Future>; } -/// This struct defines the public Hotshot validator configuration. -/// Private key and state key pairs are excluded for security reasons. - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct PublicValidatorConfig { - public_key: PubKey, - stake_value: u64, - is_da: bool, - private_key: String, - state_public_key: String, - state_key_pair: String, -} - -impl From> for PublicValidatorConfig { - fn from(v: ValidatorConfig) -> Self { - let ValidatorConfig:: { - public_key, - private_key: _, - stake_value, - state_key_pair, - is_da, - } = v; - - let state_public_key = state_key_pair.ver_key(); - - Self { - public_key, - stake_value, - is_da, - state_public_key: state_public_key.to_string(), - private_key: "*****".into(), - state_key_pair: "*****".into(), - } - } -} - -/// This struct defines the public Hotshot configuration parameters. -/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters. -/// Hotshot config has sensitive information like private keys and such fields are excluded from this struct. 
-#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct PublicHotShotConfig { - start_threshold: (u64, u64), - num_nodes_with_stake: NonZeroUsize, - known_nodes_with_stake: Vec>, - known_da_nodes: Vec>, - da_staked_committee_size: usize, - fixed_leader_for_gpuvid: usize, - next_view_timeout: u64, - view_sync_timeout: Duration, - num_bootstrap: usize, - builder_timeout: Duration, - data_request_delay: Duration, - builder_urls: Vec1, - start_proposing_view: u64, - stop_proposing_view: u64, - start_voting_view: u64, - stop_voting_view: u64, - start_proposing_time: u64, - stop_proposing_time: u64, - start_voting_time: u64, - stop_voting_time: u64, - epoch_height: u64, -} - -impl From> for PublicHotShotConfig { - fn from(v: HotShotConfig) -> Self { - // Destructure all fields from HotShotConfig to return an error - // if new fields are added to HotShotConfig. This makes sure that we handle - // all fields appropriately and do not miss any updates. - let HotShotConfig:: { - start_threshold, - num_nodes_with_stake, - known_nodes_with_stake, - known_da_nodes, - da_staked_committee_size, - fixed_leader_for_gpuvid, - next_view_timeout, - view_sync_timeout, - num_bootstrap, - builder_timeout, - data_request_delay, - builder_urls, - start_proposing_view, - stop_proposing_view, - start_voting_view, - stop_voting_view, - start_proposing_time, - stop_proposing_time, - start_voting_time, - stop_voting_time, - epoch_height, - } = v; - - Self { - start_threshold, - num_nodes_with_stake, - known_nodes_with_stake, - known_da_nodes, - da_staked_committee_size, - fixed_leader_for_gpuvid, - next_view_timeout, - view_sync_timeout, - num_bootstrap, - builder_timeout, - data_request_delay, - builder_urls, - start_proposing_view, - stop_proposing_view, - start_voting_view, - stop_voting_view, - start_proposing_time, - stop_proposing_time, - start_voting_time, - stop_voting_time, - epoch_height, - } - } -} - -impl PublicHotShotConfig { - pub fn into_hotshot_config(self) -> HotShotConfig 
{ - HotShotConfig { - start_threshold: self.start_threshold, - num_nodes_with_stake: self.num_nodes_with_stake, - known_nodes_with_stake: self.known_nodes_with_stake, - known_da_nodes: self.known_da_nodes, - da_staked_committee_size: self.da_staked_committee_size, - fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid, - next_view_timeout: self.next_view_timeout, - view_sync_timeout: self.view_sync_timeout, - num_bootstrap: self.num_bootstrap, - builder_timeout: self.builder_timeout, - data_request_delay: self.data_request_delay, - builder_urls: self.builder_urls, - start_proposing_view: self.start_proposing_view, - stop_proposing_view: self.stop_proposing_view, - start_voting_view: self.start_voting_view, - stop_voting_view: self.stop_voting_view, - start_proposing_time: self.start_proposing_time, - stop_proposing_time: self.stop_proposing_time, - start_voting_time: self.start_voting_time, - stop_voting_time: self.stop_voting_time, - epoch_height: self.epoch_height, - } - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct PublicNetworkConfig { - rounds: usize, - indexed_da: bool, - transactions_per_round: usize, - manual_start_password: Option, - num_bootrap: usize, - next_view_timeout: u64, - view_sync_timeout: Duration, - builder_timeout: Duration, - data_request_delay: Duration, - node_index: u64, - seed: [u8; 32], - transaction_size: usize, - key_type_name: String, - libp2p_config: Option, - config: PublicHotShotConfig, - cdn_marshal_address: Option, - combined_network_config: Option, - commit_sha: String, - builder: BuilderType, - random_builder: Option, -} - -impl From> for PublicNetworkConfig { - fn from(cfg: NetworkConfig) -> Self { - Self { - rounds: cfg.rounds, - indexed_da: cfg.indexed_da, - transactions_per_round: cfg.transactions_per_round, - manual_start_password: Some("*****".into()), - num_bootrap: cfg.num_bootrap, - next_view_timeout: cfg.next_view_timeout, - view_sync_timeout: cfg.view_sync_timeout, - builder_timeout: 
cfg.builder_timeout, - data_request_delay: cfg.data_request_delay, - node_index: cfg.node_index, - seed: cfg.seed, - transaction_size: cfg.transaction_size, - key_type_name: cfg.key_type_name, - libp2p_config: cfg.libp2p_config, - config: cfg.config.into(), - cdn_marshal_address: cfg.cdn_marshal_address, - combined_network_config: cfg.combined_network_config, - commit_sha: cfg.commit_sha, - builder: cfg.builder, - random_builder: cfg.random_builder, - } - } -} - -impl PublicNetworkConfig { - pub fn into_network_config( - self, - my_own_validator_config: ValidatorConfig, - ) -> anyhow::Result> { - let node_index = self - .config - .known_nodes_with_stake - .iter() - .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key) - .context(format!( - "the node {} is not in the stake table", - my_own_validator_config.public_key - ))? as u64; - - Ok(NetworkConfig { - rounds: self.rounds, - indexed_da: self.indexed_da, - transactions_per_round: self.transactions_per_round, - manual_start_password: self.manual_start_password, - num_bootrap: self.num_bootrap, - next_view_timeout: self.next_view_timeout, - view_sync_timeout: self.view_sync_timeout, - builder_timeout: self.builder_timeout, - data_request_delay: self.data_request_delay, - node_index, - seed: self.seed, - transaction_size: self.transaction_size, - key_type_name: self.key_type_name, - libp2p_config: self.libp2p_config, - config: self.config.into_hotshot_config(), - cdn_marshal_address: self.cdn_marshal_address, - combined_network_config: self.combined_network_config, - commit_sha: self.commit_sha, - builder: self.builder, - random_builder: self.random_builder, - public_keys: Vec::new(), - }) - } -} - #[cfg(any(test, feature = "testing"))] pub mod testing { use super::{super::Options, *}; diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index ed661026e8..95c4971030 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -216,6 
+216,35 @@ where .await) } .boxed() + })? + .at("current_epoch", |_, state| { + async move { Ok(state.read(|state| state.get_current_epoch().boxed()).await) }.boxed() + })? + .at("da_members", |req, state| { + async move { + // Try to get the epoch from the request. If this fails, error + // as it was probably a mistake + let epoch = req + .opt_integer_param("epoch_number") + .map_err(|_| hotshot_query_service::node::Error::Custom { + message: "Epoch number is required".to_string(), + status: StatusCode::BAD_REQUEST, + })? + .map(EpochNumber::new); + + Ok(state + .read(|state| state.get_da_members(epoch).boxed()) + .await) + } + .boxed() + })? + .at("da_members_current", |_, state| { + async move { + Ok(state + .read(|state| state.get_da_members_current().boxed()) + .await) + } + .boxed() })?; Ok(api) diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 8a5916bd43..d766827cc0 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -9,7 +9,6 @@ use hotshot_state_prover::service::light_client_genesis; use sequencer_utils::{ deployer::{deploy, ContractGroup, Contracts, DeployedContracts}, logging, - stake_table::PermissionedStakeTableConfig, }; use url::Url; @@ -122,22 +121,6 @@ struct Options { /// If the light client contract is not being deployed, this option is ignored. #[clap(long, env = "ESPRESSO_SEQUENCER_PERMISSIONED_PROVER")] permissioned_prover: Option
, - - /// A toml file with the initial stake table. - /// - /// Schema: - /// - /// public_keys = [ - /// { - /// stake_table_key = "BLS_VER_KEY~...", - /// state_ver_key = "SCHNORR_VER_KEY~...", - /// da = true, - /// stake = 1, # this value is ignored, but needs to be set - /// }, - /// ] - #[clap(long, env = "ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")] - initial_stake_table_path: Option, - #[clap(flatten)] logging: logging::Config, } @@ -153,13 +136,6 @@ async fn main() -> anyhow::Result<()> { let genesis = light_client_genesis(&sequencer_url, opt.stake_table_capacity).boxed(); - let initial_stake_table = if let Some(path) = opt.initial_stake_table_path { - tracing::info!("Loading initial stake table from {:?}", path); - Some(PermissionedStakeTableConfig::from_toml_file(&path)?.into()) - } else { - None - }; - let contracts = deploy( opt.rpc_url, opt.l1_polling_interval, @@ -171,7 +147,6 @@ async fn main() -> anyhow::Result<()> { genesis, opt.permissioned_prover, contracts, - initial_stake_table, ) .await?; diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 8201904e45..2382ba5b38 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -231,6 +231,8 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { } }; + // TODO this appears to be broken. We often hit the `else` block + // when the builder was in fact funded. // Confirm that the Espresso balance has increased. 
let final_balance = espresso .get_espresso_balance(l1.address(), Some(espresso_block)) diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 7fed6cec03..c80fb4c72d 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -292,7 +292,6 @@ async fn main() -> anyhow::Result<()> { async { Ok(lc_genesis.clone()) }.boxed(), None, contracts.clone(), - None, // initial stake table ) .await?; diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index 28e30767d6..a966a30f22 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -1,23 +1,22 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use clap::Parser; +use client::SequencerClient; use espresso_types::parse_duration; use ethers::types::Address; +use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType}; + use sequencer_utils::{ logging, stake_table::{update_stake_table, PermissionedStakeTableUpdate}, }; use std::{path::PathBuf, time::Duration}; + use url::Url; #[derive(Debug, Clone, Parser)] struct Options { /// RPC URL for the L1 provider. - #[clap( - short, - long, - env = "ESPRESSO_SEQUENCER_L1_PROVIDER", - default_value = "http://localhost:8545" - )] + #[clap(short, long, env = "ESPRESSO_SEQUENCER_L1_PROVIDER")] rpc_url: Url, /// Request rate when polling L1. @@ -33,12 +32,7 @@ struct Options { /// /// This wallet is used to deploy the contracts, so the account indicated by ACCOUNT_INDEX must /// be funded with with ETH. 
- #[clap( - long, - name = "MNEMONIC", - env = "ESPRESSO_SEQUENCER_ETH_MNEMONIC", - default_value = "test test test test test test test test test test test junk" - )] + #[clap(long, name = "MNEMONIC", env = "ESPRESSO_SEQUENCER_ETH_MNEMONIC")] mnemonic: String, /// Account index in the L1 wallet generated by MNEMONIC to use when deploying the contracts. @@ -78,7 +72,19 @@ struct Options { env = "ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_UPDATE_TOML_PATH", verbatim_doc_comment )] - update_toml_path: PathBuf, + update_toml_path: Option, + + /// Peers for fetching hotshot config + /// used to update the contract with the initial stake table. + /// This stake table is fetched directly from hotshot config, and is pre-epoch stake table + #[clap( + long, + env = "ESPRESSO_SEQUENCER_STATE_PEERS", + value_delimiter = ',', + conflicts_with = "update_toml_path" + )] + pub state_peers: Option>, + #[clap(flatten)] logging: logging::Config, } @@ -87,7 +93,48 @@ struct Options { async fn main() -> Result<()> { let opts = Options::parse(); opts.logging.init(); - let update = PermissionedStakeTableUpdate::from_toml_file(&opts.update_toml_path)?; + + let mut update: Option = None; + + match opts.update_toml_path { + Some(path) => { + tracing::error!("updating stake table from path: {path:?}"); + update = Some(PermissionedStakeTableUpdate::from_toml_file(&path)?); + } + None => { + let peers = opts.state_peers.context("No state peers found")?; + let clients: Vec = + peers.into_iter().map(SequencerClient::new).collect(); + + for client in &clients { + tracing::warn!("calling config endpoint of {client:?}"); + + match client.config().await { + Ok(config) => { + let hotshot = config.hotshot_config().into_hotshot_config(); + let st = hotshot.known_nodes_with_stake; + let da_nodes = hotshot.known_da_nodes; + + let new_stakers = st + .into_iter() + .map(|s| PeerConfigKeys { + stake_table_key: s.stake_table_entry.stake_key, + state_ver_key: s.state_ver_key.clone(), + stake: 
s.stake_table_entry.stake().as_u64(), + da: da_nodes.contains(&s), + }) + .collect(); + + update = Some(PermissionedStakeTableUpdate::new(new_stakers, Vec::new())); + break; + } + Err(e) => { + tracing::warn!("Failed to fetch config from sequencer: {e}"); + } + }; + } + } + } update_stake_table( opts.rpc_url, @@ -95,7 +142,7 @@ async fn main() -> Result<()> { opts.mnemonic, opts.account_index, opts.contract_address, - update, + update.unwrap(), ) .await?; diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index 8c8ca7a667..99ea884bda 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -6,6 +6,7 @@ use async_trait::async_trait; use committable::Commitment; use committable::Committable; use espresso_types::traits::SequencerPersistence; +use espresso_types::PublicNetworkConfig; use espresso_types::{ v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, @@ -31,10 +32,8 @@ use tokio::time::timeout; use url::Url; use vbs::version::StaticVersionType; -use crate::{ - api::{data_source::PublicNetworkConfig, BlocksFrontier}, - PubKey, -}; +use crate::api::BlocksFrontier; +use crate::PubKey; // This newtype is probably not worth having. It's only used to be able to log // URLs before doing requests. 
diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 2e6235fc54..790a8d6193 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -53,6 +53,7 @@ pub struct Genesis { pub base_version: Version, #[serde(with = "version_ser")] pub upgrade_version: Version, + pub epoch_height: Option, pub chain_config: ChainConfig, pub stake_table: StakeTableConfig, #[serde(default)] @@ -83,6 +84,7 @@ impl Genesis { } impl Genesis { + // TODO `validate_stake_table_contract` and wrapper `validate_contracts` pub async fn validate_fee_contract(&self, l1_rpc_url: Url) -> anyhow::Result<()> { let l1 = L1Client::new(vec![l1_rpc_url]).with_context(|| "failed to create L1 client")?; @@ -101,7 +103,8 @@ impl Genesis { // now iterate over each upgrade type and validate the fee contract if it exists for (version, upgrade) in &self.upgrades { let chain_config = &upgrade.upgrade_type.chain_config(); - + // Is this not an error case? Isn't a chain config a + // requirement? At least for most versions? if chain_config.is_none() { continue; } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 680d44feaf..d79d16ae13 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -362,6 +362,10 @@ pub async fn init_node( upgrade.set_hotshot_config_parameters(&mut network_config.config); } + let epoch_height = genesis.epoch_height.unwrap_or_default(); + tracing::info!("setting epoch height={epoch_height:?}"); + network_config.config.epoch_height = epoch_height; + // If the `Libp2p` bootstrap nodes were supplied via the command line, override those // present in the config file. 
if let Some(bootstrap_nodes) = network_params.libp2p_bootstrap_nodes { @@ -480,7 +484,7 @@ pub async fn init_node( node_id: node_index, upgrades: genesis.upgrades, current_version: V::Base::VERSION, - epoch_height: None, + epoch_height: network_config.config.epoch_height, }; // Create the HotShot membership @@ -738,6 +742,11 @@ pub mod testing { self } + pub fn with_epoch_height(mut self, epoch_height: u64) -> Self { + self.config.epoch_height = epoch_height; + self + } + pub fn upgrades(mut self, upgrades: BTreeMap) -> Self { let upgrade = upgrades.get(&::Upgrade::VERSION).unwrap(); upgrade.set_hotshot_config_parameters(&mut self.config); @@ -811,7 +820,7 @@ pub mod testing { start_voting_time: 0, stop_proposing_time: 0, stop_voting_time: 0, - epoch_height: 0, + epoch_height: 150, }; Self { @@ -971,7 +980,6 @@ pub mod testing { ) .with_current_version(V::Base::version()) .with_genesis(state) - .with_epoch_height(config.epoch_height) .with_upgrades(upgrades); // Create the HotShot membership diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs old mode 100644 new mode 100755 index 02bdddd96a..8a8825f653 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -284,7 +284,7 @@ pub struct Options { long, name = "GENESIS_FILE", env = "ESPRESSO_SEQUENCER_GENESIS_FILE", - default_value = "/genesis/demo.toml" + default_value = "/data/genesis/demo.toml" )] pub genesis_file: PathBuf, diff --git a/sequencer/src/restart_tests.rs b/sequencer/src/restart_tests.rs index 7631120994..4c6c5a82c5 100755 --- a/sequencer/src/restart_tests.rs +++ b/sequencer/src/restart_tests.rs @@ -543,6 +543,7 @@ impl TestNetwork { upgrades: Default::default(), base_version: Version { major: 0, minor: 1 }, upgrade_version: Version { major: 0, minor: 2 }, + epoch_height: None, // Start with a funded account, so we can test catchup after restart. 
accounts: [(builder_account(), 1000000000.into())] diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 182be004e2..568716a268 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -8,11 +8,7 @@ use super::{ persistence, Genesis, L1Params, NetworkParams, }; use clap::Parser; -#[allow(unused_imports)] -use espresso_types::{ - traits::NullEventConsumer, FeeVersion, MarketplaceVersion, SequencerVersions, - SolverAuctionResultsProvider, V0_0, -}; +use espresso_types::{traits::NullEventConsumer, SequencerVersions, SolverAuctionResultsProvider}; use futures::future::FutureExt; use hotshot::MarketplaceConfig; use hotshot_types::traits::{metrics::NoMetrics, node_implementation::Versions}; @@ -39,33 +35,35 @@ pub async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { - #[cfg(all(feature = "fee", feature = "marketplace"))] - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { + #[cfg(all(feature = "fee", feature = "pos"))] + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { run( genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await } - #[cfg(feature = "fee")] - (FeeVersion::VERSION, _) => { + #[cfg(feature = "pos")] + (espresso_types::EpochVersion::VERSION, _) => { run( genesis, modules, opt, - SequencerVersions::::new(), + // Specifying V0_0 disables upgrades + SequencerVersions::::new(), ) .await } - #[cfg(feature = "marketplace")] - (MarketplaceVersion::VERSION, _) => { + // TODO change `fee` to `pos` + #[cfg(all(feature = "fee", feature = "marketplace"))] + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { run( genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await } @@ -295,6 +293,7 @@ mod test { upgrades: Default::default(), base_version: Version { major: 0, minor: 1 }, upgrade_version: Version { major: 0, minor: 2 }, + epoch_height: None, }; 
genesis.to_file(&genesis_file).unwrap(); diff --git a/tests/Cargo.toml b/tests/Cargo.toml index eeb91c14a7..551f6f1f36 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -16,7 +16,11 @@ dotenvy = { workspace = true } espresso-types = { path = "../types", features = ["testing"] } ethers = { workspace = true } futures = { workspace = true } +hotshot-types = { workspace = true } reqwest = { workspace = true, features = ["json"] } +sequencer-utils = { path = "../utils" } +serde = { workspace = true } surf-disco = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } vbs = { workspace = true } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 22b206cfbe..dc5f8e989d 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,13 +1,22 @@ -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use client::SequencerClient; use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; use ethers::prelude::*; -use futures::future::join_all; +use futures::future::{join_all, BoxFuture}; +use futures::FutureExt; +use hotshot_types::network::PeerConfigKeys; +use hotshot_types::traits::signature_key::StakeTableEntryType; + use std::{fmt, str::FromStr, time::Duration}; use surf_disco::Url; use tokio::time::{sleep, timeout}; use vbs::version::StaticVersionType; +use dotenvy::var; +use sequencer_utils::stake_table::{ + update_stake_table, PermissionedStakeTableUpdate, StakerIdentity, +}; + const L1_PROVIDER_RETRY_INTERVAL: Duration = Duration::from_secs(1); // TODO add to .env const RECIPIENT_ADDRESS: &str = "0x0000000000000000000000000000000000000000"; @@ -277,3 +286,120 @@ async fn wait_for_service(url: Url, interval: u64, timeout_duration: u64) -> Res .await .map_err(|e| anyhow!("Wait for service, timeout: ({}) {}", url, e))? 
} + +/* + EPOCH V3 +*/ + +pub async fn test_stake_table_update(clients: Vec) -> Result<()> { + let l1_port = var("ESPRESSO_SEQUENCER_L1_PORT")?; + let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?; + let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?; + let client = clients[0].clone(); + + let assert_change = + |u: PermissionedStakeTableUpdate| -> BoxFuture<'static, anyhow::Result<()>> { + let client = client.clone(); + let l1_port = l1_port.clone(); + let account_index = account_index.clone(); + let contract_address = contract_address.clone(); + async move { + let epoch_before_update = client.current_epoch().await?.context("curr epoch")?; + tracing::warn!("current_epoch={epoch_before_update:?}"); + + let current_stake_table = client.stake_table(epoch_before_update).await?; + + let removed = u.stakers_to_remove.len(); + let added = u.new_stakers.len(); + + update_stake_table( + format!("http://localhost:{l1_port}").parse()?, + Duration::from_secs(7), + "test test test test test test test test test test test junk".to_string(), + account_index.parse()?, + contract_address.parse()?, + u.clone(), + ) + .await?; + + loop { + sleep(Duration::from_secs(10)).await; + let epoch = client.current_epoch().await?.context("curr epoch")?; + tracing::info!("current_epoch={epoch:?}"); + if epoch > epoch_before_update + 2 { + let stake_table = client.stake_table(epoch).await?; + tracing::info!("stake_table={stake_table:?}"); + assert_eq!( + stake_table.len(), + current_stake_table.len() + added - removed + ); + + for added in &u.new_stakers { + assert!( + stake_table + .iter() + .any(|st| st.stake_key == added.stake_table_key), + "staker {} not found", + added.stake_table_key + ); + } + + for removed in &u.stakers_to_remove { + assert!( + stake_table + .iter() + .all(|st| st.stake_key != removed.stake_table_key), + "staker {} found", + removed.stake_table_key + ); + } + + break; + } + } + + anyhow::Result::<_>::Ok(()) + } + .boxed() + }; + + 
let config = client.config().await?.hotshot_config(); + + // currently stake table update does not support DA node member changes + let stake_table = config.known_nodes_with_stake(); + let da_members = config.known_da_nodes(); + + // filtering out DA nodes + let non_da_stakers: Vec<_> = stake_table + .into_iter() + .filter(|x| !da_members.contains(x)) + .collect(); + + let node = non_da_stakers.first().context("no non da staker found")?; + let one_removed = PermissionedStakeTableUpdate::new( + vec![], + vec![StakerIdentity { + stake_table_key: node.stake_table_entry.stake_key, + }], + ); + + // remove one node + assert_change(one_removed) + .await + .expect("failed to remove one node"); + + // add back the removed node + let added = PermissionedStakeTableUpdate::new( + vec![PeerConfigKeys { + stake_table_key: *node.stake_table_entry.key(), + state_ver_key: node.state_ver_key.clone(), + stake: node.stake_table_entry.stake().as_u64(), + da: false, + }], + vec![], + ); + + assert_change(added).await.expect("failed to add a node"); + + Ok(()) +} diff --git a/tests/smoke.rs b/tests/smoke.rs index d154fc03dd..e009fb3ba3 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -1,6 +1,7 @@ -use crate::common::TestConfig; -use anyhow::Result; +use crate::common::{test_stake_table_update, TestConfig}; +use anyhow::{Context, Result}; use futures::StreamExt; +use sequencer_utils::test_utils::setup_test; use std::time::Instant; /// We allow for no change in state across this many consecutive iterations. @@ -10,6 +11,7 @@ const MAX_TXNS_NOT_INCREMENTING: u8 = 5; #[tokio::test(flavor = "multi_thread")] async fn test_smoke() -> Result<()> { + setup_test(); let start = Instant::now(); dotenvy::dotenv()?; @@ -77,5 +79,19 @@ async fn test_smoke() -> Result<()> { last = new; } + + let epoch = testing + .espresso + .current_epoch() + .await? 
+ .context("curr epoch")?; + + tracing::info!("epoch before stake table update {epoch:?}"); + + // Check if epoch number is greater than Epoch::genesis() i.e 1 + if epoch > 1 { + tracing::info!("testing stake table update"); + test_stake_table_update(testing.sequencer_clients).await?; + } Ok(()) } diff --git a/tests/upgrades.rs b/tests/upgrades.rs index e680d904be..9c6ebafb9d 100644 --- a/tests/upgrades.rs +++ b/tests/upgrades.rs @@ -1,8 +1,9 @@ -use crate::common::TestConfig; +use crate::common::{test_stake_table_update, TestConfig}; use anyhow::Result; -use espresso_types::{FeeVersion, MarketplaceVersion}; +use client::SequencerClient; +use espresso_types::{EpochVersion, FeeVersion, MarketplaceVersion}; use futures::{future::join_all, StreamExt}; -use vbs::version::StaticVersionType; +use vbs::version::{StaticVersionType, Version}; const SEQUENCER_BLOCKS_TIMEOUT: u64 = 200; @@ -12,10 +13,10 @@ async fn test_upgrade() -> Result<()> { let testing = TestConfig::new().await.unwrap(); - let versions = if testing.sequencer_version as u16 >= MarketplaceVersion::version().minor { - (FeeVersion::version(), MarketplaceVersion::version()) - } else { - panic!("Invalid sequencer version provided for upgrade test."); + let (base, upgrade) = match testing.sequencer_version { + 3 => (FeeVersion::version(), EpochVersion::version()), + version if version > 3 => (FeeVersion::version(), MarketplaceVersion::version()), + _ => panic!("Invalid sequencer version provided for upgrade test."), }; println!("Waiting on readiness"); @@ -25,7 +26,28 @@ async fn test_upgrade() -> Result<()> { println!("Initial State:{}", initial); let clients = testing.sequencer_clients; + let client = clients[0].clone(); + let height = test_header_version(clients.clone(), base, upgrade).await?; + // check that atleast 50 blocks are produced after the upgrade + test_blocks_production(clients.clone(), height, 50).await?; + if upgrade == EpochVersion::version() { + 
test_stake_table_update(clients.clone()).await?; + } + + let height = client.get_height().await?; + // check that atleast 50 blocks are produced after the stake table updates + test_blocks_production(clients.clone(), height, 50).await?; + + // TODO assert transactions are incrementing + Ok(()) +} + +async fn test_header_version( + clients: Vec, + base: Version, + upgrade: Version, +) -> Result { // Test is limited to those sequencers with correct modules // enabled. It would be less fragile if we could discover them. let subscriptions = join_all(clients.iter().map(|c| c.subscribe_headers(0))) @@ -34,7 +56,7 @@ async fn test_upgrade() -> Result<()> { .collect::>>()?; let mut stream = futures::stream::iter(subscriptions).flatten_unordered(None); - + let mut height = 0; while let Some(header) = stream.next().await { let header = header.unwrap(); println!( @@ -46,11 +68,12 @@ async fn test_upgrade() -> Result<()> { // TODO is it possible to discover the view at which upgrade should be finished? // First few views should be `Base` version. if header.height() <= 20 { - assert_eq!(header.version(), versions.0) + assert_eq!(header.version(), base) } - if header.version() == versions.1 { + if header.version() == upgrade { println!("header version matched! 
height={:?}", header.height()); + height = header.height(); break; } @@ -59,6 +82,28 @@ async fn test_upgrade() -> Result<()> { } } - // TODO assert transactions are incrementing + Ok(height) +} + +async fn test_blocks_production(clients: Vec, from: u64, num: u64) -> Result<()> { + let subscriptions = join_all(clients.iter().map(|c| c.subscribe_blocks(from))) + .await + .into_iter() + .collect::>>()?; + + let mut num_blocks = 0; + + for mut node in subscriptions { + while let Some(block) = node.next().await { + let _block = block.unwrap(); + num_blocks += 1; + if num_blocks == num { + break; + } + } + + num_blocks = 0; + } + Ok(()) } diff --git a/types/Cargo.toml b/types/Cargo.toml index 40c10d304a..105849e075 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -31,6 +31,7 @@ hotshot = { workspace = true } hotshot-contract-adapter = { workspace = true } hotshot-query-service = { workspace = true } hotshot-types = { workspace = true } +indexmap = "2.7" itertools = { workspace = true } jf-merkle-tree = { workspace = true } jf-utils = { workspace = true } # TODO temporary: used only for test_rng() @@ -56,6 +57,7 @@ tower-service = { version = "0.3", default-features = false } tracing = { workspace = true } url = { workspace = true } vbs = { workspace = true } +vec1 = { workspace = true } [dev-dependencies] espresso-types = { path = ".", features = [ "testing" ] } diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 9544de3b4e..2092ac9a00 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -33,7 +33,7 @@ use crate::{ v0_1, v0_2, v0_3, v0_99::{self, ChainConfig, IterableFeeInfo, SolverAuctionResults}, BlockMerkleCommitment, BuilderSignature, FeeAccount, FeeAmount, FeeInfo, FeeMerkleCommitment, - Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, UpgradeType, + Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, }; use super::{instance_state::NodeState, 
state::ValidatedState}; @@ -330,7 +330,7 @@ impl Header { builder_signature: builder_signature.first().copied(), }), 3 => Self::V3(v0_3::Header { - chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config: v0_3::ResolvableChainConfig::from(v0_3::ChainConfig::from( chain_config, )), height, @@ -546,7 +546,7 @@ impl Header { builder_signature: builder_signature.first().copied(), }), 3 => Self::V3(v0_3::Header { - chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config: v0_3::ResolvableChainConfig::from(v0_3::ChainConfig::from( chain_config, )), height, @@ -969,16 +969,9 @@ impl BlockHeader for Header { let mut validated_state = parent_state.clone(); - let chain_config = if version > instance_state.current_version { - match instance_state.upgrades.get(&version) { - Some(upgrade) => match upgrade.upgrade_type { - UpgradeType::Fee { chain_config } => chain_config, - _ => Header::get_chain_config(&validated_state, instance_state).await?, - }, - None => Header::get_chain_config(&validated_state, instance_state).await?, - } - } else { - Header::get_chain_config(&validated_state, instance_state).await? + let chain_config = match instance_state.upgrade_chain_config(version) { + Some(chain_config) => chain_config, + None => Header::get_chain_config(&validated_state, instance_state).await?, }; validated_state.chain_config = chain_config.into(); diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 545b3722ce..afa4d681ca 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -9,7 +9,7 @@ use vbs::version::Version; #[cfg(any(test, feature = "testing"))] use vbs::version::{StaticVersion, StaticVersionType}; -use super::state::ValidatedState; +use super::{state::ValidatedState, UpgradeType}; /// Represents the immutable state of a node. 
/// @@ -24,7 +24,7 @@ pub struct NodeState { pub genesis_header: GenesisHeader, pub genesis_state: ValidatedState, pub l1_genesis: Option, - pub epoch_height: Option, + pub epoch_height: u64, /// Map containing all planned and executed upgrades. /// @@ -65,7 +65,7 @@ impl NodeState { l1_genesis: None, upgrades: Default::default(), current_version, - epoch_height: None, + epoch_height: 0, } } @@ -97,6 +97,20 @@ impl NodeState { ) } + #[cfg(any(test, feature = "testing"))] + pub fn mock_v3() -> Self { + use vbs::version::StaticVersion; + + Self::new( + 0, + ChainConfig::default(), + L1Client::new(vec!["http://localhost:3331".parse().unwrap()]) + .expect("Failed to create L1 client"), + mock::MockStateCatchup::default(), + StaticVersion::<0, 3>::version(), + ) + } + #[cfg(any(test, feature = "testing"))] pub fn mock_v99() -> Self { use vbs::version::StaticVersion; @@ -131,16 +145,23 @@ impl NodeState { self } - pub fn with_current_version(mut self, ver: Version) -> Self { - self.current_version = ver; + pub fn with_current_version(mut self, version: Version) -> Self { + self.current_version = version; self } - // TODO remove following `Memberships` trait update: - // https://github.com/EspressoSystems/HotShot/issues/3966 - pub fn with_epoch_height(mut self, epoch_height: u64) -> Self { - self.epoch_height = Some(epoch_height); - self + /// Given a `version`, get the correct `ChainConfig` from `self.upgrades`. + pub fn upgrade_chain_config(&self, version: Version) -> Option { + let chain_config = (version > self.current_version).then(|| { + self.upgrades + .get(&version) + .and_then(|upgrade| match upgrade.upgrade_type { + UpgradeType::Fee { chain_config } => Some(chain_config), + UpgradeType::Epoch { chain_config } => Some(chain_config), + _ => None, + }) + }); + chain_config? 
} } @@ -282,3 +303,79 @@ pub mod mock { } } } + +#[cfg(test)] +mod test { + + use crate::v0::Versions; + use crate::{EpochVersion, FeeVersion, SequencerVersions, ViewBasedUpgrade}; + + use super::*; + + #[test] + fn test_upgrade_chain_config_version_02() { + let mut upgrades = std::collections::BTreeMap::new(); + type MySequencerVersions = SequencerVersions, FeeVersion>; + + let mode = UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }); + + let upgraded_chain_config = ChainConfig { + max_block_size: 300.into(), + base_fee: 1.into(), + ..Default::default() + }; + + let upgrade_type = UpgradeType::Fee { + chain_config: upgraded_chain_config, + }; + + upgrades.insert( + ::Upgrade::VERSION, + Upgrade { mode, upgrade_type }, + ); + + let instance_state = NodeState::mock().with_upgrades(upgrades); + + let chain_config = instance_state.upgrade_chain_config(FeeVersion::version()); + assert_eq!(Some(upgraded_chain_config), chain_config); + } + + #[test] + fn test_upgrade_chain_config_version_03() { + let mut upgrades = std::collections::BTreeMap::new(); + type MySequencerVersions = SequencerVersions; + + let mode = UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }); + + let upgraded_chain_config = ChainConfig { + max_block_size: 300.into(), + base_fee: 1.into(), + stake_table_contract: Some(Default::default()), + ..Default::default() + }; + + let upgrade_type = UpgradeType::Epoch { + chain_config: upgraded_chain_config, + }; + + upgrades.insert( + ::Upgrade::VERSION, + Upgrade { mode, upgrade_type }, + ); + + let instance_state = NodeState::mock_v2().with_upgrades(upgrades); + + let chain_config = instance_state.upgrade_chain_config(EpochVersion::version()); + assert_eq!(Some(upgraded_chain_config), chain_config); + } +} diff --git a/types/src/v0/impls/stake_table.rs 
b/types/src/v0/impls/stake_table.rs index 9bbf03daad..da2a90662b 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -1,13 +1,14 @@ -use std::{ - cmp::max, - collections::{BTreeMap, BTreeSet, HashMap}, - num::NonZeroU64, - str::FromStr, +use super::{ + traits::StateCatchup, + v0_3::{DAMembers, StakeTable, StakeTables}, + v0_99::ChainConfig, + Header, L1Client, NodeState, PubKey, SeqTypes, }; use async_trait::async_trait; +use committable::Committable; use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::StakersUpdated; -use ethers::types::{Address, U256}; +use ethers::types::U256; use ethers_conv::ToAlloy; use hotshot::types::{BLSPubKey, SignatureKey as _}; use hotshot_contract_adapter::stake_table::{bls_alloy_to_jf, NodeInfoJf}; @@ -25,14 +26,14 @@ use hotshot_types::{ }, PeerConfig, }; -use itertools::Itertools; -use thiserror::Error; -use url::Url; - -use super::{ - v0_3::{DAMembers, StakeTable, StakeTables}, - Header, L1Client, NodeState, PubKey, SeqTypes, +use indexmap::IndexMap; +use std::{ + cmp::max, + collections::{BTreeMap, BTreeSet, HashMap}, + num::NonZeroU64, + sync::Arc, }; +use thiserror::Error; type Epoch = ::Epoch; @@ -53,46 +54,40 @@ impl StakeTables { /// should not significantly affect performance to fetch all events and /// perform the computation in this functions once per epoch. pub fn from_l1_events(updates: Vec) -> Self { - let changes_per_node = updates - .into_iter() - .flat_map(|event| { - event - .removed - .into_iter() - .map(|key| StakeTableChange::Remove(bls_alloy_to_jf(key))) - .chain( - event - .added - .into_iter() - .map(|node_info| StakeTableChange::Add(node_info.into())), - ) - }) - .group_by(|change| change.key()); + let mut index_map = IndexMap::new(); - // If the last event for a stakers is `Added` the staker is currently - // staking, if the last event is removed or (or the staker is not present) - // they are not staking. 
- let currently_staking = changes_per_node - .into_iter() - .map(|(_pub_key, deltas)| deltas.last().expect("deltas non-empty").clone()) - .filter_map(|change| match change { - StakeTableChange::Add(node_info) => Some(node_info), - StakeTableChange::Remove(_) => None, - }); - - let mut consensus_stake_table: Vec> = vec![]; - let mut da_members: Vec> = vec![]; - for node in currently_staking { - consensus_stake_table.push(node.clone().into()); - if node.da { - da_members.push(node.into()); + for event in updates { + for key in event.removed { + let change = StakeTableChange::Remove(bls_alloy_to_jf(key)); + index_map.insert(change.key(), change); + } + for node_info in event.added { + let change = StakeTableChange::Add(node_info.into()); + index_map.insert(change.key(), change); } } - Self::new(consensus_stake_table.into(), da_members.into()) + + let mut da_members = Vec::new(); + let mut stake_table = Vec::new(); + + for change in index_map.values() { + if let StakeTableChange::Add(node_info_jf) = change { + let entry: StakeTableEntry = node_info_jf.clone().into(); + stake_table.push(entry.clone()); + if change.is_da() { + da_members.push(entry); + } + } + } + + tracing::error!("DA={da_members:?}"); + tracing::error!("ST={stake_table:?}"); + + Self::new(stake_table.into(), da_members.into()) } } -#[derive(Clone, Debug)] +#[derive(derive_more::Debug, Clone)] /// Type to describe DA and Stake memberships pub struct EpochCommittees { /// Committee used when we're in pre-epoch state @@ -107,8 +102,9 @@ pub struct EpochCommittees { /// L1 provider l1_client: L1Client, - /// Address of Stake Table Contract - contract_address: Option
, + chain_config: ChainConfig, + #[debug("{}", peers.name())] + pub peers: Arc, /// Randomized committees, filled when we receive the DrbResult randomized_committees: BTreeMap>>, @@ -127,6 +123,13 @@ impl StakeTableChange { StakeTableChange::Remove(key) => *key, } } + + pub(crate) fn is_da(&self) -> bool { + match self { + StakeTableChange::Add(node_info) => node_info.da, + StakeTableChange::Remove(_) => false, + } + } } /// Holds Stake table and da stake @@ -186,6 +189,8 @@ impl EpochCommittees { .filter(|entry| entry.stake() > U256::zero()) .collect(); + let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]); + let committee = Committee { eligible_leaders, stake_table, @@ -195,6 +200,15 @@ impl EpochCommittees { }; self.state.insert(epoch, committee.clone()); + self.state.insert(epoch + 1, committee.clone()); + self.state.insert(epoch + 2, committee.clone()); + self.randomized_committees + .insert(epoch, randomized_committee.clone()); + self.randomized_committees + .insert(epoch + 1, randomized_committee.clone()); + self.randomized_committees + .insert(epoch + 2, randomized_committee.clone()); + committee } @@ -239,6 +253,7 @@ impl EpochCommittees { .iter() .map(|entry| (PubKey::public_key(entry), entry.clone())) .collect(); + let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]); let members = Committee { eligible_leaders, @@ -248,18 +263,23 @@ impl EpochCommittees { indexed_da_members, }; + let mut randomized_committees = BTreeMap::new(); + + // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); - map.insert(Epoch::genesis(), members.clone()); - // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 - map.insert(Epoch::genesis() + 1u64, members.clone()); + for epoch in Epoch::genesis().u64()..=50 { + map.insert(Epoch::new(epoch), members.clone()); + randomized_committees.insert(Epoch::new(epoch), 
randomized_committee.clone()); + } Self { non_epoch_committee: members, state: map, _epoch_size: epoch_size, l1_client: instance_state.l1_client.clone(), - contract_address: instance_state.chain_config.stake_table_contract, - randomized_committees: BTreeMap::new(), + chain_config: instance_state.chain_config, + peers: instance_state.peers.clone(), + randomized_committees, } } @@ -284,81 +304,32 @@ impl Membership for EpochCommittees { fn new( // TODO remove `new` from trait and remove this fn as well. // https://github.com/EspressoSystems/HotShot/commit/fcb7d54a4443e29d643b3bbc53761856aef4de8b - committee_members: Vec>, - da_members: Vec>, + _committee_members: Vec>, + _da_members: Vec>, ) -> Self { - // For each eligible leader, get the stake table entry - let eligible_leaders: Vec<_> = committee_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // For each member, get the stake table entry - let stake_table: Vec<_> = committee_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // For each member, get the stake table entry - let da_members: Vec<_> = da_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // Index the stake table by public key - let indexed_stake_table: HashMap = stake_table - .iter() - .map(|entry| (PubKey::public_key(entry), entry.clone())) - .collect(); - - // Index the stake table by public key - let indexed_da_members: HashMap = da_members - .iter() - .map(|entry| (PubKey::public_key(entry), entry.clone())) - .collect(); - - let members = Committee { - eligible_leaders, - stake_table, - da_members, - indexed_stake_table, - indexed_da_members, - }; - - let mut map = HashMap::new(); - map.insert(Epoch::genesis(), members.clone()); - // TODO: remove this, workaround for hotshot asking for stake tables 
from epoch 1 - map.insert(Epoch::genesis() + 1u64, members.clone()); - - Self { - non_epoch_committee: members, - state: map, - _epoch_size: 12, - l1_client: L1Client::new(vec![Url::from_str("http:://ab.b").unwrap()]) - .expect("Failed to create L1 client"), - contract_address: None, - randomized_committees: BTreeMap::new(), - } + panic!("EpochCommittees::new() called. This function has been replaced with new_stake()"); } - /// Get the stake table for the current view fn stake_table(&self, epoch: Option) -> Vec> { - if let Some(st) = self.state(&epoch) { + let st = if let Some(st) = self.state(&epoch) { st.stake_table.clone() } else { vec![] - } + }; + + tracing::debug!("stake table = {st:?}"); + st } /// Get the stake table for the current view fn da_stake_table(&self, epoch: Option) -> Vec> { - if let Some(sc) = self.state(&epoch) { + let da = if let Some(sc) = self.state(&epoch) { sc.da_members.clone() } else { vec![] - } + }; + + tracing::debug!("da members = {da:?}"); + da } /// Get all members of the committee for the current view @@ -367,11 +338,15 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - if let Some(sc) = self.state(&epoch) { + let committee = if let Some(sc) = self.state(&epoch) { sc.indexed_stake_table.clone().into_keys().collect() } else { BTreeSet::new() - } + }; + + tracing::debug!("committee={committee:?}"); + + committee } /// Get all members of the committee for the current view @@ -380,11 +355,14 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - if let Some(sc) = self.state(&epoch) { + let da = if let Some(sc) = self.state(&epoch) { sc.indexed_da_members.clone().into_keys().collect() } else { BTreeSet::new() - } + }; + tracing::debug!("da committee={da:?}"); + + da } /// Get all eligible leaders of the committee for the current view @@ -393,12 +371,16 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - 
self.state(&epoch) + let committee_leaders = self + .state(&epoch) .unwrap() .eligible_leaders .iter() .map(PubKey::public_key) - .collect() + .collect(); + + tracing::debug!("committee_leaders={committee_leaders:?}"); + committee_leaders } /// Get the stake table entry for a public key @@ -506,9 +488,20 @@ impl Membership for EpochCommittees { epoch: Epoch, block_header: Header, ) -> Option> { - let address = self.contract_address?; + let chain_config = get_chain_config(self.chain_config, &self.peers, &block_header) + .await + .ok()?; + + let contract_address = chain_config.stake_table_contract; + + if contract_address.is_none() { + tracing::error!("No stake table contract address found in Chain config"); + } + + let address = contract_address?; + self.l1_client - .get_stake_table(address.to_alloy(), block_header.height()) + .get_stake_table(address.to_alloy(), block_header.l1_head()) .await .ok() .map(|stake_table| -> Box { @@ -532,6 +525,30 @@ impl Membership for EpochCommittees { } } +pub(crate) async fn get_chain_config( + chain_config: ChainConfig, + peers: &impl StateCatchup, + header: &Header, +) -> anyhow::Result { + let header_cf = header.chain_config(); + if chain_config.commit() == header_cf.commit() { + return Ok(chain_config); + } + + let cf = match header_cf.resolve() { + Some(cf) => cf, + None => peers + .fetch_chain_config(header_cf.commit()) + .await + .map_err(|err| { + tracing::error!("failed to get chain_config from peers. 
err: {err:?}"); + err + })?, + }; + + Ok(cf) +} + #[cfg(test)] mod tests { use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::NodeInfo; diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 995772947a..0b4eace61f 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -24,11 +24,11 @@ use serde::{Deserialize, Serialize}; use std::ops::Add; use thiserror::Error; use time::OffsetDateTime; -use vbs::version::Version; +use vbs::version::{StaticVersionType, Version}; use super::{ auction::ExecutionError, fee_info::FeeError, instance_state::NodeState, BlockMerkleCommitment, - BlockSize, FeeMerkleCommitment, L1Client, + BlockSize, FeeMerkleCommitment, L1Client, MarketplaceVersion, }; use crate::{ traits::StateCatchup, @@ -668,7 +668,7 @@ fn validate_builder_fee( // TODO Marketplace signatures are placeholders for now. In // finished Marketplace signatures will cover the full // transaction. - if version.minor >= 3 { + if version.minor >= MarketplaceVersion::MINOR { fee_info .account() .validate_sequencing_fee_signature_marketplace( diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs index 5b0dce4a3f..12440da3d7 100644 --- a/types/src/v0/mod.rs +++ b/types/src/v0/mod.rs @@ -120,6 +120,9 @@ reexport_unchanged_types!( TimeBasedUpgrade, ViewBasedUpgrade, BlockSize, + PublicHotShotConfig, + PublicNetworkConfig, + PublicValidatorConfig ); pub(crate) use v0_3::{L1ClientMetrics, L1Event, L1State, L1UpdateTask}; @@ -175,8 +178,8 @@ pub type MockSequencerVersions = SequencerVersions, StaticVe pub type V0_0 = StaticVersion<0, 0>; pub type V0_1 = StaticVersion<0, 1>; pub type FeeVersion = StaticVersion<0, 2>; +pub type EpochVersion = StaticVersion<0, 3>; pub type MarketplaceVersion = StaticVersion<0, 99>; -pub type EpochVersion = StaticVersion<0, 100>; pub type Leaf = hotshot_types::data::Leaf; pub type Leaf2 = hotshot_types::data::Leaf2; diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 
51dc634603..5c39b89620 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -579,7 +579,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Ok(( HotShotInitializer { instance_state: state, - epoch_height: 0, + // todo(abdul): load from storage? + epoch_height: 150, anchor_leaf: leaf, anchor_state: validated_state.unwrap_or_default(), anchor_state_delta: None, diff --git a/types/src/v0/v0_1/config.rs b/types/src/v0/v0_1/config.rs new file mode 100644 index 0000000000..4f52574f20 --- /dev/null +++ b/types/src/v0/v0_1/config.rs @@ -0,0 +1,262 @@ +use std::{num::NonZeroUsize, time::Duration}; + +use anyhow::Context; +use vec1::Vec1; + +use crate::PubKey; +use hotshot_types::network::{ + BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig, +}; +use hotshot_types::{network::NetworkConfig, HotShotConfig, PeerConfig, ValidatorConfig}; +use serde::{Deserialize, Serialize}; +use tide_disco::Url; + +/// This struct defines the public Hotshot validator configuration. +/// Private key and state key pairs are excluded for security reasons. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct PublicValidatorConfig { + public_key: PubKey, + stake_value: u64, + is_da: bool, + private_key: String, + state_public_key: String, + state_key_pair: String, +} + +impl From> for PublicValidatorConfig { + fn from(v: ValidatorConfig) -> Self { + let ValidatorConfig:: { + public_key, + private_key: _, + stake_value, + state_key_pair, + is_da, + } = v; + + let state_public_key = state_key_pair.ver_key(); + + Self { + public_key, + stake_value, + is_da, + state_public_key: state_public_key.to_string(), + private_key: "*****".into(), + state_key_pair: "*****".into(), + } + } +} + +/// This struct defines the public Hotshot configuration parameters. +/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters. 
+/// Hotshot config has sensitive information like private keys and such fields are excluded from this struct. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct PublicHotShotConfig { + start_threshold: (u64, u64), + num_nodes_with_stake: NonZeroUsize, + known_nodes_with_stake: Vec>, + known_da_nodes: Vec>, + da_staked_committee_size: usize, + fixed_leader_for_gpuvid: usize, + next_view_timeout: u64, + view_sync_timeout: Duration, + num_bootstrap: usize, + builder_timeout: Duration, + data_request_delay: Duration, + builder_urls: Vec1, + start_proposing_view: u64, + stop_proposing_view: u64, + start_voting_view: u64, + stop_voting_view: u64, + start_proposing_time: u64, + stop_proposing_time: u64, + start_voting_time: u64, + stop_voting_time: u64, + epoch_height: u64, +} + +impl From> for PublicHotShotConfig { + fn from(v: HotShotConfig) -> Self { + // Destructure all fields from HotShotConfig to return an error + // if new fields are added to HotShotConfig. This makes sure that we handle + // all fields appropriately and do not miss any updates. 
+ let HotShotConfig:: { + start_threshold, + num_nodes_with_stake, + known_nodes_with_stake, + known_da_nodes, + da_staked_committee_size, + fixed_leader_for_gpuvid, + next_view_timeout, + view_sync_timeout, + num_bootstrap, + builder_timeout, + data_request_delay, + builder_urls, + start_proposing_view, + stop_proposing_view, + start_voting_view, + stop_voting_view, + start_proposing_time, + stop_proposing_time, + start_voting_time, + stop_voting_time, + epoch_height, + } = v; + + Self { + start_threshold, + num_nodes_with_stake, + known_nodes_with_stake, + known_da_nodes, + da_staked_committee_size, + fixed_leader_for_gpuvid, + next_view_timeout, + view_sync_timeout, + num_bootstrap, + builder_timeout, + data_request_delay, + builder_urls, + start_proposing_view, + stop_proposing_view, + start_voting_view, + stop_voting_view, + start_proposing_time, + stop_proposing_time, + start_voting_time, + stop_voting_time, + epoch_height, + } + } +} + +impl PublicHotShotConfig { + pub fn into_hotshot_config(self) -> HotShotConfig { + HotShotConfig { + start_threshold: self.start_threshold, + num_nodes_with_stake: self.num_nodes_with_stake, + known_nodes_with_stake: self.known_nodes_with_stake, + known_da_nodes: self.known_da_nodes, + da_staked_committee_size: self.da_staked_committee_size, + fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid, + next_view_timeout: self.next_view_timeout, + view_sync_timeout: self.view_sync_timeout, + num_bootstrap: self.num_bootstrap, + builder_timeout: self.builder_timeout, + data_request_delay: self.data_request_delay, + builder_urls: self.builder_urls, + start_proposing_view: self.start_proposing_view, + stop_proposing_view: self.stop_proposing_view, + start_voting_view: self.start_voting_view, + stop_voting_view: self.stop_voting_view, + start_proposing_time: self.start_proposing_time, + stop_proposing_time: self.stop_proposing_time, + start_voting_time: self.start_voting_time, + stop_voting_time: self.stop_voting_time, + 
epoch_height: self.epoch_height, + } + } + + pub fn known_nodes_with_stake(&self) -> Vec> { + self.known_nodes_with_stake.clone() + } + + pub fn known_da_nodes(&self) -> Vec> { + self.known_da_nodes.clone() + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct PublicNetworkConfig { + rounds: usize, + indexed_da: bool, + transactions_per_round: usize, + manual_start_password: Option, + num_bootrap: usize, + next_view_timeout: u64, + view_sync_timeout: Duration, + builder_timeout: Duration, + data_request_delay: Duration, + node_index: u64, + seed: [u8; 32], + transaction_size: usize, + key_type_name: String, + libp2p_config: Option, + config: PublicHotShotConfig, + cdn_marshal_address: Option, + combined_network_config: Option, + commit_sha: String, + builder: BuilderType, + random_builder: Option, +} + +impl From> for PublicNetworkConfig { + fn from(cfg: NetworkConfig) -> Self { + Self { + rounds: cfg.rounds, + indexed_da: cfg.indexed_da, + transactions_per_round: cfg.transactions_per_round, + manual_start_password: Some("*****".into()), + num_bootrap: cfg.num_bootrap, + next_view_timeout: cfg.next_view_timeout, + view_sync_timeout: cfg.view_sync_timeout, + builder_timeout: cfg.builder_timeout, + data_request_delay: cfg.data_request_delay, + node_index: cfg.node_index, + seed: cfg.seed, + transaction_size: cfg.transaction_size, + key_type_name: cfg.key_type_name, + libp2p_config: cfg.libp2p_config, + config: cfg.config.into(), + cdn_marshal_address: cfg.cdn_marshal_address, + combined_network_config: cfg.combined_network_config, + commit_sha: cfg.commit_sha, + builder: cfg.builder, + random_builder: cfg.random_builder, + } + } +} + +impl PublicNetworkConfig { + pub fn into_network_config( + self, + my_own_validator_config: ValidatorConfig, + ) -> anyhow::Result> { + let node_index = self + .config + .known_nodes_with_stake + .iter() + .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key) + .context(format!( + "the 
node {} is not in the stake table", + my_own_validator_config.public_key + ))? as u64; + + Ok(NetworkConfig { + rounds: self.rounds, + indexed_da: self.indexed_da, + transactions_per_round: self.transactions_per_round, + manual_start_password: self.manual_start_password, + num_bootrap: self.num_bootrap, + next_view_timeout: self.next_view_timeout, + view_sync_timeout: self.view_sync_timeout, + builder_timeout: self.builder_timeout, + data_request_delay: self.data_request_delay, + node_index, + seed: self.seed, + transaction_size: self.transaction_size, + key_type_name: self.key_type_name, + libp2p_config: self.libp2p_config, + config: self.config.into_hotshot_config(), + cdn_marshal_address: self.cdn_marshal_address, + combined_network_config: self.combined_network_config, + commit_sha: self.commit_sha, + builder: self.builder, + random_builder: self.random_builder, + public_keys: Vec::new(), + }) + } + + pub fn hotshot_config(&self) -> PublicHotShotConfig { + self.config.clone() + } +} diff --git a/types/src/v0/v0_1/mod.rs b/types/src/v0/v0_1/mod.rs index 7115342c14..494659e1be 100644 --- a/types/src/v0/v0_1/mod.rs +++ b/types/src/v0/v0_1/mod.rs @@ -4,6 +4,7 @@ pub const VERSION: Version = Version { major: 0, minor: 1 }; mod block; mod chain_config; +mod config; mod fee_info; mod header; mod instance_state; @@ -14,6 +15,7 @@ mod transaction; pub use block::*; pub use chain_config::*; +pub use config::*; pub use fee_info::*; pub use header::Header; pub use instance_state::*; diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs index b550f2c5a7..5d8acb79f4 100644 --- a/types/src/v0/v0_2/mod.rs +++ b/types/src/v0/v0_2/mod.rs @@ -4,10 +4,11 @@ use vbs::version::Version; pub use super::v0_1::{ AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, - FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, 
L1ClientOptions, - L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, - NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, - NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, ResolvableChainConfig, + FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, + L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, + NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, + NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, + PublicHotShotConfig, PublicNetworkConfig, PublicValidatorConfig, ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, diff --git a/types/src/v0/v0_3/chain_config.rs b/types/src/v0/v0_3/chain_config.rs index dbc73cb589..f82521377c 100644 --- a/types/src/v0/v0_3/chain_config.rs +++ b/types/src/v0/v0_3/chain_config.rs @@ -1,4 +1,4 @@ -use crate::{v0_1, BlockSize, ChainId, FeeAccount, FeeAmount}; +use crate::{v0_1, v0_99, BlockSize, ChainId, FeeAccount, FeeAmount}; use committable::{Commitment, Committable}; use ethers::types::{Address, U256}; use itertools::Either; @@ -74,13 +74,13 @@ impl Committable for ChainConfig { } impl ResolvableChainConfig { - pub fn _commit(&self) -> Commitment { + pub fn commit(&self) -> Commitment { match self.chain_config { Either::Left(config) => config.commit(), Either::Right(commitment) => commitment, } } - pub fn _resolve(self) -> Option { + pub fn resolve(self) -> Option { match self.chain_config { Either::Left(config) => Some(config), Either::Right(_) => None, @@ -141,23 +141,25 @@ impl From for ChainConfig { } } -impl From for v0_1::ChainConfig { - fn 
from(chain_config: ChainConfig) -> v0_1::ChainConfig { - let ChainConfig { +impl From for ChainConfig { + fn from(chain_config: v0_99::ChainConfig) -> ChainConfig { + let v0_99::ChainConfig { chain_id, max_block_size, base_fee, fee_contract, fee_recipient, + stake_table_contract, .. } = chain_config; - v0_1::ChainConfig { + ChainConfig { chain_id, max_block_size, base_fee, fee_contract, fee_recipient, + stake_table_contract, } } } diff --git a/types/src/v0/v0_3/header.rs b/types/src/v0/v0_3/header.rs new file mode 100644 index 0000000000..c4dd120916 --- /dev/null +++ b/types/src/v0/v0_3/header.rs @@ -0,0 +1,61 @@ +use crate::NsTable; + +use super::{ + BlockMerkleCommitment, BuilderSignature, FeeInfo, FeeMerkleCommitment, L1BlockInfo, + ResolvableChainConfig, +}; +use ark_serialize::CanonicalSerialize; +use committable::{Commitment, Committable, RawCommitmentBuilder}; +use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment}; +use serde::{Deserialize, Serialize}; + +/// A header is like a [`Block`] with the body replaced by a digest. +#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct Header { + /// A commitment to a ChainConfig or a full ChainConfig. 
+ pub(crate) chain_config: ResolvableChainConfig, + pub(crate) height: u64, + pub(crate) timestamp: u64, + pub(crate) l1_head: u64, + pub(crate) l1_finalized: Option, + pub(crate) payload_commitment: VidCommitment, + pub(crate) builder_commitment: BuilderCommitment, + pub(crate) ns_table: NsTable, + pub(crate) block_merkle_tree_root: BlockMerkleCommitment, + pub(crate) fee_merkle_tree_root: FeeMerkleCommitment, + pub(crate) fee_info: FeeInfo, + pub(crate) builder_signature: Option, +} + +impl Committable for Header { + fn commit(&self) -> Commitment { + let mut bmt_bytes = vec![]; + self.block_merkle_tree_root + .serialize_with_mode(&mut bmt_bytes, ark_serialize::Compress::Yes) + .unwrap(); + let mut fmt_bytes = vec![]; + self.fee_merkle_tree_root + .serialize_with_mode(&mut fmt_bytes, ark_serialize::Compress::Yes) + .unwrap(); + + RawCommitmentBuilder::new(&Self::tag()) + .field("chain_config", self.chain_config.commit()) + .u64_field("height", self.height) + .u64_field("timestamp", self.timestamp) + .u64_field("l1_head", self.l1_head) + .optional("l1_finalized", &self.l1_finalized) + .constant_str("payload_commitment") + .fixed_size_bytes(self.payload_commitment.as_ref().as_ref()) + .constant_str("builder_commitment") + .fixed_size_bytes(self.builder_commitment.as_ref()) + .field("ns_table", self.ns_table.commit()) + .var_size_field("block_merkle_tree_root", &bmt_bytes) + .var_size_field("fee_merkle_tree_root", &fmt_bytes) + .field("fee_info", self.fee_info.commit()) + .finalize() + } + + fn tag() -> String { + crate::v0_1::Header::tag() + } +} diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs index 11c8bbf14a..b96fb014a9 100644 --- a/types/src/v0/v0_3/mod.rs +++ b/types/src/v0/v0_3/mod.rs @@ -4,21 +4,23 @@ use vbs::version::Version; pub use super::v0_1::{ AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, FeeMerkleCommitment, FeeMerkleProof, - 
FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, - NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, - NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, - NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, ResolvableChainConfig, TimeBasedUpgrade, - Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, TxTableEntries, - TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade, - BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, + FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId, + NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, + NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, + NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig, + PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, + TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, + NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; pub(crate) use super::v0_1::{L1ClientMetrics, L1Event, L1State, L1UpdateTask}; pub const VERSION: Version = Version { major: 0, minor: 3 }; mod chain_config; +mod header; mod stake_table; pub use chain_config::*; +pub use header::Header; pub use stake_table::*; diff --git a/types/src/v0/v0_99/chain_config.rs b/types/src/v0/v0_99/chain_config.rs index 420580d7af..cd9fd7cba5 100644 --- a/types/src/v0/v0_99/chain_config.rs +++ b/types/src/v0/v0_99/chain_config.rs @@ -130,6 +130,21 @@ impl From<&v0_1::ResolvableChainConfig> for ResolvableChainConfig { } } +impl From<&v0_3::ResolvableChainConfig> for 
ResolvableChainConfig { + fn from( + &v0_3::ResolvableChainConfig { chain_config }: &v0_3::ResolvableChainConfig, + ) -> ResolvableChainConfig { + match chain_config { + Either::Left(chain_config) => ResolvableChainConfig { + chain_config: Either::Left(ChainConfig::from(chain_config)), + }, + Either::Right(c) => ResolvableChainConfig { + chain_config: Either::Right(Commitment::from_raw(*c.as_ref())), + }, + } + } +} + impl From for ChainConfig { fn from(chain_config: v0_1::ChainConfig) -> ChainConfig { let v0_1::ChainConfig { @@ -217,22 +232,44 @@ mod test { use super::*; #[test] - fn test_upgrade_chain_config_v3_resolvable_chain_config_from_v1() { + fn test_upgrade_chain_config_v99_resolvable_chain_config_from_v1() { let expectation: ResolvableChainConfig = ChainConfig::default().into(); let v1_resolvable: v0_1::ResolvableChainConfig = v0_1::ChainConfig::default().into(); - let v3_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable); - assert_eq!(expectation, v3_resolvable); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable); + assert_eq!(expectation, v99_resolvable); let expectation: ResolvableChainConfig = ChainConfig::default().commit().into(); let v1_resolvable: v0_1::ResolvableChainConfig = v0_1::ChainConfig::default().commit().into(); - let v3_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable); - assert_eq!(expectation, v3_resolvable); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable); + assert_eq!(expectation, v99_resolvable); } + #[test] - fn test_upgrade_chain_config_v1_chain_config_from_v3() { + fn test_upgrade_chain_config_v99_resolvable_chain_config_from_v3() { + let expectation: ResolvableChainConfig = ChainConfig::default().into(); + let v3_resolvable: v0_3::ResolvableChainConfig = v0_3::ChainConfig::default().into(); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v3_resolvable); + 
assert_eq!(expectation, v99_resolvable); + let expectation: ResolvableChainConfig = ChainConfig::default().commit().into(); + let v3_resolvable: v0_3::ResolvableChainConfig = + v0_3::ChainConfig::default().commit().into(); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v3_resolvable); + assert_eq!(expectation, v99_resolvable); + } + + #[test] + fn test_upgrade_chain_config_v1_chain_config_from_v99() { let expectation = v0_1::ChainConfig::default(); - let v3_chain_config = ChainConfig::default(); - let v1_chain_config = v0_1::ChainConfig::from(v3_chain_config); + let v99_chain_config = ChainConfig::default(); + let v1_chain_config = v0_1::ChainConfig::from(v99_chain_config); assert_eq!(expectation, v1_chain_config); } + + #[test] + fn test_upgrade_chain_config_v3_chain_config_from_v99() { + let expectation = v0_3::ChainConfig::default(); + let v99_chain_config = ChainConfig::default(); + let v3_chain_config = v0_3::ChainConfig::from(v99_chain_config); + assert_eq!(expectation, v3_chain_config); + } } diff --git a/types/src/v0/v0_99/mod.rs b/types/src/v0/v0_99/mod.rs index 3e676a4b2a..f45ce0bd5d 100644 --- a/types/src/v0/v0_99/mod.rs +++ b/types/src/v0/v0_99/mod.rs @@ -7,8 +7,9 @@ pub use super::v0_1::{ FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, - NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter, - TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig, + PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, + TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, 
ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index b19f249d6b..9210ceb436 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -321,7 +321,6 @@ pub async fn deploy( genesis: BoxFuture<'_, anyhow::Result<(ParsedLightClientState, ParsedStakeTableState)>>, permissioned_prover: Option
, mut contracts: Contracts, - initial_stake_table: Option>, ) -> anyhow::Result { let provider = Provider::::try_from(l1url.to_string())?.interval(l1_interval); let chain_id = provider.get_chainid().await?.as_u64(); @@ -461,11 +460,10 @@ pub async fn deploy( // `PermissionedStakeTable.sol` if should_deploy(ContractGroup::PermissionedStakeTable, &only) { - let initial_stake_table: Vec<_> = initial_stake_table.unwrap_or_default(); let stake_table_address = contracts .deploy_tx( Contract::PermissonedStakeTable, - PermissionedStakeTable::deploy(l1.clone(), initial_stake_table)?, + PermissionedStakeTable::deploy(l1.clone(), Vec::::new())?, ) .await?; let stake_table = PermissionedStakeTable::new(stake_table_address, l1.clone()); diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 943c53f808..6cbb96c0d0 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -3,7 +3,7 @@ /// The initial stake table is passed to the permissioned stake table contract /// on deployment. 
use contract_bindings_ethers::permissioned_stake_table::{ - G2Point, NodeInfo, PermissionedStakeTable, + G2Point, NodeInfo, PermissionedStakeTable, PermissionedStakeTableErrors, }; use derive_more::derive::From; use ethers::{ @@ -19,6 +19,8 @@ use url::Url; use std::{fs, path::Path, sync::Arc, time::Duration}; +use crate::contract_send; + /// A stake table config stored in a file #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[serde(bound(deserialize = ""))] @@ -59,8 +61,8 @@ impl From for Vec { } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug, From, PartialEq)] -struct StakerIdentity { - stake_table_key: BLSPubKey, +pub struct StakerIdentity { + pub stake_table_key: BLSPubKey, } impl From for BLSPubKey { @@ -74,12 +76,22 @@ impl From for BLSPubKey { #[serde(bound(deserialize = ""))] pub struct PermissionedStakeTableUpdate { #[serde(default)] - stakers_to_remove: Vec, + pub stakers_to_remove: Vec, #[serde(default)] - new_stakers: Vec>, + pub new_stakers: Vec>, } impl PermissionedStakeTableUpdate { + pub fn new( + new_stakers: Vec>, + stakers_to_remove: Vec, + ) -> Self { + Self { + stakers_to_remove, + new_stakers, + } + } + pub fn from_toml_file(path: &Path) -> anyhow::Result { let config_file_as_string: String = fs::read_to_string(path) .unwrap_or_else(|_| panic!("Could not read config file located at {}", path.display())); @@ -94,6 +106,16 @@ impl PermissionedStakeTableUpdate { ) } + pub fn to_toml_file(&self, path: &Path) -> anyhow::Result<()> { + let toml_string = toml::to_string_pretty(self) + .unwrap_or_else(|err| panic!("Failed to serialize config to TOML: {err}")); + + fs::write(path, toml_string) + .unwrap_or_else(|_| panic!("Could not write config file to {}", path.display())); + + Ok(()) + } + fn stakers_to_remove(&self) -> Vec { self.stakers_to_remove .iter() @@ -127,17 +149,22 @@ pub async fn update_stake_table( .index(account_index)? .build()? 
.with_chain_id(chain_id); + let l1 = Arc::new(SignerMiddleware::new(provider.clone(), wallet)); let contract = PermissionedStakeTable::new(contract_address, l1); tracing::info!("sending stake table update transaction"); - let tx_receipt = contract - .update(update.stakers_to_remove(), update.new_stakers()) - .send() - .await? - .await?; + if update.stakers_to_remove().is_empty() && update.new_stakers().is_empty() { + anyhow::bail!("No changes to update in the stake table"); + } + + let (tx_receipt, _) = contract_send::<_, _, PermissionedStakeTableErrors>( + &contract.update(update.stakers_to_remove(), update.new_stakers()), + ) + .await?; + tracing::info!("Transaction receipt: {:?}", tx_receipt); Ok(()) }