diff --git a/Cargo.lock b/Cargo.lock index f026ffb8d4..1cb7767d15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5411,6 +5411,7 @@ dependencies = [ "refinery", "refinery-core", "reqwest 0.12.12", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -7983,6 +7984,7 @@ dependencies = [ "espresso-types", "futures", "hotshot", + "hotshot-example-types", "hotshot-query-service", "hotshot-stake-table", "hotshot-types", @@ -10226,6 +10228,7 @@ dependencies = [ "rand_distr", "request-response", "reqwest 0.12.12", + "semver 1.0.25", "sequencer", "sequencer-utils", "serde", diff --git a/Cargo.toml b/Cargo.toml index 2e50ee989e..ce2648d08f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ libp2p-swarm-derive = { version = "0.35" } typenum = "1" cbor4ii = { version = "1.0", features = ["serde1"] } serde_bytes = { version = "0.11" } +semver = "1" num_cpus = "1" dashmap = "6" memoize = { version = "0.4", features = ["full"] } diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index 53fb9f9055..f353774c0a 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -8,7 +8,7 @@ use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; use hotshot_types::data::VidCommitment; use hotshot_types::data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}; use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::utils::BuilderCommitment; @@ -186,9 +186,7 @@ async fn test_pruning() { // everything else is boilerplate. 
let mock_qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) - .await - .to_qc2(); + QuorumCertificate2::genesis::(&Default::default(), &Default::default()).await; let leaf = Leaf2::from_quorum_proposal(&QuorumProposalWrapper { proposal: QuorumProposal2 { block_header: >::genesis( diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index ba58ba0abd..a45c4039a0 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1552,12 +1552,12 @@ mod test { use hotshot_types::data::EpochNumber; use hotshot_types::data::Leaf2; use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; + use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::Transaction; use hotshot_types::traits::node_implementation::Versions; use hotshot_types::{ data::{vid_commitment, Leaf, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{node_implementation::ConsensusTime, signature_key::BuilderSignatureKey}, utils::BuilderCommitment, }; @@ -4099,12 +4099,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, @@ -4175,12 +4174,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, @@ -4242,12 +4240,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, 
- justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs index 0825a9e887..867706c213 100644 --- a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -5,7 +5,7 @@ pub use hotshot_types::{ data::{EpochNumber, Leaf, ViewNumber}, message::Proposal, signature_key::BLSPubKey, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, traits::{ block_contents::BlockPayload, node_implementation::{ConsensusTime, NodeType}, @@ -172,12 +172,11 @@ mod tests { let mut previous_commitment = initial_commitment; let mut previous_view = ViewNumber::new(0); let mut previous_quorum_proposal = { - let previous_jc = QuorumCertificate::::genesis::( + let previous_jc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(); + .await; QuorumProposalWrapper:: { proposal: QuorumProposal2:: { diff --git a/hotshot-builder-core/src/testing/finalization_test.rs b/hotshot-builder-core/src/testing/finalization_test.rs index 0a1846fa95..a671cbbdf8 100644 --- a/hotshot-builder-core/src/testing/finalization_test.rs +++ b/hotshot-builder-core/src/testing/finalization_test.rs @@ -21,10 +21,10 @@ use hotshot_example_types::{ node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ data::{vid_commitment, DaProposal2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{ 
block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, @@ -332,12 +332,11 @@ async fn progress_round_with_transactions( proposal: QuorumProposal2:: { block_header, view_number: next_view, - justify_qc: QuorumCertificate::::genesis::( + justify_qc: QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 8c8ea676e6..279e3ac84c 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -17,7 +17,7 @@ use hotshot_types::{ vid_commitment, DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber, }, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{ node_implementation::{ConsensusTime, Versions}, @@ -186,12 +186,13 @@ pub async fn calc_proposal_msg( }; let justify_qc = match prev_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/hotshot-example-types/src/storage_types.rs b/hotshot-example-types/src/storage_types.rs index a20a8a631e..3d7885973d 100644 --- a/hotshot-example-types/src/storage_types.rs +++ b/hotshot-example-types/src/storage_types.rs @@ -20,7 +20,7 @@ use hotshot_types::{ QuorumProposalWrapper, VidCommitment, }, event::HotShotAction, - message::Proposal, + message::{convert_proposal, Proposal}, 
simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ node_implementation::{ConsensusTime, NodeType}, @@ -362,13 +362,7 @@ impl Storage for TestStorage { Ok(()) } - async fn migrate_consensus( - &self, - _convert_leaf: fn(Leaf) -> Leaf2, - convert_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> Result<()> { + async fn migrate_consensus(&self) -> Result<()> { let mut storage_writer = self.inner.write().await; for (view, proposal) in storage_writer.proposals.clone().iter() { diff --git a/hotshot-query-service/Cargo.toml b/hotshot-query-service/Cargo.toml index 8a8d2a830b..c2d5c6d5c9 100644 --- a/hotshot-query-service/Cargo.toml +++ b/hotshot-query-service/Cargo.toml @@ -79,6 +79,7 @@ jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfis ] } lazy_static = "1" prometheus = "0.13" +semver = { workspace = true } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" snafu = "0.8" diff --git a/hotshot-query-service/migrations/postgres/V500__types_migration.sql b/hotshot-query-service/migrations/postgres/V500__types_migration.sql new file mode 100644 index 0000000000..838bce59a1 --- /dev/null +++ b/hotshot-query-service/migrations/postgres/V500__types_migration.sql @@ -0,0 +1,23 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE types_migration ( + id SERIAL PRIMARY KEY, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO types_migration ("completed") VALUES (false); + + +CREATE TABLE vid2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + common BYTEA NOT NULL, + share BYTEA +); diff --git a/hotshot-query-service/migrations/sqlite/V300__types_migration.sql b/hotshot-query-service/migrations/sqlite/V300__types_migration.sql new file 
mode 100644 index 0000000000..1b598c5d5a --- /dev/null +++ b/hotshot-query-service/migrations/sqlite/V300__types_migration.sql @@ -0,0 +1,22 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE types_migration ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO types_migration ("completed") VALUES (false); + +CREATE TABLE vid2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + common BYTEA NOT NULL, + share BYTEA +); diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index 63122094ea..d1dbcd249f 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -29,7 +29,12 @@ use crate::{api::load_api, Payload, QueryError}; use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; + +use hotshot_types::{ + data::{Leaf, Leaf2, QuorumProposal}, + simple_certificate::QuorumCertificate, + traits::node_implementation::NodeType, +}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, Snafu}; use std::{fmt::Display, path::PathBuf, time::Duration}; @@ -161,9 +166,95 @@ impl Error { } } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(bound = "")] +pub struct Leaf1QueryData { + pub(crate) leaf: Leaf, + pub(crate) qc: QuorumCertificate, +} + +fn downgrade_leaf(leaf2: Leaf2) -> Leaf { + // TODO do we still need some check here? 
+ // `drb_seed` no longer exists on `Leaf2` + // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] { + // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); + // } + let quorum_proposal = QuorumProposal { + block_header: leaf2.block_header().clone(), + view_number: leaf2.view_number(), + justify_qc: leaf2.justify_qc().to_qc(), + upgrade_certificate: leaf2.upgrade_certificate(), + proposal_certificate: None, + }; + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + if let Some(payload) = leaf2.block_payload() { + leaf.fill_block_payload_unchecked(payload); + } + leaf +} + +fn downgrade_leaf_query_data(leaf: LeafQueryData) -> Leaf1QueryData { + Leaf1QueryData { + leaf: downgrade_leaf(leaf.leaf), + qc: leaf.qc.to_qc(), + } +} + +async fn get_leaf_handler( + req: tide_disco::RequestParams, + state: &State, + timeout: Duration, +) -> Result, Error> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + AvailabilityDataSource, + Types: NodeType, + Payload: QueryablePayload, +{ + let id = match req.opt_integer_param("height")? 
{ + Some(height) => LeafId::Number(height), + None => LeafId::Hash(req.blob_param("hash")?), + }; + let fetch = state.read(|state| state.get_leaf(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchLeafSnafu { + resource: id.to_string(), + }) +} + +async fn get_leaf_range_handler( + req: tide_disco::RequestParams, + state: &State, + timeout: Duration, + small_object_range_limit: usize, +) -> Result>, Error> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + AvailabilityDataSource, + Types: NodeType, + Payload: QueryablePayload, +{ + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, small_object_range_limit)?; + + let leaves = state + .read(|state| state.get_leaf_range(from..until).boxed()) + .await; + leaves + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchLeafSnafu { + resource: (index + from).to_string(), + }) + }) + .try_collect::>() + .await +} + pub fn define_api( options: &Options, _: Ver, + api_ver: semver::Version, ) -> Result, ApiError> where State: 'static + Send + Sync + ReadState, @@ -179,310 +270,332 @@ where let small_object_range_limit = options.small_object_range_limit; let large_object_range_limit = options.large_object_range_limit; - api.with_version("0.0.1".parse().unwrap()) - .at("get_leaf", move |req, state| { - async move { - let id = match req.opt_integer_param("height")? { - Some(height) => LeafId::Number(height), - None => LeafId::Hash(req.blob_param("hash")?), - }; - let fetch = state.read(|state| state.get_leaf(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? 
- .at("get_leaf_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, small_object_range_limit)?; - - let leaves = state - .read(|state| state.get_leaf_range(from..until).boxed()) - .await; - leaves - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: (index + from).to_string(), - }) + api.with_version(api_ver.clone()); + + // `LeafQueryData` now contains `Leaf2` and `QC2``, which is a breaking change. + // On node startup, all leaves are migrated to `Leaf2`. + // + // To maintain compatibility with nodes running an older version + // (which expect `LeafQueryData` with `Leaf1` and `QC1`), + // we downgrade `Leaf2` to `Leaf1` and `QC2` to `QC1` if the API version is V0. + // Otherwise, we return the new types. + if api_ver.major == 0 { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout) + .map(|res| res.map(downgrade_leaf_query_data)) + .boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit) + .map(|res| { + res.map(|r| { + r.into_iter() + .map(downgrade_leaf_query_data) + .collect::>>() }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_leaves", move |req, state| { + }) + .boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() + async move { + Ok(state + .subscribe_leaves(height) + .await + .map(|leaf| Ok(downgrade_leaf_query_data(leaf)))) + } + .boxed() }) .await } .try_flatten_stream() .boxed() - })? - .at("get_header", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? 
{ - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_header(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? - .at("get_header_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param::<_, usize>("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let headers = state - .read(|state| state.get_header_range(from..until).boxed()) - .await; - headers - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: (index + from).to_string(), - }) - }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_headers", move |req, state| { + })?; + } else { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout).boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit).boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() }) .await } .try_flatten_stream() .boxed() - })? - .at("get_block", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) 
- }; - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), + })?; + } + + api.at("get_header", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_header(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_header_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param::<_, usize>("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let headers = state + .read(|state| state.get_header_range(from..until).boxed()) + .await; + headers + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: (index + from).to_string(), + }) }) - } - .boxed() - })? - .at("get_block_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_headers", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? 
+ .at("get_block", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_block_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_blocks", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_blocks", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_payload", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::PayloadHash(hash) + } else { + BlockId::Hash(req.blob_param("block-hash")?) 
+ }; + let fetch = state.read(|state| state.get_payload(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_payload_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let payloads = state + .read(|state| state.get_payload_range(from..until).boxed()) + .await; + payloads + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_payload", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::PayloadHash(hash) - } else { - BlockId::Hash(req.blob_param("block-hash")?) - }; - let fetch = state.read(|state| state.get_payload(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), }) - } - .boxed() - })? - .at("get_payload_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let payloads = state - .read(|state| state.get_payload_range(from..until).boxed()) - .await; - payloads - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), + .try_collect::>() + .await + } + .boxed() + })? 
+ .stream("stream_payloads", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_vid_common", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .stream("stream_vid_common", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_transaction", move |req, state| { + async move { + match req.opt_blob_param("hash")? { + Some(hash) => { + let fetch = state + .read(|state| state.get_transaction(hash).boxed()) + .await; + fetch + .with_timeout(timeout) + .await + .context(FetchTransactionSnafu { + resource: hash.to_string(), }) - }) - .try_collect::>() - .await - } - .boxed() - })? 
- .stream("stream_payloads", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() - }) - .await + } + None => { + let height: u64 = req.integer_param("height")?; + let fetch = state + .read(|state| state.get_block(height as usize).boxed()) + .await; + let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: height.to_string(), + })?; + let i: u64 = req.integer_param("index")?; + let index = block + .payload() + .nth(block.metadata(), i as usize) + .context(InvalidTransactionIndexSnafu { height, index: i })?; + TransactionQueryData::new(&block, index, i) + .context(InvalidTransactionIndexSnafu { height, index: i }) + } } - .try_flatten_stream() - .boxed() - })? - .at("get_vid_common", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + } + .boxed() + })? + .at("get_block_summary", move |req, state| { + async move { + let id: usize = req.integer_param("height")?; + + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch + .with_timeout(timeout) + .await + .context(FetchBlockSnafu { resource: id.to_string(), }) - } - .boxed() - })? - .stream("stream_vid_common", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() - }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_transaction", move |req, state| { - async move { - match req.opt_blob_param("hash")? 
{ - Some(hash) => { - let fetch = state - .read(|state| state.get_transaction(hash).boxed()) - .await; - fetch - .with_timeout(timeout) - .await - .context(FetchTransactionSnafu { - resource: hash.to_string(), - }) - } - None => { - let height: u64 = req.integer_param("height")?; - let fetch = state - .read(|state| state.get_block(height as usize).boxed()) - .await; - let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: height.to_string(), - })?; - let i: u64 = req.integer_param("index")?; - let index = block - .payload() - .nth(block.metadata(), i as usize) - .context(InvalidTransactionIndexSnafu { height, index: i })?; - TransactionQueryData::new(&block, index, i) - .context(InvalidTransactionIndexSnafu { height, index: i }) - } - } - } - .boxed() - })? - .at("get_block_summary", move |req, state| { - async move { - let id: usize = req.integer_param("height")?; - - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch - .with_timeout(timeout) - .await - .context(FetchBlockSnafu { - resource: id.to_string(), - }) - .map(BlockSummaryQueryData::from) - } - .boxed() - })? - .at("get_block_summary_range", move |req, state| { - async move { - let from: usize = req.integer_param("from")?; - let until: usize = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - let result: Vec> = blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .map(BlockSummaryQueryData::from) + } + .boxed() + })? 
+ .at("get_block_summary_range", move |req, state| { + async move { + let from: usize = req.integer_param("from")?; + let until: usize = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + let result: Vec> = blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .map(|result| result.map(BlockSummaryQueryData::from)) - .try_collect() - .await?; - - Ok(result) - } - .boxed() - })? - .at("get_limits", move |_req, _state| { - async move { - Ok(Limits { - small_object_range_limit, - large_object_range_limit, }) - } - .boxed() - })?; + .map(|result| result.map(BlockSummaryQueryData::from)) + .try_collect() + .await?; + + Ok(result) + } + .boxed() + })? + .at("get_limits", move |_req, _state| { + async move { + Ok(Limits { + small_object_range_limit, + large_object_range_limit, + }) + } + .boxed() + })?; Ok(api) } @@ -498,6 +611,7 @@ mod test { use super::*; use crate::data_source::storage::AvailabilityStorage; use crate::data_source::VersionedDataSource; + use crate::testing::mocks::MockVersions; use crate::{ data_source::ExtensibleDataSource, status::StatusDataSource, @@ -513,7 +627,8 @@ mod test { use async_lock::RwLock; use committable::Committable; use futures::future::FutureExt; - use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; + use hotshot_types::data::Leaf2; + use hotshot_types::simple_certificate::QuorumCertificate2; use portpicker::pick_unused_port; use serde::de::DeserializeOwned; use std::{fmt::Debug, time::Duration}; @@ -788,7 +903,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + 
"1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -884,10 +1004,10 @@ mod test { // mock up some consensus data. let leaf = - Leaf::::genesis::(&Default::default(), &Default::default()) + Leaf2::::genesis::(&Default::default(), &Default::default()) .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); @@ -924,6 +1044,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "1.0.0".parse().unwrap(), ) .unwrap(); api.get("get_ext", |_, state| { @@ -994,6 +1115,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1078,7 +1200,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1115,7 +1242,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/availability/data_source.rs b/hotshot-query-service/src/availability/data_source.rs index f13a9a7b1d..9747b7814b 100644 --- a/hotshot-query-service/src/availability/data_source.rs +++ b/hotshot-query-service/src/availability/data_source.rs @@ -18,7 +18,7 @@ use super::{ VidCommonQueryData, }, }; -use crate::{types::HeightIndexed, Header, Payload, VidCommitment, VidShare}; +use 
crate::{types::HeightIndexed, Header, Payload}; use async_trait::async_trait; use derivative::Derivative; use derive_more::{Display, From}; @@ -26,7 +26,10 @@ use futures::{ future::Future, stream::{BoxStream, StreamExt}, }; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::node_implementation::NodeType, +}; use std::{ cmp::Ordering, ops::{Bound, RangeBounds}, diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index 6232f7ed70..6e1b1eb1f7 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -10,12 +10,11 @@ // You should have received a copy of the GNU General Public License along with this program. If not, // see . -use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon, VidShare}; +use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon}; use committable::{Commitment, Committable}; use hotshot_types::{ - data::Leaf, - data::VidCommitment, - simple_certificate::QuorumCertificate, + data::{Leaf, Leaf2, VidCommitment, VidShare}, + simple_certificate::QuorumCertificate2, traits::{ self, block_contents::{BlockHeader, GENESIS_VID_NUM_STORAGE_NODES}, @@ -29,8 +28,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use snafu::{ensure, Snafu}; use std::fmt::Debug; -pub type LeafHash = Commitment>; -pub type QcHash = Commitment>; +pub type LeafHash = Commitment>; +pub type QcHash = Commitment>; /// A block hash is the hash of the block header. 
/// @@ -193,8 +192,8 @@ pub trait QueryablePayload: traits::BlockPayload { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(bound = "")] pub struct LeafQueryData { - pub(crate) leaf: Leaf, - pub(crate) qc: QuorumCertificate, + pub(crate) leaf: Leaf2, + pub(crate) qc: QuorumCertificate2, } #[derive(Clone, Debug, Snafu)] @@ -213,13 +212,13 @@ impl LeafQueryData { /// /// Fails with an [`InconsistentLeafError`] if `qc` does not reference `leaf`. pub fn new( - mut leaf: Leaf, - qc: QuorumCertificate, + mut leaf: Leaf2, + qc: QuorumCertificate2, ) -> Result> { // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will // require updates in callers and relevant types as well. - let leaf_commit = as Committable>::commit(&leaf); + let leaf_commit = as Committable>::commit(&leaf); ensure!( qc.data.leaf_commit == leaf_commit, InconsistentLeafSnafu { @@ -240,16 +239,16 @@ impl LeafQueryData { instance_state: &Types::InstanceState, ) -> Self { Self { - leaf: Leaf::genesis::(validated_state, instance_state).await, - qc: QuorumCertificate::genesis::(validated_state, instance_state).await, + leaf: Leaf2::genesis::(validated_state, instance_state).await, + qc: QuorumCertificate2::genesis::(validated_state, instance_state).await, } } - pub fn leaf(&self) -> &Leaf { + pub fn leaf(&self) -> &Leaf2 { &self.leaf } - pub fn qc(&self) -> &QuorumCertificate { + pub fn qc(&self) -> &QuorumCertificate2 { &self.qc } @@ -261,7 +260,7 @@ impl LeafQueryData { // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will // require updates in callers and relevant types as well. 
- as Committable>::commit(&self.leaf) + as Committable>::commit(&self.leaf) } pub fn block_hash(&self) -> BlockHash { @@ -326,7 +325,7 @@ impl BlockQueryData { where Payload: QueryablePayload, { - let leaf = Leaf::::genesis::(validated_state, instance_state).await; + let leaf = Leaf2::::genesis::(validated_state, instance_state).await; Self::new(leaf.block_header().clone(), leaf.block_payload().unwrap()) } diff --git a/hotshot-query-service/src/data_source.rs b/hotshot-query-service/src/data_source.rs index b6c707665d..3d45b8cd00 100644 --- a/hotshot-query-service/src/data_source.rs +++ b/hotshot-query-service/src/data_source.rs @@ -133,7 +133,7 @@ pub mod availability_tests { }; use committable::Committable; use futures::stream::StreamExt; - use hotshot_types::data::Leaf; + use hotshot_types::data::Leaf2; use std::collections::HashMap; use std::fmt::Debug; use std::ops::{Bound, RangeBounds}; @@ -148,7 +148,7 @@ pub mod availability_tests { assert_eq!(leaf.height(), i as u64); assert_eq!( leaf.hash(), - as Committable>::commit(&leaf.leaf) + as Committable>::commit(&leaf.leaf) ); // Check indices. @@ -550,11 +550,11 @@ pub mod persistence_tests { setup_test, }, types::HeightIndexed, - Leaf, + Leaf2, }; use committable::Committable; use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::simple_certificate::QuorumCertificate2; #[tokio::test(flavor = "multi_thread")] pub async fn test_revert() @@ -571,12 +571,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. 
- let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis::( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -584,7 +584,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -623,12 +623,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. - let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis::( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -636,7 +636,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -686,12 +686,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. 
- let mut mock_qc = QuorumCertificate::::genesis::( + let mut mock_qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut mock_leaf = Leaf::::genesis::( + let mut mock_leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -699,7 +699,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. mock_leaf.block_header_mut().block_number += 1; - mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); + mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(mock_leaf.clone(), mock_qc.clone()).unwrap(); @@ -725,7 +725,7 @@ pub mod persistence_tests { // Get a mutable transaction again, insert different data. mock_leaf.block_header_mut().block_number += 1; - mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); + mock_qc.data.leaf_commit = as Committable>::commit(&mock_leaf); let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(mock_leaf, mock_qc).unwrap(); @@ -771,7 +771,7 @@ pub mod node_tests { setup_test, sleep, }, types::HeightIndexed, - Header, VidCommitment, VidShare, + Header, }; use committable::Committable; use futures::{future::join_all, stream::StreamExt}; @@ -784,7 +784,7 @@ pub mod node_tests { state_types::TestInstanceState, }; use hotshot_types::{ - data::vid_commitment, + data::{vid_commitment, VidCommitment, VidShare}, traits::{block_contents::EncodeBytes, node_implementation::Versions}, vid::advz::{advz_scheme, ADVZScheme}, }; diff --git a/hotshot-query-service/src/data_source/extension.rs b/hotshot-query-service/src/data_source/extension.rs index 81de4a4293..8eb38d4668 100644 --- a/hotshot-query-service/src/data_source/extension.rs +++ 
b/hotshot-query-service/src/data_source/extension.rs @@ -27,9 +27,10 @@ use crate::{ metrics::PrometheusMetrics, node::{NodeDataSource, SyncStatus, TimeWindowQueryData, WindowStart}, status::{HasMetrics, StatusDataSource}, - Header, Payload, QueryResult, Transaction, VidShare, + Header, Payload, QueryResult, Transaction, }; use async_trait::async_trait; +use hotshot_types::data::VidShare; use hotshot_types::traits::node_implementation::NodeType; use jf_merkle_tree::prelude::MerkleProof; use std::ops::{Bound, RangeBounds}; diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index d6a7ce8012..e98fa236df 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -77,6 +77,7 @@ use super::{ notifier::Notifier, storage::{ pruning::{PruneStorage, PrunedHeightDataSource, PrunedHeightStorage}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, ExplorerStorage, MerklizedStateHeightStorage, MerklizedStateStorage, NodeStorage, UpdateAggregatesStorage, UpdateAvailabilityStorage, @@ -101,7 +102,7 @@ use crate::{ status::{HasMetrics, StatusDataSource}, task::BackgroundTask, types::HeightIndexed, - Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use anyhow::{bail, Context}; use async_lock::Semaphore; @@ -113,9 +114,12 @@ use futures::{ future::{self, join_all, BoxFuture, Either, Future, FutureExt}, stream::{self, BoxStream, StreamExt}, }; -use hotshot_types::traits::{ - metrics::{Gauge, Metrics}, - node_implementation::NodeType, +use hotshot_types::{ + data::VidShare, + traits::{ + metrics::{Gauge, Metrics}, + node_implementation::NodeType, + }, }; use jf_merkle_tree::{prelude::MerkleProof, MerkleTreeScheme}; use std::sync::Arc; @@ -369,7 +373,7 @@ where Types: NodeType, Payload: QueryablePayload, Header: QueryableHeader, - S: PruneStorage + VersionedDataSource + HasMetrics + 'static, 
+ S: PruneStorage + VersionedDataSource + HasMetrics + MigrateTypes + 'static, for<'a> S::ReadOnly<'a>: AvailabilityStorage + PrunedHeightStorage + NodeStorage + AggregatesStorage, for<'a> S::Transaction<'a>: UpdateAvailabilityStorage + UpdateAggregatesStorage, @@ -482,7 +486,7 @@ where Types: NodeType, Payload: QueryablePayload, Header: QueryableHeader, - S: VersionedDataSource + PruneStorage + HasMetrics + 'static, + S: VersionedDataSource + PruneStorage + HasMetrics + MigrateTypes + 'static, for<'a> S::Transaction<'a>: UpdateAvailabilityStorage + UpdateAggregatesStorage, for<'a> S::ReadOnly<'a>: AvailabilityStorage + NodeStorage + PrunedHeightStorage + AggregatesStorage, @@ -510,6 +514,13 @@ where let aggregator_metrics = AggregatorMetrics::new(builder.storage.metrics()); let fetcher = Arc::new(Fetcher::new(builder).await?); + + // Migrate the old types to new PoS types + // This is a one-time operation that should be done before starting the data source + // It migrates leaf1 storage to leaf2 + // and vid to vid2 + fetcher.storage.migrate_types().await?; + let scanner = if proactive_fetching && !leaf_only { Some(BackgroundTask::spawn( "proactive scanner", diff --git a/hotshot-query-service/src/data_source/fetching/vid.rs b/hotshot-query-service/src/data_source/fetching/vid.rs index 7849ba2a77..51c948c79f 100644 --- a/hotshot-query-service/src/data_source/fetching/vid.rs +++ b/hotshot-query-service/src/data_source/fetching/vid.rs @@ -28,13 +28,16 @@ use crate::{ }, fetching::{self, request, Callback}, types::HeightIndexed, - Header, Payload, QueryResult, VidShare, + Header, Payload, QueryResult, }; use async_trait::async_trait; use derivative::Derivative; use derive_more::From; use futures::future::{BoxFuture, FutureExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use std::sync::Arc; use 
std::{cmp::Ordering, future::IntoFuture, iter::once, ops::RangeBounds}; diff --git a/hotshot-query-service/src/data_source/storage.rs b/hotshot-query-service/src/data_source/storage.rs index 027b6e964f..227950d9e7 100644 --- a/hotshot-query-service/src/data_source/storage.rs +++ b/hotshot-query-service/src/data_source/storage.rs @@ -74,11 +74,11 @@ use crate::{ }, merklized_state::{MerklizedState, Snapshot}, node::{SyncStatus, TimeWindowQueryData, WindowStart}, - Header, Payload, QueryResult, Transaction, VidShare, + Header, Payload, QueryResult, Transaction, }; use async_trait::async_trait; use futures::future::Future; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use jf_merkle_tree::prelude::MerkleProof; use std::ops::RangeBounds; use tagged_base64::TaggedBase64; diff --git a/hotshot-query-service/src/data_source/storage/fail_storage.rs b/hotshot-query-service/src/data_source/storage/fail_storage.rs index 1b366bd6b2..8398e8c303 100644 --- a/hotshot-query-service/src/data_source/storage/fail_storage.rs +++ b/hotshot-query-service/src/data_source/storage/fail_storage.rs @@ -14,6 +14,7 @@ use super::{ pruning::{PruneStorage, PrunedHeightStorage, PrunerCfg, PrunerConfig}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, UpdateAggregatesStorage, UpdateAvailabilityStorage, }; @@ -29,12 +30,12 @@ use crate::{ metrics::PrometheusMetrics, node::{SyncStatus, TimeWindowQueryData, WindowStart}, status::HasMetrics, - Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use async_lock::Mutex; use async_trait::async_trait; use futures::future::Future; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use std::ops::RangeBounds; use std::sync::Arc; @@ -253,6 +254,16 @@ where } } +#[async_trait] +impl MigrateTypes for 
FailStorage +where + S: MigrateTypes + Sync, +{ + async fn migrate_types(&self) -> anyhow::Result<()> { + Ok(()) + } +} + #[async_trait] impl PruneStorage for FailStorage where diff --git a/hotshot-query-service/src/data_source/storage/fs.rs b/hotshot-query-service/src/data_source/storage/fs.rs index 296261666b..3e58a04911 100644 --- a/hotshot-query-service/src/data_source/storage/fs.rs +++ b/hotshot-query-service/src/data_source/storage/fs.rs @@ -15,6 +15,7 @@ use super::{ ledger_log::{Iter, LedgerLog}, pruning::{PruneStorage, PrunedHeightStorage, PrunerConfig}, + sql::MigrateTypes, Aggregate, AggregatesStorage, AvailabilityStorage, NodeStorage, PayloadMetadata, UpdateAggregatesStorage, UpdateAvailabilityStorage, VidCommonMetadata, }; @@ -33,14 +34,16 @@ use crate::{ status::HasMetrics, types::HeightIndexed, ErrorSnafu, Header, MissingSnafu, NotFoundSnafu, Payload, QueryError, QueryResult, - VidCommitment, VidShare, }; use async_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use async_trait::async_trait; use atomic_store::{AtomicStore, AtomicStoreLoader, PersistenceError}; use committable::Committable; use futures::future::Future; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::{VidCommitment, VidShare}, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use serde::{de::DeserializeOwned, Serialize}; use snafu::OptionExt; use std::collections::{ @@ -133,6 +136,16 @@ where type Pruner = (); } +#[async_trait] +impl MigrateTypes for FileSystemStorage +where + Payload: QueryablePayload, +{ + async fn migrate_types(&self) -> anyhow::Result<()> { + Ok(()) + } +} + impl FileSystemStorage where Payload: QueryablePayload, diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index 3bb20aa98a..b12cb5efbb 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ 
b/hotshot-query-service/src/data_source/storage/sql.rs @@ -11,7 +11,6 @@ // see . #![cfg(feature = "sql-data-source")] - use crate::{ data_source::{ storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig}, @@ -22,10 +21,17 @@ use crate::{ status::HasMetrics, QueryError, QueryResult, }; +use anyhow::Context; use async_trait::async_trait; use chrono::Utc; +use committable::Committable; +use hotshot_types::{ + data::{Leaf, Leaf2, VidShare}, + simple_certificate::{QuorumCertificate, QuorumCertificate2}, + traits::{metrics::Metrics, node_implementation::NodeType}, + vid::advz::ADVZShare, +}; -use hotshot_types::traits::metrics::Metrics; use itertools::Itertools; use log::LevelFilter; @@ -810,6 +816,159 @@ impl VersionedDataSource for SqlStorage { } } +#[async_trait] +pub trait MigrateTypes { + async fn migrate_types(&self) -> anyhow::Result<()>; +} + +#[async_trait] +impl MigrateTypes for SqlStorage { + async fn migrate_types(&self) -> anyhow::Result<()> { + let mut offset = 0; + let limit = 10000; + let mut tx = self.read().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + let (is_migration_completed,) = + query_as::<(bool,)>("SELECT completed from types_migration LIMIT 1 ") + .fetch_one(tx.as_mut()) + .await?; + + if is_migration_completed { + tracing::info!("types migration already completed"); + return Ok(()); + } + + tracing::warn!("migrating query service types storage"); + + loop { + let mut tx = self.read().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + let rows = QueryBuilder::default() + .query(&format!( + "SELECT leaf, qc, common as vid_common, share as vid_share FROM leaf INNER JOIN vid on leaf.height = vid.height ORDER BY leaf.height LIMIT {} OFFSET {}", + limit, offset + )) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + if rows.is_empty() { + break; + } + + let mut leaf_rows = Vec::new(); + let mut vid_rows = Vec::new(); + + for row in rows.iter() { + let leaf1 = row.try_get("leaf")?; + 
let qc = row.try_get("qc")?; + let leaf1: Leaf = serde_json::from_value(leaf1)?; + let qc: QuorumCertificate = serde_json::from_value(qc)?; + + let leaf2: Leaf2 = leaf1.into(); + let qc2: QuorumCertificate2 = qc.to_qc2(); + + let commit = leaf2.commit(); + + let leaf2_json = + serde_json::to_value(leaf2.clone()).context("failed to serialize leaf2")?; + let qc2_json = serde_json::to_value(qc2).context("failed to serialize QC2")?; + + // TODO (abdul): revisit after V1 VID has common field + let vid_common_bytes: Vec = row.try_get("vid_common")?; + let vid_share_bytes: Vec = row.try_get("vid_share")?; + + let vid_share: ADVZShare = bincode::deserialize(&vid_share_bytes) + .context("failed to serialize vid_share")?; + + let new_vid_share_bytes = bincode::serialize(&VidShare::V0(vid_share)) + .context("failed to serialize vid_share")?; + + vid_rows.push((leaf2.height() as i64, vid_common_bytes, new_vid_share_bytes)); + leaf_rows.push(( + leaf2.height() as i64, + commit.to_string(), + leaf2.block_header().commit().to_string(), + leaf2_json, + qc2_json, + )); + } + + // migrate leaf2 + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO leaf2 (height, hash, block_hash, leaf, qc) "); + + query_builder.push_values(leaf_rows.into_iter(), |mut b, row| { + b.push_bind(row.0) + .push_bind(row.1) + .push_bind(row.2) + .push_bind(row.3) + .push_bind(row.4); + }); + + let query = query_builder.build(); + + let mut tx = self.write().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + query.execute(tx.as_mut()).await?; + + tx.commit().await?; + tracing::warn!("inserted {} rows into leaf2 table", offset); + // migrate vid + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO vid2 (height, common, share) "); + + query_builder.push_values(vid_rows.into_iter(), |mut b, row| { + b.push_bind(row.0).push_bind(row.1).push_bind(row.2); + }); + + let query = query_builder.build(); + + let mut tx = 
self.write().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + query.execute(tx.as_mut()).await?; + + tx.commit().await?; + + tracing::warn!("inserted {} rows into vid2 table", offset); + + if rows.len() < limit { + break; + } + + offset += limit; + } + + let mut tx = self.write().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + tracing::warn!("query service types migration is completed!"); + + tx.upsert( + "types_migration", + ["id", "completed"], + ["id"], + [(0_i64, true)], + ) + .await?; + + tracing::info!("updated types_migration table"); + + tx.commit().await?; + Ok(()) + } +} + // These tests run the `postgres` Docker image, which doesn't work on Windows. #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))] pub mod testing { @@ -827,8 +986,8 @@ pub mod testing { use portpicker::pick_unused_port; use super::Config; + use crate::availability::query_data::QueryableHeader; use crate::testing::sleep; - #[derive(Debug)] pub struct TmpDb { #[cfg(not(feature = "embedded-db"))] @@ -1116,23 +1275,38 @@ pub mod testing { // These tests run the `postgres` Docker image, which doesn't work on Windows. 
#[cfg(all(test, not(target_os = "windows")))] mod test { + use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use hotshot::traits::BlockPayload; use hotshot_example_types::{ node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; + use jf_vid::VidScheme; + + use hotshot_types::{ + data::vid_commitment, + traits::{node_implementation::Versions, EncodeBytes}, + vid::advz::advz_scheme, + }; + use hotshot_types::{ + data::{QuorumProposal, ViewNumber}, + simple_vote::QuorumData, + traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}, + }; use jf_merkle_tree::{ prelude::UniversalMerkleTree, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme, }; use std::time::Duration; use tokio::time::sleep; + use vbs::version::StaticVersionType; use super::{testing::TmpDb, *}; use crate::{ - availability::LeafQueryData, + availability::{LeafQueryData, QueryableHeader}, data_source::storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, merklized_state::{MerklizedState, UpdateStateData}, testing::{ - mocks::{MockMerkleTree, MockTypes}, + mocks::{MockHeader, MockMerkleTree, MockPayload, MockTypes, MockVersions}, setup_test, }, }; @@ -1509,4 +1683,156 @@ mod test { ); } } + + #[tokio::test(flavor = "multi_thread")] + async fn test_types_migration() { + setup_test(); + + let num_rows = 200; + let db = TmpDb::init().await; + + let storage = SqlStorage::connect(db.config()).await.unwrap(); + + for i in 0..num_rows { + let view = ViewNumber::new(i); + let validated_state = TestValidatedState::default(); + let instance_state = TestInstanceState::default(); + + let (payload, metadata) = >::from_transactions( + [], + &validated_state, + &instance_state, + ) + .await + .unwrap(); + let builder_commitment = + >::builder_commitment(&payload, &metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment::( + &payload_bytes, + &metadata.encode(), + 4, + ::Base::VERSION, + ); + 
+ let mut block_header = >::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + block_header.block_number = i; + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + + let mut qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + std::marker::PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload::( + payload.clone(), + 4, + ::Base::VERSION, + ) + .unwrap(); + qc.data.leaf_commit = as Committable>::commit(&leaf); + + let height = leaf.height() as i64; + let hash = as Committable>::commit(&leaf).to_string(); + let header = leaf.block_header(); + + let header_json = serde_json::to_value(header) + .context("failed to serialize header") + .unwrap(); + + let payload_commitment = + >::payload_commitment(header); + let mut tx = storage.write().await.unwrap(); + + tx.upsert( + "header", + ["height", "hash", "payload_hash", "data", "timestamp"], + ["height"], + [( + height, + leaf.block_header().commit().to_string(), + payload_commitment.to_string(), + header_json, + leaf.block_header().timestamp() as i64, + )], + ) + .await + .unwrap(); + + let leaf_json = serde_json::to_value(leaf.clone()).expect("failed to serialize leaf"); + let qc_json = serde_json::to_value(qc).expect("failed to serialize QC"); + tx.upsert( + "leaf", + ["height", "hash", "block_hash", "leaf", "qc"], + ["height"], + [( + height, + hash, + header.commit().to_string(), + leaf_json, + qc_json, + )], + ) + .await + .unwrap(); + + let mut vid = advz_scheme(2); + let disperse = vid.disperse(payload.encode()).unwrap(); + let common = Some(disperse.common); + let share = disperse.shares[0].clone(); + + let common_bytes = 
bincode::serialize(&common).unwrap(); + let share_bytes = bincode::serialize(&share).unwrap(); + + tx.upsert( + "vid", + ["height", "common", "share"], + ["height"], + [(height, common_bytes, share_bytes)], + ) + .await + .unwrap(); + tx.commit().await.unwrap(); + } + + >::migrate_types(&storage) + .await + .expect("failed to migrate"); + + let mut tx = storage.read().await.unwrap(); + let (leaf_count,) = query_as::<(i64,)>("SELECT COUNT(*) from leaf2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + + let (vid_count,) = query_as::<(i64,)>("SELECT COUNT(*) from vid2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + + assert_eq!(leaf_count as u64, num_rows, "not all leaves migrated"); + assert_eq!(vid_count as u64, num_rows, "not all vid migrated"); + } } diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs index b8c227b752..696aca5ba3 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs @@ -14,22 +14,21 @@ //! Immutable query functionality of a SQL database. 
use super::{Database, Db, Query, QueryAs, Transaction}; +use crate::Leaf2; use crate::{ availability::{ BlockId, BlockQueryData, LeafQueryData, PayloadQueryData, QueryablePayload, VidCommonQueryData, }, data_source::storage::{PayloadMetadata, VidCommonMetadata}, - Header, Leaf, Payload, QueryError, QueryResult, + Header, Payload, QueryError, QueryResult, }; use anyhow::Context; use derivative::Derivative; -use hotshot_types::{ - simple_certificate::QuorumCertificate, - traits::{ - block_contents::{BlockHeader, BlockPayload}, - node_implementation::NodeType, - }, +use hotshot_types::simple_certificate::QuorumCertificate2; +use hotshot_types::traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::NodeType, }; use sqlx::{Arguments, FromRow, Row}; use std::{ @@ -171,10 +170,10 @@ where { fn from_row(row: &'r ::Row) -> sqlx::Result { let leaf = row.try_get("leaf")?; - let leaf: Leaf = serde_json::from_value(leaf).decode_error("malformed leaf")?; + let leaf: Leaf2 = serde_json::from_value(leaf).decode_error("malformed leaf")?; let qc = row.try_get("qc")?; - let qc: QuorumCertificate = + let qc: QuorumCertificate2 = serde_json::from_value(qc).decode_error("malformed QC")?; Ok(Self { leaf, qc }) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs index 759296fb87..0a51d28bee 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs @@ -50,7 +50,7 @@ where }; let row = query .query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE {where_clause}" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE {where_clause}" )) .fetch_one(self.as_mut()) .await?; @@ -134,7 +134,7 @@ where let sql = format!( "SELECT {VID_COMMON_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height WHERE {where_clause} 
ORDER BY h.height LIMIT 1" @@ -155,7 +155,7 @@ where let sql = format!( "SELECT {VID_COMMON_METADATA_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height WHERE {where_clause} ORDER BY h.height ASC LIMIT 1" @@ -174,7 +174,7 @@ where { let mut query = QueryBuilder::default(); let where_clause = query.bounds_to_where_clause(range, "height")?; - let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf {where_clause} ORDER BY height"); + let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf2 {where_clause} ORDER BY height"); Ok(query .query(&sql) .fetch(self.as_mut()) @@ -296,7 +296,7 @@ where let sql = format!( "SELECT {VID_COMMON_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height {where_clause} ORDER BY h.height" ); @@ -321,7 +321,7 @@ where let sql = format!( "SELECT {VID_COMMON_METADATA_COLUMNS} FROM header AS h - JOIN vid AS v ON h.height = v.height + JOIN vid2 AS v ON h.height = v.height {where_clause} ORDER BY h.height ASC" ); @@ -367,7 +367,7 @@ where async fn first_available_leaf(&mut self, from: u64) -> QueryResult> { let row = query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE height >= $1 ORDER BY height LIMIT 1" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE height >= $1 ORDER BY height LIMIT 1" )) .bind(from as i64) .fetch_one(self.as_mut()) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index ca8874c179..326d29e695 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -22,12 +22,15 @@ use crate::{ }, node::{BlockId, SyncStatus, TimeWindowQueryData, WindowStart}, types::HeightIndexed, - Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, VidShare, + Header, MissingSnafu, NotFoundSnafu, QueryError, QueryResult, }; use anyhow::anyhow; use 
async_trait::async_trait; use futures::stream::{StreamExt, TryStreamExt}; -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::NodeType}; +use hotshot_types::{ + data::VidShare, + traits::{block_contents::BlockHeader, node_implementation::NodeType}, +}; use snafu::OptionExt; use sqlx::Row; use std::ops::{Bound, RangeBounds}; @@ -115,7 +118,7 @@ where // ORDER BY h.height ASC ensures that if there are duplicate blocks (this can happen when // selecting by payload ID, as payloads are not unique), we return the first one. let sql = format!( - "SELECT v.share AS share FROM vid AS v + "SELECT v.share AS share FROM vid2 AS v JOIN header AS h ON v.height = h.height WHERE {where_clause} ORDER BY h.height @@ -155,10 +158,10 @@ where // need to select the total number of VID rows and the number of present VID rows with a // NULL share. let sql = "SELECT l.max_height, l.total_leaves, p.null_payloads, v.total_vid, vn.null_vid, pruned_height FROM - (SELECT max(leaf.height) AS max_height, count(*) AS total_leaves FROM leaf) AS l, + (SELECT max(leaf2.height) AS max_height, count(*) AS total_leaves FROM leaf2) AS l, (SELECT count(*) AS null_payloads FROM payload WHERE data IS NULL) AS p, - (SELECT count(*) AS total_vid FROM vid) AS v, - (SELECT count(*) AS null_vid FROM vid WHERE share IS NULL) AS vn, + (SELECT count(*) AS total_vid FROM vid2) AS v, + (SELECT count(*) AS null_vid FROM vid2 WHERE share IS NULL) AS vn, (SELECT(SELECT last_height FROM pruned_height ORDER BY id DESC LIMIT 1) as pruned_height) "; let row = query(sql) diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index 105df2586a..f7443e36a7 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -36,7 +36,7 @@ use crate::{ }, merklized_state::{MerklizedState, UpdateStateData}, types::HeightIndexed, - 
Header, Payload, QueryError, QueryResult, VidShare, + Header, Payload, QueryError, QueryResult, }; use anyhow::{bail, Context}; use ark_serialize::CanonicalSerialize; @@ -44,11 +44,14 @@ use async_trait::async_trait; use committable::Committable; use derive_more::{Deref, DerefMut}; use futures::{future::Future, stream::TryStreamExt}; -use hotshot_types::traits::{ - block_contents::BlockHeader, - metrics::{Counter, Gauge, Histogram, Metrics}, - node_implementation::NodeType, - EncodeBytes, +use hotshot_types::{ + data::VidShare, + traits::{ + block_contents::BlockHeader, + metrics::{Counter, Gauge, Histogram, Metrics}, + node_implementation::NodeType, + EncodeBytes, + }, }; use itertools::Itertools; use jf_merkle_tree::prelude::{MerkleNode, MerkleProof}; @@ -509,7 +512,7 @@ where let leaf_json = serde_json::to_value(leaf.leaf()).context("failed to serialize leaf")?; let qc_json = serde_json::to_value(leaf.qc()).context("failed to serialize QC")?; self.upsert( - "leaf", + "leaf2", ["height", "hash", "block_hash", "leaf", "qc"], ["height"], [( @@ -603,7 +606,7 @@ where if let Some(share) = share { let share_data = bincode::serialize(&share).context("failed to serialize VID share")?; self.upsert( - "vid", + "vid2", ["height", "common", "share"], ["height"], [(height as i64, common_data, share_data)], @@ -614,7 +617,7 @@ where // possible that this column already exists, and we are just upserting the common data, // in which case we don't want to overwrite the share with NULL. 
self.upsert( - "vid", + "vid2", ["height", "common"], ["height"], [(height as i64, common_data)], diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index b1ddd13033..0f2da97126 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -16,47 +16,25 @@ use crate::{ BlockInfo, BlockQueryData, LeafQueryData, QueryablePayload, UpdateAvailabilityData, VidCommonQueryData, }, - Payload, VidShare, + Payload, }; use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; +use hotshot_types::data::{VidDisperseShare, VidShare}; use hotshot_types::{ - data::{Leaf, Leaf2, QuorumProposal}, + data::Leaf2, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, vid::advz::advz_scheme, }; -use hotshot_types::{ - data::{VidCommitment, VidDisperseShare}, - event::LeafInfo, -}; +use hotshot_types::{data::VidCommitment, event::LeafInfo}; use jf_vid::VidScheme; use std::iter::once; -fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO do we still need some check here? - // `drb_seed` no longer exists on `Leaf2` - // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - /// An extension trait for types which implement the update trait for each API module. 
/// /// If a type implements [UpdateAvailabilityData] and @@ -116,24 +94,23 @@ where }, ) in qcs.zip(leaf_chain.iter().rev()) { - let leaf = downgrade_leaf(leaf2.clone()); - let qc = qc2.to_qc(); - let height = leaf.block_header().block_number(); - let leaf_data = match LeafQueryData::new(leaf.clone(), qc.clone()) { + let height = leaf2.block_header().block_number(); + + let leaf_data = match LeafQueryData::new(leaf2.clone(), qc2.clone()) { Ok(leaf) => leaf, Err(err) => { tracing::error!( height, - ?leaf, - ?qc, + ?leaf2, + ?qc2, "inconsistent leaf; cannot append leaf information: {err:#}" ); - return Err(leaf.block_header().block_number()); + return Err(leaf2.block_header().block_number()); } }; - let block_data = leaf + let block_data = leaf2 .block_payload() - .map(|payload| BlockQueryData::new(leaf.block_header().clone(), payload)); + .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload)); if block_data.is_none() { tracing::info!(height, "block not available at decide"); } @@ -141,22 +118,22 @@ where let (vid_common, vid_share) = match vid_share { Some(VidDisperseShare::V0(share)) => ( Some(VidCommonQueryData::new( - leaf.block_header().clone(), + leaf2.block_header().clone(), Some(share.common.clone()), )), Some(VidShare::V0(share.share.clone())), ), Some(VidDisperseShare::V1(share)) => ( - Some(VidCommonQueryData::new(leaf.block_header().clone(), None)), + Some(VidCommonQueryData::new(leaf2.block_header().clone(), None)), Some(VidShare::V1(share.share.clone())), ), None => { - if leaf.view_number().u64() == 0 { + if leaf2.view_number().u64() == 0 { // HotShot does not run VID in consensus for the genesis block. In this case, // the block payload is guaranteed to always be empty, so VID isn't really // necessary. But for consistency, we will still store the VID dispersal data, // computing it ourselves based on the well-known genesis VID commitment.
- match genesis_vid(&leaf) { + match genesis_vid(leaf2) { Ok((common, share)) => (Some(common), Some(share)), Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); @@ -178,7 +155,7 @@ where .await { tracing::error!(height, "failed to append leaf information: {err:#}"); - return Err(leaf.block_header().block_number()); + return Err(leaf2.block_header().block_number()); } } } @@ -187,7 +164,7 @@ where } fn genesis_vid( - leaf: &Leaf, + leaf: &Leaf2, ) -> anyhow::Result<(VidCommonQueryData, VidShare)> { let payload = Payload::::empty().0; let bytes = payload.encode(); diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index 03ca26323c..ef259a83e5 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -878,6 +878,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "0.0.1".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index c1112cefe2..0896abdce4 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -234,7 +234,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); let _server = BackgroundTask::spawn( diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index 8e42bce3fd..dbf09aa900 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -16,12 +16,13 @@ use crate::{ availability::{LeafQueryData, PayloadQueryData, VidCommonQueryData}, 
fetching::request::{LeafRequest, PayloadRequest, VidCommonRequest}, types::HeightIndexed, - Error, Payload, VidCommitment, VidCommon, + Error, Payload, VidCommon, }; use async_trait::async_trait; use committable::Committable; use futures::try_join; use hotshot_types::{ + data::VidCommitment, traits::{node_implementation::NodeType, EncodeBytes}, vid::advz::{advz_scheme, ADVZScheme}, }; @@ -216,7 +217,7 @@ mod test { setup_test, sleep, }, types::HeightIndexed, - ApiState, VidCommitment, + ApiState, }; use committable::Committable; use futures::{ @@ -268,7 +269,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -491,7 +497,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -549,7 +560,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -611,7 +627,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -670,7 +691,12 @@ mod test { let mut app = 
App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -726,7 +752,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -797,7 +828,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -942,7 +978,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1110,7 +1151,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1209,7 +1255,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + 
"1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1301,7 +1352,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1365,7 +1421,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1423,7 +1484,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1499,7 +1565,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1589,7 +1660,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1656,7 +1732,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - 
define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1728,7 +1809,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/fetching/request.rs b/hotshot-query-service/src/fetching/request.rs index d0dd2710f2..be3d1de434 100644 --- a/hotshot-query-service/src/fetching/request.rs +++ b/hotshot-query-service/src/fetching/request.rs @@ -14,10 +14,10 @@ use crate::{ availability::{LeafHash, LeafQueryData, QcHash}, - Payload, VidCommitment, + Payload, }; use derive_more::{From, Into}; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidCommitment, traits::node_implementation::NodeType}; use std::fmt::Debug; use std::hash::Hash; diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index 20a9e92846..5fafb197dd 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -446,11 +446,7 @@ use task::BackgroundTask; use tide_disco::{method::ReadState, App, StatusCode}; use vbs::version::StaticVersionType; -pub use hotshot_types::{ - data::Leaf, - data::{VidCommitment, VidShare}, - simple_certificate::QuorumCertificate, -}; +pub use hotshot_types::{data::Leaf2, simple_certificate::QuorumCertificate}; pub type VidCommon = Option; @@ -543,15 +539,28 @@ where ApiVer: StaticVersionType + 'static, { // Create API modules. 
- let availability_api = - availability::define_api(&options.availability, bind_version).map_err(Error::internal)?; + let availability_api_v0 = availability::define_api( + &options.availability, + bind_version, + "0.0.1".parse().unwrap(), + ) + .map_err(Error::internal)?; + + let availability_api_v1 = availability::define_api( + &options.availability, + bind_version, + "1.0.0".parse().unwrap(), + ) + .map_err(Error::internal)?; let node_api = node::define_api(&options.node, bind_version).map_err(Error::internal)?; let status_api = status::define_api(&options.status, bind_version).map_err(Error::internal)?; // Create app. let data_source = Arc::new(data_source); let mut app = App::<_, Error>::with_state(ApiState(data_source.clone())); - app.register_module("availability", availability_api) + app.register_module("availability", availability_api_v0) + .map_err(Error::internal)? + .register_module("availability", availability_api_v1) .map_err(Error::internal)? .register_module("node", node_api) .map_err(Error::internal)? @@ -599,7 +608,7 @@ mod test { use async_trait::async_trait; use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; use futures::future::FutureExt; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::{data::VidShare, simple_certificate::QuorumCertificate2}; use portpicker::pick_unused_port; use std::ops::{Bound, RangeBounds}; use std::time::Duration; @@ -828,10 +837,10 @@ mod test { // Mock up some data and add a block to the store. 
let leaf = - Leaf::::genesis::(&Default::default(), &Default::default()) + Leaf2::::genesis::(&Default::default(), &Default::default()) .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); @@ -862,7 +871,12 @@ mod test { let mut app = App::<_, Error>::with_state(RwLock::new(state)); app.register_module( "availability", - availability::define_api(&Default::default(), MockBase::instance()).unwrap(), + availability::define_api( + &Default::default(), + MockBase::instance(), + "1.0.0".parse().unwrap(), + ) + .unwrap(), ) .unwrap() .register_module( diff --git a/hotshot-query-service/src/node.rs b/hotshot-query-service/src/node.rs index 91d3d7ef1b..6499cc3e36 100644 --- a/hotshot-query-service/src/node.rs +++ b/hotshot-query-service/src/node.rs @@ -210,13 +210,13 @@ mod test { mocks::{mock_transaction, MockBase, MockTypes}, setup_test, }, - ApiState, Error, Header, VidShare, + ApiState, Error, Header, }; use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, StreamExt}; use hotshot_types::{ - data::VidDisperseShare, + data::{VidDisperseShare, VidShare}, event::{EventType, LeafInfo}, traits::{ block_contents::{BlockHeader, BlockPayload}, diff --git a/hotshot-query-service/src/node/data_source.rs b/hotshot-query-service/src/node/data_source.rs index 952a366196..a256b209bc 100644 --- a/hotshot-query-service/src/node/data_source.rs +++ b/hotshot-query-service/src/node/data_source.rs @@ -25,11 +25,11 @@ //! trait](crate::availability::UpdateAvailabilityData). 
use super::query_data::{BlockHash, BlockId, SyncStatus, TimeWindowQueryData}; -use crate::{Header, QueryResult, VidShare}; +use crate::{Header, QueryResult}; use async_trait::async_trait; use derivative::Derivative; use derive_more::From; -use hotshot_types::traits::node_implementation::NodeType; +use hotshot_types::{data::VidShare, traits::node_implementation::NodeType}; use std::ops::RangeBounds; #[derive(Derivative, From)] diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index a57bec5069..e61063227f 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -372,7 +372,7 @@ impl From for VidShare { } } -mod ns_table; +pub mod ns_table; pub mod vid_disperse; /// VID dispersal data diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs index 2a53c1e272..c645c36957 100644 --- a/hotshot-types/src/data/ns_table.rs +++ b/hotshot-types/src/data/ns_table.rs @@ -17,7 +17,7 @@ const NS_ID_BYTE_LEN: usize = 4; /// If the namespace table is invalid, it returns a default single entry namespace table. /// For details, please refer to `block/full_payload/ns_table.rs` in the `sequencer` crate. 
#[allow(clippy::single_range_in_vec_init)] -pub(crate) fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec> { +pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec> { let mut result = vec![]; if bytes.len() < NUM_NSS_BYTE_LEN || (bytes.len() - NUM_NSS_BYTE_LEN) % (NS_OFFSET_BYTE_LEN + NS_ID_BYTE_LEN) != 0 diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index 1b66b4d512..991ba8282f 100644 --- a/hotshot-types/src/traits/storage.rs +++ b/hotshot-types/src/traits/storage.rs @@ -155,13 +155,7 @@ pub trait Storage: Send + Sync + Clone { decided_upgrade_certificate: Option>, ) -> Result<()>; /// Migrate leaves from `Leaf` to `Leaf2`, and proposals from `QuorumProposal` to `QuorumProposal2` - async fn migrate_consensus( - &self, - _convert_leaf: fn(Leaf) -> Leaf2, - _convert_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> Result<()> { + async fn migrate_consensus(&self) -> Result<()> { Ok(()) } } diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 6b0e77b1cb..032c82edc5 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -54,9 +54,9 @@ use hotshot_types::{ ViewInner, }, constants::{EVENT_CHANNEL_SIZE, EXTERNAL_EVENT_CHANNEL_SIZE}, - data::{Leaf2, QuorumProposal, QuorumProposal2}, + data::Leaf2, event::{EventType, LeafInfo}, - message::{convert_proposal, DataMessage, Message, MessageKind, Proposal}, + message::{DataMessage, Message, MessageKind, Proposal}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, @@ -211,13 +211,7 @@ impl, V: Versions> SystemContext, ) -> Arc { #[allow(clippy::panic)] - match storage - .migrate_consensus( - Into::>::into, - convert_proposal::, QuorumProposal2>, - ) - .await - { + match storage.migrate_consensus().await { Ok(()) => {} Err(e) => { panic!("Failed to migrate consensus storage: {e}"); diff --git a/marketplace-builder-shared/src/testing/consensus.rs 
b/marketplace-builder-shared/src/testing/consensus.rs index 93845e7113..f4ee61cd00 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -20,7 +20,7 @@ use hotshot_types::{ data::vid_commitment, data::{DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{ node_implementation::{ConsensusTime, Versions}, @@ -102,12 +102,13 @@ impl SimulatedChainState { }; let justify_qc = match self.previous_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index 8de45bf4d3..cd334f9c0e 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -231,9 +231,10 @@ mod test { events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, events_source::{EventConsumer, EventsStreamer}, }; - use hotshot_query_service::{availability::LeafQueryData, VidCommitment}; + use hotshot_query_service::availability::LeafQueryData; use hotshot_types::{ bundle::Bundle, + data::VidCommitment, event::LeafInfo, light_client::StateKeyPair, signature_key::BLSPubKey, diff --git a/node-metrics/Cargo.toml b/node-metrics/Cargo.toml index 0092500753..a325e748c6 100644 --- a/node-metrics/Cargo.toml +++ b/node-metrics/Cargo.toml @@ -21,6 +21,7 @@ clap = { workspace = true } espresso-types = { path = "../types" } futures = { 
workspace = true } hotshot = { workspace = true } +hotshot-example-types = { workspace = true } hotshot-query-service = { workspace = true } hotshot-stake-table = { workspace = true } tokio = { workspace = true } diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index eb05938302..c60d089e62 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -13,12 +13,12 @@ use crate::service::{ server_message::ServerMessage, }; use async_lock::RwLock; -use espresso_types::{downgrade_leaf, PubKey, SeqTypes}; +use espresso_types::{PubKey, SeqTypes}; use futures::{ channel::mpsc::{self, Receiver, SendError, Sender}, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_types::event::{Event, EventType}; use serde::{Deserialize, Serialize}; use tokio::{spawn, task::JoinHandle}; @@ -88,7 +88,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Send + Unpin + 'static, - K2: Sink, Error = SendError> + Send + Unpin + 'static, + K2: Sink, Error = SendError> + Send + Unpin + 'static, { let task_handle = spawn(Self::process_messages( event_stream, @@ -107,7 +107,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Unpin, - K2: Sink, Error = SendError> + Unpin, + K2: Sink, Error = SendError> + Unpin, { let mut event_stream = event_receiver; let mut url_sender = url_sender; @@ -128,9 +128,8 @@ impl HotShotEventProcessingTask { EventType::Decide { leaf_chain, .. 
} => { for leaf_info in leaf_chain.iter().rev() { let leaf2 = leaf_info.leaf.clone(); - let leaf = downgrade_leaf(leaf2); - let send_result = leaf_sender.send(leaf).await; + let send_result = leaf_sender.send(leaf2).await; if let Err(err) = send_result { tracing::error!("leaf sender closed: {}", err); panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); @@ -280,7 +279,7 @@ impl Drop for ProcessExternalMessageHandlingTask { pub async fn create_node_validator_processing( config: NodeValidatorConfig, internal_client_message_receiver: Receiver>>, - leaf_receiver: Receiver>, + leaf_receiver: Receiver>, ) -> Result>, CreateNodeValidatorProcessingError> { let client_thread_state = ClientThreadState::>::new( Default::default(), diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index 90d1867474..b364c105ee 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -11,7 +11,7 @@ use futures::{ channel::mpsc::{self, Sender}, FutureExt, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::light_client::{CircuitField, StateVerKey}; use hotshot_types::signature_key::BLSPubKey; @@ -461,11 +461,11 @@ impl HotshotQueryServiceLeafStreamRetriever { } impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { - type Item = Leaf; + type Item = Leaf2; type ItemError = hotshot_query_service::Error; type Error = hotshot_query_service::Error; type Stream = surf_disco::socket::Connection< - Leaf, + Leaf2, surf_disco::socket::Unsupported, Self::ItemError, Version01, @@ -496,7 +496,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { "availability/stream/leaves/{}", start_block_height )) - .subscribe::() + .subscribe::() .await; let leaves_stream = match leaves_stream_result { 
@@ -540,8 +540,8 @@ impl ProcessProduceLeafStreamTask { /// returned state. pub fn new(leaf_stream_retriever: R, leaf_sender: K) -> Self where - R: LeafStreamRetriever> + Send + Sync + 'static, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever> + Send + Sync + 'static, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // let future = Self::process_consume_leaf_stream(leaf_stream_retriever, leaf_sender); let task_handle = spawn(Self::connect_and_process_leaves( @@ -556,8 +556,8 @@ impl ProcessProduceLeafStreamTask { async fn connect_and_process_leaves(leaf_stream_retriever: R, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // We want to try and ensure that we are connected to the HotShot Query // Service, and are consuming leaves. @@ -596,7 +596,7 @@ impl ProcessProduceLeafStreamTask { leaf_stream_receiver: &R, ) -> Result where - R: LeafStreamRetriever>, + R: LeafStreamRetriever>, { let backoff_params = BackoffParams::default(); let mut delay = Duration::ZERO; @@ -639,8 +639,8 @@ impl ProcessProduceLeafStreamTask { /// will return. 
async fn process_consume_leaf_stream(leaves_stream: R::Stream, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { let mut leaf_sender = leaf_sender; let mut leaves_stream = leaves_stream; diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 97a06fac6f..1ca1415d57 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1196,12 +1196,12 @@ pub mod tests { }; use async_lock::RwLock; use bitvec::vec::BitVec; - use espresso_types::{Leaf, NodeState, ValidatedState}; + use espresso_types::{Leaf2, NodeState, ValidatedState}; use futures::{ channel::mpsc::{self, Sender}, SinkExt, StreamExt, }; - use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::{ @@ -1370,7 +1370,7 @@ pub mod tests { #[tokio::test(flavor = "multi_thread")] #[cfg(feature = "testing")] async fn test_process_client_handling_stream_request_latest_blocks_snapshot() { - use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_example_types::node_types::TestVersions; use super::clone_block_detail; use crate::service::data_state::create_block_detail_from_leaf; @@ -1378,7 +1378,7 @@ pub mod tests { let (_, _, _, mut data_state) = create_test_data_state(); let client_thread_state = Arc::new(RwLock::new(create_test_client_thread_state())); let leaf_1 = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let block_1 = create_block_detail_from_leaf(&leaf_1); data_state.add_latest_block(clone_block_detail(&block_1)); @@ -1619,7 +1619,7 @@ pub 
mod tests { // send a new leaf let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let expected_block = create_block_detail_from_leaf(&leaf); let arc_expected_block = Arc::new(expected_block); diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index 6325a69b83..ae820f4e5d 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -9,7 +9,7 @@ use futures::{channel::mpsc::SendError, Sink, SinkExt, Stream, StreamExt}; use hotshot_query_service::{ availability::{QueryableHeader, QueryablePayload}, explorer::{BlockDetail, ExplorerHeader, Timestamp}, - Leaf, Resolvable, + Leaf2, Resolvable, }; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::{ @@ -151,7 +151,7 @@ impl DataState { /// [create_block_detail_from_leaf] is a helper function that will build a /// [BlockDetail] from the reference to [Leaf]. -pub fn create_block_detail_from_leaf(leaf: &Leaf) -> BlockDetail { +pub fn create_block_detail_from_leaf(leaf: &Leaf2) -> BlockDetail { let block_header = leaf.block_header(); let block_payload = &leaf.block_payload().unwrap_or(Payload::empty().0); @@ -223,7 +223,7 @@ impl std::error::Error for ProcessLeafError { /// computed into a [BlockDetail] and sent to the [Sink] so that it can be /// processed for real-time considerations. 
async fn process_incoming_leaf( - leaf: Leaf, + leaf: Leaf2, data_state: Arc>, mut block_sender: BDSink, mut voters_sender: BVSink, @@ -339,7 +339,7 @@ impl ProcessLeafStreamTask { voters_sender: K2, ) -> Self where - S: Stream> + Send + Sync + Unpin + 'static, + S: Stream> + Send + Sync + Unpin + 'static, K1: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, K2: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { @@ -363,7 +363,7 @@ impl ProcessLeafStreamTask { block_sender: BDSink, voters_senders: BVSink, ) where - S: Stream> + Unpin, + S: Stream> + Unpin, Header: BlockHeader + QueryableHeader + ExplorerHeader, Payload: BlockPayload, BDSink: Sink, Error = SendError> + Clone + Unpin, @@ -569,10 +569,11 @@ mod tests { }; use async_lock::RwLock; use espresso_types::{ - v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf, NodeState, ValidatedState, + v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; - use hotshot_query_service::testing::mocks::MockVersions; + + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::time::timeout; @@ -628,7 +629,7 @@ mod tests { }; let instance_state = NodeState::mock(); - let sample_leaf = Leaf::genesis::(&validated_state, &instance_state).await; + let sample_leaf = Leaf2::genesis::(&validated_state, &instance_state).await; let mut leaf_sender = leaf_sender; // We should be able to send a leaf without issue diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 1d9cb79e6d..b975a9d8fe 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4986,6 +4986,7 @@ dependencies = [ "prometheus", "refinery", "refinery-core", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -9514,6 +9515,7 @@ dependencies = [ "rand_chacha 0.3.1", 
"rand_distr", "request-response", + "semver 1.0.25", "sequencer-utils", "serde", "serde_json", diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 1c5d9b5347..f5982e981f 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -103,6 +103,7 @@ rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } request-response = { path = "../request-response" } +semver = { workspace = true } sequencer-utils = { path = "../utils" } serde = { workspace = true } serde_json = { workspace = true } diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql new file mode 100644 index 0000000000..9a25f42c20 --- /dev/null +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -0,0 +1,54 @@ +CREATE TABLE anchor_leaf2 ( + view BIGINT PRIMARY KEY, + leaf BYTEA, + qc BYTEA +); + + +CREATE TABLE da_proposal2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + +CREATE TABLE vid_share2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + + +CREATE TABLE undecided_state2 ( + -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or + -- update that there is only a single entry in this table: the latest known state. 
+    id INT PRIMARY KEY, + + leaves BYTEA NOT NULL, + state BYTEA NOT NULL +); + + +CREATE TABLE quorum_proposals2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR, + data BYTEA +); + +CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash); +CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash); +CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash); + +CREATE TABLE quorum_certificate2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BYTEA NOT NULL +); + +CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash); + +CREATE TABLE epoch_migration ( + table_name TEXT PRIMARY KEY, + completed bool NOT NULL DEFAULT FALSE +); + +INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate'); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql new file mode 100644 index 0000000000..c11ec79b0a --- /dev/null +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -0,0 +1,54 @@ +CREATE TABLE anchor_leaf2 ( + view BIGINT PRIMARY KEY, + leaf BLOB, + qc BLOB +); + + +CREATE TABLE da_proposal2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BLOB +); + +CREATE TABLE vid_share2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BLOB +); + + +CREATE TABLE undecided_state2 ( + -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or + -- update that there is only a single entry in this table: the latest known state.
+    id INT PRIMARY KEY, + + leaves BLOB NOT NULL, + state BLOB NOT NULL +); + + +CREATE TABLE quorum_proposals2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR, + data BLOB +); + +CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals2 (leaf_hash); +CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal2 (payload_hash); +CREATE INDEX vid_share2_payload_hash_idx ON vid_share2 (payload_hash); + +CREATE TABLE quorum_certificate2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BLOB NOT NULL +); + +CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate2 (leaf_hash); + +CREATE TABLE epoch_migration ( + table_name TEXT PRIMARY KEY, + completed bool NOT NULL DEFAULT FALSE +); + +INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate'); \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 0faaa03514..a2eaf1ea35 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1095,7 +1095,7 @@ mod api_tests { use espresso_types::MockSequencerVersions; use espresso_types::{ traits::{EventConsumer, PersistenceOptions}, - Header, Leaf, Leaf2, NamespaceId, + Header, Leaf2, NamespaceId, }; use ethers::utils::Anvil; use futures::{future, stream::StreamExt}; @@ -1104,18 +1104,19 @@ mod api_tests { AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; - use hotshot_query_service::VidCommitment; - use hotshot_types::data::vid_disperse::ADVZDisperseShare; - use hotshot_types::vid::advz::advz_scheme; + use hotshot_types::data::ns_table::parse_ns_table; + use hotshot_types::data::vid_disperse::VidDisperseShare2; + use hotshot_types::data::{DaProposal2, EpochNumber, VidCommitment}; + use hotshot_types::simple_certificate::QuorumCertificate2; + + use hotshot_types::vid::avidm::{init_avidm_param, AvidMScheme}; use hotshot_types::{ - data::{DaProposal, QuorumProposal2, QuorumProposalWrapper}, + 
data::{QuorumProposal2, QuorumProposalWrapper}, event::LeafInfo, message::Proposal, - simple_certificate::QuorumCertificate, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, }; - use jf_vid::VidScheme; use portpicker::pick_unused_port; use sequencer_utils::test_utils::setup_test; use std::fmt::Debug; @@ -1287,21 +1288,26 @@ mod api_tests { // Create two non-consecutive leaf chains. let mut chain1 = vec![]; - let genesis = Leaf::genesis::(&Default::default(), &NodeState::mock()).await; + let genesis = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let payload = genesis.block_payload().unwrap(); let payload_bytes_arc = payload.encode(); - let disperse = advz_scheme(2).disperse(payload_bytes_arc.clone()).unwrap(); - let payload_commitment = disperse.commit; + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(payload.byte_len().as_usize(), &payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &payload_bytes_arc, ns_table).unwrap(); + let mut quorum_proposal = QuorumProposalWrapper:: { proposal: QuorumProposal2:: { block_header: genesis.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1309,12 +1315,11 @@ mod api_tests { epoch: None, }, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let mut justify_qc = qc.clone(); for i in 0..5 { @@ -1332,7 +1337,7 @@ mod api_tests { PubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum_proposal"); persistence - 
.append_quorum_proposal(&Proposal { + .append_quorum_proposal2(&Proposal { data: quorum_proposal.clone(), signature: quorum_proposal_signature, _pd: Default::default(), @@ -1341,25 +1346,27 @@ mod api_tests { .unwrap(); // Include VID information for each leaf. - let share = ADVZDisperseShare:: { + let share = VidDisperseShare2:: { view_number: leaf.view_number(), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common.clone(), + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), }; persistence - .append_vid(&share.to_proposal(&privkey).unwrap()) + .append_vid2(&share.to_proposal(&privkey).unwrap()) .await .unwrap(); // Include payload information for each leaf. let block_payload_signature = PubKey::sign(&privkey, &payload_bytes_arc).expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: payload_bytes_arc.clone(), metadata: payload.ns_table().clone(), view_number: leaf.view_number(), + epoch: Some(EpochNumber::new(0)), }; let da_proposal = Proposal { data: da_proposal_inner, @@ -1367,7 +1374,7 @@ mod api_tests { _pd: Default::default(), }; persistence - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); } @@ -1413,8 +1420,8 @@ mod api_tests { for (leaf, qc) in chain1.iter().chain(&chain2) { tracing::info!(height = leaf.height(), "check archive"); let qd = data_source.get_leaf(leaf.height() as usize).await.await; - let stored_leaf: Leaf2 = qd.leaf().clone().into(); - let stored_qc = qd.qc().clone().to_qc2(); + let stored_leaf: Leaf2 = qd.leaf().clone(); + let stored_qc = qd.qc().clone(); assert_eq!(&stored_leaf, leaf); assert_eq!(&stored_qc, qc); @@ -1483,15 +1490,13 @@ mod api_tests { )); let consumer = ApiEventConsumer::from(data_source.clone()); - let mut qc = 
QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; // Append the genesis leaf. We don't use this for the test, because the update function will // automatically fill in the missing data for genesis. We just append this to get into a @@ -1500,7 +1505,7 @@ mod api_tests { persistence .append_decided_leaves( leaf.view_number(), - [(&leaf_info(leaf.clone().into()), qc.clone())], + [(&leaf_info(leaf.clone()), qc.clone())], &consumer, ) .await @@ -1538,10 +1543,7 @@ mod api_tests { .unwrap(); // Check that we still processed the leaf. - assert_eq!( - leaf, - data_source.get_leaf(1).await.await.leaf().clone().into() - ); + assert_eq!(leaf, data_source.get_leaf(1).await.await.leaf().clone()); assert!(data_source.get_vid_common(1).await.is_pending()); assert!(data_source.get_block(1).await.is_pending()); } diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 651a38e796..91cecf6743 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -91,6 +91,7 @@ type AvailabilityApi = Api, availabil // Snafu has been replaced by `this_error` everywhere. 
// However, the query service still uses snafu pub(super) fn availability( + api_ver: semver::Version, ) -> Result> where N: ConnectedNetwork, @@ -105,6 +106,7 @@ where let mut api = availability::define_api::, SeqTypes, _>( &options, SequencerApiVersion::instance(), + api_ver, )?; api.get("getnamespaceproof", move |req, state| { diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index 794c3b6fd5..2db7fd4ea3 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -274,7 +274,25 @@ impl Options { app.register_module("status", status_api)?; // Initialize availability and node APIs (these both use the same data source). - app.register_module("availability", endpoints::availability()?)?; + + // Note: We initialize two versions of the availability module: `availability/v0` and `availability/v1`. + // - `availability/v0/leaf/0` returns the old `Leaf1` type for backward compatibility. + // - `availability/v1/leaf/0` returns the new `Leaf2` type + + // initialize the availability module for API version V0. + // This ensures compatibility for nodes that expect `Leaf1` for leaf endpoints + app.register_module( + "availability", + endpoints::availability("0.0.1".parse().unwrap())?, + )?; + + // initialize the availability module for API version V1. 
+ // This enables support for the new `Leaf2` type + app.register_module( + "availability", + endpoints::availability("1.0.0".parse().unwrap())?, + )?; + app.register_module("node", endpoints::node()?)?; // Initialize submit API diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 7106f9fa52..8af85c7b3b 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -4,7 +4,7 @@ use committable::{Commitment, Committable}; use espresso_types::{ get_l1_deposits, v0_99::{ChainConfig, IterableFeeInfo}, - BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf, Leaf2, NodeState, ValidatedState, + BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use hotshot::traits::ValidatedState as _; use hotshot_query_service::{ @@ -21,7 +21,7 @@ use hotshot_query_service::{ Resolvable, }; use hotshot_types::{ - data::{QuorumProposal, ViewNumber}, + data::{QuorumProposalWrapper, ViewNumber}, message::Proposal, traits::node_implementation::ConsensusTime, }; @@ -261,7 +261,7 @@ async fn load_accounts( } } - Ok((snapshot, leaf.leaf().clone().into())) + Ok((snapshot, leaf.leaf().clone())) } async fn load_chain_config( @@ -290,7 +290,7 @@ async fn reconstruct_state( .get_leaf((from_height as usize).into()) .await .context(format!("leaf {from_height} not available"))?; - let from_leaf: Leaf2 = from_leaf.leaf().clone().into(); + let from_leaf: Leaf2 = from_leaf.leaf().clone(); ensure!( from_leaf.view_number() < to_view, "state reconstruction: starting state {:?} must be before ending state {to_view:?}", @@ -444,13 +444,14 @@ where P: Type + for<'q> Encode<'q, Db>, { let (data,) = query_as::<(Vec,)>(&format!( - "SELECT data FROM quorum_proposals WHERE {where_clause} LIMIT 1", + "SELECT data FROM quorum_proposals2 WHERE {where_clause} LIMIT 1", )) .bind(param) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - Ok(Leaf::from_quorum_proposal(&proposal.data).into()) + let proposal: Proposal> = + 
bincode::deserialize(&data)?; + Ok(Leaf2::from_quorum_proposal(&proposal.data)) } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/block/full_payload.rs b/sequencer/src/block/full_payload.rs deleted file mode 100644 index 61247ec87e..0000000000 --- a/sequencer/src/block/full_payload.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod ns_proof; -mod ns_table; -mod payload; - -pub use ns_proof::NsProof; -pub use ns_table::{NsIndex, NsTable, NsTableValidationError}; -pub use payload::{Payload, PayloadByteLen}; - -pub(in crate::block) use ns_table::NsIter; diff --git a/sequencer/src/block/full_payload/ns_proof.rs b/sequencer/src/block/full_payload/ns_proof.rs deleted file mode 100644 index 104ca45f4f..0000000000 --- a/sequencer/src/block/full_payload/ns_proof.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::{ - block::{ - full_payload::{NsIndex, NsTable, Payload, PayloadByteLen}, - namespace_payload::NsPayloadOwned, - }, - NamespaceId, Transaction, -}; -use hotshot_types::{ - traits::EncodeBytes, - vid::{vid_scheme, LargeRangeProofType, VidCommitment, VidCommon, VidSchemeType}, -}; -use jf_vid::{ - payload_prover::{PayloadProver, Statement}, - VidScheme, -}; -use serde::{Deserialize, Serialize}; - -/// Proof of correctness for namespace payload bytes in a block. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct NsProof { - ns_index: NsIndex, - ns_payload: NsPayloadOwned, - ns_proof: Option, // `None` if ns_payload is empty -} - -impl NsProof { - /// Returns the payload bytes for the `index`th namespace, along with a - /// proof of correctness for those bytes. Returns `None` on error. - /// - /// The namespace payload [`NsPayloadOwned`] is included as a hidden field - /// in the returned [`NsProof`]. A conventional API would instead return - /// `(NsPayload, NsProof)` and [`NsProof`] would not contain the namespace - /// payload. - /// ([`TxProof::new`](crate::block::namespace_payload::TxProof::new) - /// conforms to this convention.) 
In the future we should change this API to - /// conform to convention. But that would require a change to our RPC - /// endpoint API at [`endpoints`](crate::api::endpoints), which is a hassle. - pub fn new(payload: &Payload, index: &NsIndex, common: &VidCommon) -> Option { - let payload_byte_len = payload.byte_len(); - if !payload_byte_len.is_consistent(common) { - tracing::warn!( - "payload byte len {} inconsistent with common {}", - payload_byte_len, - VidSchemeType::get_payload_byte_len(common) - ); - return None; // error: payload byte len inconsistent with common - } - if !payload.ns_table().in_bounds(index) { - tracing::warn!("ns_index {:?} out of bounds", index); - return None; // error: index out of bounds - } - let ns_payload_range = payload.ns_table().ns_range(index, &payload_byte_len); - - // TODO vid_scheme() arg should be u32 to match get_num_storage_nodes - // https://github.com/EspressoSystems/HotShot/issues/3298 - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .ok()?, // error: failure to convert u32 to usize - ); - - let ns_proof = if ns_payload_range.as_block_range().is_empty() { - None - } else { - Some( - vid.payload_proof(payload.encode(), ns_payload_range.as_block_range()) - .ok()?, // error: internal to payload_proof() - ) - }; - - Some(NsProof { - ns_index: index.clone(), - ns_payload: payload.read_ns_payload(&ns_payload_range).to_owned(), - ns_proof, - }) - } - - /// Verify a [`NsProof`] against a payload commitment. Returns `None` on - /// error or if verification fails. - /// - /// There is no [`NsPayload`](crate::block::namespace_payload::NsPayload) - /// arg because this data is already included in the [`NsProof`]. See - /// [`NsProof::new`] for discussion. - /// - /// If verification is successful then return `(Vec, - /// NamespaceId)` obtained by post-processing the underlying - /// [`NsPayload`](crate::block::namespace_payload::NsPayload). Why? 
This - /// method might be run by a client in a WASM environment who might be - /// running non-Rust code, in which case the client is unable to perform - /// this post-processing himself. - pub fn verify( - &self, - ns_table: &NsTable, - commit: &VidCommitment, - common: &VidCommon, - ) -> Option<(Vec, NamespaceId)> { - VidSchemeType::is_consistent(commit, common).ok()?; - if !ns_table.in_bounds(&self.ns_index) { - return None; // error: index out of bounds - } - - let range = ns_table - .ns_range(&self.ns_index, &PayloadByteLen::from_vid_common(common)) - .as_block_range(); - - match (&self.ns_proof, range.is_empty()) { - (Some(proof), false) => { - // TODO vid_scheme() arg should be u32 to match get_num_storage_nodes - // https://github.com/EspressoSystems/HotShot/issues/3298 - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .ok()?, // error: failure to convert u32 to usize - ); - - vid.payload_verify( - Statement { - payload_subslice: self.ns_payload.as_bytes_slice(), - range, - commit, - common, - }, - proof, - ) - .ok()? // error: internal to payload_verify() - .ok()?; // verification failure - } - (None, true) => {} // 0-length namespace, nothing to verify - (None, false) => { - tracing::error!( - "ns verify: missing proof for nonempty ns payload range {:?}", - range - ); - return None; - } - (Some(_), true) => { - tracing::error!("ns verify: unexpected proof for empty ns payload range"); - return None; - } - } - - // verification succeeded, return some data - let ns_id = ns_table.read_ns_id_unchecked(&self.ns_index); - Some((self.ns_payload.export_all_txs(&ns_id), ns_id)) - } - - /// Return all transactions in the namespace whose payload is proven by - /// `self`. The namespace ID for each returned [`Transaction`] is set to - /// `ns_id`. - /// - /// # Design warning - /// - /// This method relies on a promise that a [`NsProof`] stores the entire - /// namespace payload. 
If in the future we wish to remove the payload from a - /// [`NsProof`] then this method can no longer be supported. - /// - /// In that case, use the following a workaround: - /// - Given a [`NamespaceId`], get a [`NsIndex`] `i` via - /// [`NsTable::find_ns_id`]. - /// - Use `i` to get a - /// [`NsPayload`](crate::block::namespace_payload::NsPayload) `p` via - /// [`Payload::ns_payload`]. - /// - Use `p` to get the desired [`Vec`] via - /// [`NsPayload::export_all_txs`](crate::block::namespace_payload::NsPayload::export_all_txs). - /// - /// This workaround duplicates the work done in [`NsProof::new`]. If you - /// don't like that then you could instead hack [`NsProof::new`] to return a - /// pair `(NsProof, Vec)`. - pub fn export_all_txs(&self, ns_id: &NamespaceId) -> Vec { - self.ns_payload.export_all_txs(ns_id) - } -} diff --git a/sequencer/src/block/full_payload/ns_table.rs b/sequencer/src/block/full_payload/ns_table.rs deleted file mode 100644 index d2d2290ef1..0000000000 --- a/sequencer/src/block/full_payload/ns_table.rs +++ /dev/null @@ -1,467 +0,0 @@ -//! Types related to a namespace table. -//! -//! All code that needs to know the binary format of a namespace table is -//! restricted to this file. -//! -//! See [`NsTable`] for a full specification of the binary format of a namespace -//! table. -use crate::{ - block::{ - full_payload::payload::PayloadByteLen, - namespace_payload::NsPayloadRange, - uint_bytes::{ - bytes_serde_impl, u32_from_bytes, u32_to_bytes, usize_from_bytes, usize_to_bytes, - }, - }, - NamespaceId, -}; -use committable::{Commitment, Committable, RawCommitmentBuilder}; -use derive_more::Display; -use hotshot_types::traits::EncodeBytes; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -use std::{collections::HashSet, ops::Range, sync::Arc}; -use thiserror::Error; - -/// Byte lengths for the different items that could appear in a namespace table. 
-const NUM_NSS_BYTE_LEN: usize = 4; -const NS_OFFSET_BYTE_LEN: usize = 4; - -// TODO prefer [`NS_ID_BYTE_LEN`] set to `8` because [`NamespaceId`] is a `u64` -// but we need to maintain serialization compatibility. -// https://github.com/EspressoSystems/espresso-sequencer/issues/1574 -const NS_ID_BYTE_LEN: usize = 4; - -/// Raw binary data for a namespace table. -/// -/// Any sequence of bytes is a valid [`NsTable`]. -/// -/// # Binary format of a namespace table -/// -/// Byte lengths for the different items that could appear in a namespace table -/// are specified in local private constants [`NUM_NSS_BYTE_LEN`], -/// [`NS_OFFSET_BYTE_LEN`], [`NS_ID_BYTE_LEN`]. -/// -/// ## Number of entries in the namespace table -/// -/// The first [`NUM_NSS_BYTE_LEN`] bytes of the namespace table indicate the -/// number `n` of entries in the table as a little-endian unsigned integer. If -/// the entire table length is smaller than [`NUM_NSS_BYTE_LEN`] then the -/// missing bytes are zero-padded. -/// -/// The bytes in the namespace table beyond the first [`NUM_NSS_BYTE_LEN`] bytes -/// encode table entries. Each entry consumes exactly [`NS_ID_BYTE_LEN`] `+` -/// [`NS_OFFSET_BYTE_LEN`] bytes. -/// -/// The number `n` could be anything, including a number much larger than the -/// number of entries that could fit in the namespace table. As such, the actual -/// number of entries in the table is defined as the minimum of `n` and the -/// maximum number of whole entries that could fit in the table. -/// -/// See [`Self::in_bounds`] for clarification. -/// -/// ## Namespace table entry -/// -/// ### Namespace ID -/// -/// The first [`NS_ID_BYTE_LEN`] bytes of each table entry indicate the -/// [`NamespaceId`] for this namespace. Any table entry whose [`NamespaceId`] is -/// a duplicate of a previous entry is ignored. A correct count of the number of -/// *unique* (non-ignored) entries is given by `NsTable::iter().count()`. 
-/// -/// ### Namespace offset -/// -/// The next [`NS_OFFSET_BYTE_LEN`] bytes of each table entry indicate the -/// end-index of a namespace in the block payload bytes -/// [`Payload`](super::payload::Payload). This end-index is a little-endian -/// unsigned integer. -/// -/// # How to deduce a namespace's byte range -/// -/// In order to extract the payload bytes of a single namespace `N` from the -/// block payload one needs both the start- and end-indices for `N`. -/// -/// See [`Self::ns_range`] for clarification. What follows is a description of -/// what's implemented in [`Self::ns_range`]. -/// -/// If `N` occupies the `i`th entry in the namespace table for `i>0` then the -/// start-index for `N` is defined as the end-index of the `(i-1)`th entry in -/// the table. -/// -/// Even if the `(i-1)`the entry would otherwise be ignored (due to a duplicate -/// [`NamespaceId`] or any other reason), that entry's end-index still defines -/// the start-index of `N`. This rule guarantees that both start- and -/// end-indices for any namespace `N` can be read from a constant-size byte -/// range in the namespace table, and it eliminates the need to traverse an -/// unbounded number of previous entries of the namespace table looking for a -/// previous non-ignored entry. -/// -/// The start-index of the 0th entry in the table is implicitly defined to be -/// `0`. -/// -/// The start- and end-indices `(declared_start, declared_end)` declared in the -/// namespace table could be anything. As such, the actual start- and -/// end-indices `(start, end)` are defined so as to ensure that the byte range -/// is well-defined and in-bounds for the block payload: -/// ```ignore -/// end = min(declared_end, block_payload_byte_length) -/// start = min(declared_start, end) -/// ``` -/// -/// In a "honestly-prepared" namespace table the end-index of the final -/// namespace equals the byte length of the block payload. 
(Otherwise the block -/// payload might have bytes that are not included in any namespace.) -/// -/// It is possible that a namespace table could indicate two distinct namespaces -/// whose byte ranges overlap, though no "honestly-prepared" namespace table -/// would do this. -/// -/// TODO prefer [`NsTable`] to be a newtype like this -/// ```ignore -/// #[repr(transparent)] -/// #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -/// #[serde(transparent)] -/// pub struct NsTable(#[serde(with = "base64_bytes")] Vec); -/// ``` -/// but we need to maintain serialization compatibility. -/// -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -// Boilerplate: `#[serde(remote = "Self")]` needed to check invariants on -// deserialization. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -#[serde(remote = "Self")] -pub struct NsTable { - #[serde(with = "base64_bytes")] - bytes: Vec, -} - -// Boilerplate: `#[serde(remote = "Self")]` allows invariant checking on -// deserialization via re-implementation of `Deserialize` in terms of default -// derivation. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -impl<'de> Deserialize<'de> for NsTable { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let unchecked = NsTable::deserialize(deserializer)?; - unchecked - .validate_deserialization_invariants() - .map_err(de::Error::custom)?; - Ok(unchecked) - } -} - -// Boilerplate: use of `#[serde(remote = "Self")]` must include a trivial -// `Serialize` impl. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -impl Serialize for NsTable { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - NsTable::serialize(self, serializer) - } -} - -impl NsTable { - /// Search the namespace table for the ns_index belonging to `ns_id`. 
- pub fn find_ns_id(&self, ns_id: &NamespaceId) -> Option { - self.iter() - .find(|index| self.read_ns_id_unchecked(index) == *ns_id) - } - - /// Number of entries in the namespace table. - /// - /// Defined as the maximum number of entries that could fit in the namespace - /// table, ignoring what's declared in the table header. - pub fn len(&self) -> NumNss { - NumNss( - self.bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) - / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN), - ) - } - - /// Iterator over all unique namespaces in the namespace table. - pub fn iter(&self) -> impl Iterator + '_ { - NsIter::new(&self.len()) - } - - /// Read the namespace id from the `index`th entry from the namespace table. - /// Returns `None` if `index` is out of bounds. - /// - /// TODO I want to restrict visibility to `pub(crate)` or lower but this - /// method is currently used in `nasty-client`. - pub fn read_ns_id(&self, index: &NsIndex) -> Option { - if !self.in_bounds(index) { - None - } else { - Some(self.read_ns_id_unchecked(index)) - } - } - - /// Like [`Self::read_ns_id`] except `index` is not checked. Use [`Self::in_bounds`] as needed. - pub fn read_ns_id_unchecked(&self, index: &NsIndex) -> NamespaceId { - let start = index.0 * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NUM_NSS_BYTE_LEN; - - // TODO hack to deserialize `NamespaceId` from `NS_ID_BYTE_LEN` bytes - // https://github.com/EspressoSystems/espresso-sequencer/issues/1574 - NamespaceId::from(u32_from_bytes::( - &self.bytes[start..start + NS_ID_BYTE_LEN], - )) - } - - /// Does the `index`th entry exist in the namespace table? - pub fn in_bounds(&self, index: &NsIndex) -> bool { - self.len().in_bounds(index) - } - - /// Are the bytes of this [`NsTable`] uncorrupted? - /// - /// # Checks - /// 1. Byte length must hold a whole number of entries. - /// 2. All offsets must increase monotonically. Offsets - /// must be nonzero. Namespace IDs must be unique. - /// 3. Header consistent with byte length. 
(Obsolete after - /// .) - /// 4. Final offset must equal `payload_byte_len`. (Obsolete after - /// .) - /// If the namespace table is empty then `payload_byte_len` must be 0. - pub fn validate( - &self, - payload_byte_len: &PayloadByteLen, - ) -> Result<(), NsTableValidationError> { - use NsTableValidationError::*; - - // conditions 1-3 - self.validate_deserialization_invariants()?; - - // condition 4 - let len = self.len().0; - if len > 0 { - let final_ns_index = NsIndex(len - 1); - let final_offset = self.read_ns_offset_unchecked(&final_ns_index); - if final_offset != payload_byte_len.as_usize() { - return Err(InvalidFinalOffset); - } - } else if payload_byte_len.as_usize() != 0 { - return Err(ExpectNonemptyNsTable); - } - - Ok(()) - } - - // CRATE-VISIBLE HELPERS START HERE - - /// Read subslice range for the `index`th namespace from the namespace - /// table. - pub(in crate::block) fn ns_range( - &self, - index: &NsIndex, - payload_byte_len: &PayloadByteLen, - ) -> NsPayloadRange { - let end = self - .read_ns_offset_unchecked(index) - .min(payload_byte_len.as_usize()); - let start = if index.0 == 0 { - 0 - } else { - self.read_ns_offset_unchecked(&NsIndex(index.0 - 1)) - } - .min(end); - NsPayloadRange::new(start, end) - } - - // PRIVATE HELPERS START HERE - - /// Read the number of namespaces declared in the namespace table. THIS - /// QUANTITY IS NEVER USED. Instead use [`NsTable::len`]. - /// - /// TODO Delete this method after - /// - fn read_num_nss(&self) -> usize { - let num_nss_byte_len = NUM_NSS_BYTE_LEN.min(self.bytes.len()); - usize_from_bytes::(&self.bytes[..num_nss_byte_len]) - } - - /// Read the namespace offset from the `index`th entry from the namespace table. 
- fn read_ns_offset_unchecked(&self, index: &NsIndex) -> usize { - let start = - index.0 * (NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN) + NUM_NSS_BYTE_LEN + NS_ID_BYTE_LEN; - usize_from_bytes::(&self.bytes[start..start + NS_OFFSET_BYTE_LEN]) - } - - /// Helper for [`NsTable::validate`], used in our custom [`serde`] - /// implementation. - /// - /// Checks conditions 1-3 of [`NsTable::validate`]. Those conditions can be - /// checked by looking only at the contents of the [`NsTable`]. - fn validate_deserialization_invariants(&self) -> Result<(), NsTableValidationError> { - use NsTableValidationError::*; - - // Byte length for a table with `x` entries must be exactly `x * - // NsTableBuilder::entry_byte_len() + - // NsTableBuilder::header_byte_len()`. - // - // Explanation for the following `if` condition: - // - // The above condition is equivalent to `[byte length] - - // header_byte_len` equals 0 modulo `entry_byte_len`. In order to - // compute `[byte length] - header_byte_len` we must first check that - // `[byte length]` is not exceeded by `header_byte_len` - if self.bytes.len() < NsTableBuilder::header_byte_len() - || (self.bytes.len() - NsTableBuilder::header_byte_len()) - % NsTableBuilder::entry_byte_len() - != 0 - { - return Err(InvalidByteLen); - } - - // Header must declare the correct number of namespaces - // - // TODO this check obsolete after - // https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - if self.len().0 != self.read_num_nss() { - return Err(InvalidHeader); - } - - // Offsets must increase monotonically. Offsets must - // be nonzero. 
Namespace IDs must be unique - { - let mut prev_offset = 0; - let mut repeat_ns_ids = HashSet::::new(); - for (ns_id, offset) in self.iter().map(|i| { - ( - self.read_ns_id_unchecked(&i), - self.read_ns_offset_unchecked(&i), - ) - }) { - if !repeat_ns_ids.insert(ns_id) { - return Err(DuplicateNamespaceId); - } - if offset <= prev_offset { - return Err(NonIncreasingEntries); - } - prev_offset = offset; - } - } - - Ok(()) - } -} - -impl EncodeBytes for NsTable { - fn encode(&self) -> Arc<[u8]> { - Arc::from(self.bytes.as_ref()) - } -} - -impl Committable for NsTable { - fn commit(&self) -> Commitment { - RawCommitmentBuilder::new(&Self::tag()) - .var_size_bytes(&self.bytes) - .finalize() - } - - fn tag() -> String { - "NSTABLE".into() - } -} - -/// Return type for [`NsTable::validate`]. -#[derive(Error, Debug, Display, Eq, PartialEq)] -pub enum NsTableValidationError { - InvalidByteLen, - NonIncreasingEntries, - DuplicateNamespaceId, - InvalidHeader, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - InvalidFinalOffset, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - ExpectNonemptyNsTable, -} - -pub struct NsTableBuilder { - bytes: Vec, - num_entries: usize, -} - -impl NsTableBuilder { - pub fn new() -> Self { - // pre-allocate space for the ns table header - Self { - bytes: Vec::from([0; NUM_NSS_BYTE_LEN]), - num_entries: 0, - } - } - - /// Add an entry to the namespace table. - pub fn append_entry(&mut self, ns_id: NamespaceId, offset: usize) { - // hack to serialize `NamespaceId` to `NS_ID_BYTE_LEN` bytes - self.bytes - .extend(u32_to_bytes::(u32::from(ns_id))); - self.bytes - .extend(usize_to_bytes::(offset)); - self.num_entries += 1; - } - - /// Serialize to bytes and consume self. 
- pub fn into_ns_table(self) -> NsTable { - let mut bytes = self.bytes; - // write the number of entries to the ns table header - bytes[..NUM_NSS_BYTE_LEN] - .copy_from_slice(&usize_to_bytes::(self.num_entries)); - NsTable { bytes } - } - - /// Byte length of a namespace table header. - pub const fn header_byte_len() -> usize { - NUM_NSS_BYTE_LEN - } - - /// Byte length of a single namespace table entry. - pub const fn entry_byte_len() -> usize { - NS_ID_BYTE_LEN + NS_OFFSET_BYTE_LEN - } -} - -/// Index for an entry in a ns table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct NsIndex(usize); -bytes_serde_impl!(NsIndex, to_bytes, [u8; NUM_NSS_BYTE_LEN], from_bytes); - -impl NsIndex { - pub fn to_bytes(&self) -> [u8; NUM_NSS_BYTE_LEN] { - usize_to_bytes::(self.0) - } - fn from_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -/// Number of entries in a namespace table. -pub struct NumNss(usize); - -impl NumNss { - pub fn in_bounds(&self, index: &NsIndex) -> bool { - index.0 < self.0 - } -} - -/// Return type for [`Payload::ns_iter`]. -pub(in crate::block) struct NsIter(Range); - -impl NsIter { - pub fn new(num_nss: &NumNss) -> Self { - Self(0..num_nss.0) - } -} - -// Simple `impl Iterator` delegates to `Range`. 
-impl Iterator for NsIter { - type Item = NsIndex; - - fn next(&mut self) -> Option { - self.0.next().map(NsIndex) - } -} - -#[cfg(test)] -mod test; diff --git a/sequencer/src/block/full_payload/ns_table/test.rs b/sequencer/src/block/full_payload/ns_table/test.rs deleted file mode 100644 index d0499ce6ef..0000000000 --- a/sequencer/src/block/full_payload/ns_table/test.rs +++ /dev/null @@ -1,251 +0,0 @@ -use super::{ - NsTable, NsTableBuilder, NsTableValidationError, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, -}; -use crate::{ - block::{ - test::ValidTest, - uint_bytes::{u32_max_from_byte_len, usize_max_from_byte_len, usize_to_bytes}, - }, - NamespaceId, Payload, -}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot::traits::BlockPayload; -use rand::{Rng, RngCore}; -use NsTableValidationError::*; - -#[test] -fn random_valid() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - for num_entries in 0..20 { - expect_valid(&random_valid_ns_table(num_entries, &mut rng)); - } -} - -#[test] -fn ns_table_byte_len() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - // Extremely small byte lengths should get rejected. - { - let mut ns_table = NsTable { bytes: Vec::new() }; - expect_invalid(&ns_table, InvalidByteLen); - expect_num_bytes_invalid(&mut ns_table, NsTableBuilder::header_byte_len(), &mut rng); - } - - // Add enough bytes for a new entry. - { - let mut ns_table = random_valid_ns_table(20, &mut rng); - expect_num_bytes_invalid(&mut ns_table, NsTableBuilder::entry_byte_len(), &mut rng); - } - - // Helper fn: add 1 byte to the `ns_table` `num_bytes` times. Expect - // invalidity in all but the final time. 
- fn expect_num_bytes_invalid(ns_table: &mut NsTable, num_bytes: usize, rng: &mut R) - where - R: RngCore, - { - for i in 0..num_bytes { - ns_table.bytes.push(rng.gen()); - if i == num_bytes - 1 { - break; // final iteration: no error expected - } - expect_invalid(ns_table, InvalidByteLen); - } - expect_invalid(ns_table, InvalidHeader); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn payload_byte_len() { - setup_logging(); - setup_backtrace(); - let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; - let mut rng = jf_utils::test_rng(); - let test = ValidTest::from_tx_lengths(test_case, &mut rng); - let mut block = - Payload::from_transactions(test.all_txs(), &Default::default(), &Default::default()) - .await - .unwrap() - .0; - let payload_byte_len = block.byte_len(); - let final_offset = block - .ns_table() - .read_ns_offset_unchecked(&block.ns_table().iter().last().unwrap()); - - // final offset matches payload byte len - block.ns_table().validate(&payload_byte_len).unwrap(); - - // Helper closure fn: modify the final offset of `block`'s namespace table - // by adding `diff` to it. Assert failure. - let mut modify_final_offset = |diff: isize| { - let ns_table_byte_len = block.ns_table().bytes.len(); - let old_final_offset: isize = final_offset.try_into().unwrap(); - let new_final_offset: usize = (old_final_offset + diff).try_into().unwrap(); - - block.ns_table_mut().bytes[ns_table_byte_len - NS_OFFSET_BYTE_LEN..] 
- .copy_from_slice(&usize_to_bytes::(new_final_offset)); - assert_eq!( - block.ns_table().validate(&payload_byte_len).unwrap_err(), - InvalidFinalOffset - ); - }; - - // final offset exceeds payload byte len - modify_final_offset(1); - - // final offset less than payload byte len - modify_final_offset(-1); - - // zero-length payload - let empty_block = Payload::from_transactions([], &Default::default(), &Default::default()) - .await - .unwrap() - .0; - assert_eq!(empty_block.ns_table().len().0, 0); - assert_eq!( - empty_block.ns_table().bytes, - usize_to_bytes::(0) - ); - empty_block - .ns_table() - .validate(&empty_block.byte_len()) - .unwrap(); - - // empty namespace table with nonempty payload - *block.ns_table_mut() = empty_block.ns_table().clone(); - assert_eq!( - block.ns_table().validate(&payload_byte_len).unwrap_err(), - ExpectNonemptyNsTable - ); -} - -#[test] -fn monotonic_increase() { - setup_logging(); - setup_backtrace(); - - // Duplicate namespace ID - two_entries_ns_table((5, 5), (5, 6), Some(DuplicateNamespaceId)); - - // Decreasing namespace ID - two_entries_ns_table((5, 5), (4, 6), None); - - // Duplicate offset - two_entries_ns_table((5, 5), (6, 5), Some(NonIncreasingEntries)); - - // Decreasing offset - two_entries_ns_table((5, 5), (6, 4), Some(NonIncreasingEntries)); - - // Zero namespace ID - two_entries_ns_table((0, 5), (6, 6), None); - - // Zero offset - two_entries_ns_table((5, 0), (6, 6), Some(NonIncreasingEntries)); - - // Helper fn: build a 2-entry NsTable, assert failure - fn two_entries_ns_table( - entry1: (u32, usize), - entry2: (u32, usize), - expect_err: Option, - ) { - let mut ns_table_builder = NsTableBuilder::new(); - ns_table_builder.append_entry(NamespaceId::from(entry1.0), entry1.1); - ns_table_builder.append_entry(NamespaceId::from(entry2.0), entry2.1); - let ns_table = ns_table_builder.into_ns_table(); - if let Some(err) = expect_err { - expect_invalid(&ns_table, err); - } else { - expect_valid(&ns_table); - } - } -} - -// 
TODO this test obsolete after -// https://github.com/EspressoSystems/espresso-sequencer/issues/1604 -#[test] -fn header() { - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - - for num_entries in 0..20 { - let mut ns_table = random_valid_ns_table(num_entries, &mut rng); - if num_entries != 0 { - set_header(&mut ns_table, 0); - set_header(&mut ns_table, num_entries - 1); - } - set_header(&mut ns_table, num_entries + 1); - set_header(&mut ns_table, usize_max_from_byte_len(NUM_NSS_BYTE_LEN)); - } - - // Helper fn: set the header of `ns_table` to declare `num_nss` entries, - // assert failure. - fn set_header(ns_table: &mut NsTable, num_nss: usize) { - ns_table.bytes[..NUM_NSS_BYTE_LEN] - .copy_from_slice(&usize_to_bytes::(num_nss)); - expect_invalid(ns_table, InvalidHeader); - } -} - -fn random_valid_ns_table(num_entries: usize, rng: &mut R) -> NsTable -where - R: RngCore, -{ - let (offset_max_increment, ns_id_max_increment) = if num_entries == 0 { - (0, 0) - } else { - let num_entries_u32: u32 = num_entries.try_into().unwrap(); - ( - usize_max_from_byte_len(NS_OFFSET_BYTE_LEN) / num_entries, - u32_max_from_byte_len(NS_ID_BYTE_LEN) / num_entries_u32, - ) - }; - - let mut ns_id = 0; - let mut offset = 0; - let mut ns_table_builder = NsTableBuilder::new(); - for _ in 0..num_entries { - // ns_id, offset must increase monotonically - ns_id += rng.gen_range(1..=ns_id_max_increment); - offset += rng.gen_range(1..=offset_max_increment); - ns_table_builder.append_entry(NamespaceId::from(ns_id), offset); - } - ns_table_builder.into_ns_table() -} - -fn expect_valid(ns_table: &NsTable) { - // `validate` should succeed - ns_table.validate_deserialization_invariants().unwrap(); - - // serde round-trip should succeed - let serde_bytes = bincode::serialize(ns_table).unwrap(); - let ns_table_serde: NsTable = bincode::deserialize(&serde_bytes).unwrap(); - assert_eq!(&ns_table_serde, ns_table); -} - -fn expect_invalid(ns_table: &NsTable, err: 
NsTableValidationError) { - use serde::de::Error; - - // `validate` should fail - assert_eq!( - ns_table.validate_deserialization_invariants().unwrap_err(), - err - ); - - // serde round-trip should fail - // - // need to use `to_string` because `bincode::Error`` is not `Eq` - let serde_bytes = bincode::serialize(ns_table).unwrap(); - assert_eq!( - bincode::deserialize::(&serde_bytes) - .unwrap_err() - .to_string(), - bincode::Error::custom(err).to_string(), - ); -} diff --git a/sequencer/src/block/full_payload/payload.rs b/sequencer/src/block/full_payload/payload.rs deleted file mode 100644 index bf2398c656..0000000000 --- a/sequencer/src/block/full_payload/payload.rs +++ /dev/null @@ -1,313 +0,0 @@ -use crate::{ - block::{ - full_payload::ns_table::{NsIndex, NsTable, NsTableBuilder}, - namespace_payload::{Index, Iter, NsPayload, NsPayloadBuilder, NsPayloadRange, TxProof}, - }, - ChainConfig, NamespaceId, NodeState, SeqTypes, Transaction, ValidatedState, -}; - -use async_trait::async_trait; -use committable::Committable; -use derive_more::Display; -use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::{ - traits::{BlockPayload, EncodeBytes}, - utils::BuilderCommitment, - vid::{VidCommon, VidSchemeType}, -}; -use jf_vid::VidScheme; -use serde::{Deserialize, Serialize}; -use sha2::Digest; -use std::{collections::BTreeMap, sync::Arc}; - -/// Raw payload data for an entire block. -/// -/// A block consists of two sequences of arbitrary bytes: -/// - `ns_table`: namespace table -/// - `ns_payloads`: namespace payloads -/// -/// Any sequence of bytes is a valid `ns_table`. Any sequence of bytes is a -/// valid `ns_payloads`. The contents of `ns_table` determine how to interpret -/// `ns_payload`. -/// -/// # Namespace table -/// -/// See [`NsTable`] for the format of a namespace table. -/// -/// # Namespace payloads -/// -/// A concatenation of payload bytes for multiple individual namespaces. 
-/// Namespace boundaries are dictated by `ns_table`. See [`NsPayload`] for the -/// format of a namespace payload. -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -pub struct Payload { - // Concatenated payload bytes for each namespace - // - // TODO want to rename thisfield to `ns_payloads`, but can't due to - // serialization compatibility. - #[serde(with = "base64_bytes")] - raw_payload: Vec, - - ns_table: NsTable, -} - -impl Payload { - pub fn ns_table(&self) -> &NsTable { - &self.ns_table - } - - /// Like [`QueryablePayload::transaction_with_proof`] except without the - /// proof. - pub fn transaction(&self, index: &Index) -> Option { - let ns_id = self.ns_table.read_ns_id(index.ns())?; - let ns_payload = self.ns_payload(index.ns()); - ns_payload.export_tx(&ns_id, index.tx()) - } - - // CRATE-VISIBLE HELPERS START HERE - - pub(in crate::block) fn read_ns_payload(&self, range: &NsPayloadRange) -> &NsPayload { - NsPayload::from_bytes_slice(&self.raw_payload[range.as_block_range()]) - } - - /// Convenience wrapper for [`Self::read_ns_payload`]. - /// - /// `index` is not checked. Use `self.ns_table().in_bounds()` as needed. - pub(in crate::block) fn ns_payload(&self, index: &NsIndex) -> &NsPayload { - let ns_payload_range = self.ns_table().ns_range(index, &self.byte_len()); - self.read_ns_payload(&ns_payload_range) - } - - pub(in crate::block) fn byte_len(&self) -> PayloadByteLen { - PayloadByteLen(self.raw_payload.len()) - } - - // PRIVATE HELPERS START HERE - - /// Need a sync version of [`BlockPayload::from_transactions`] in order to impl [`BlockPayload::empty`]. 
- fn from_transactions_sync( - transactions: impl IntoIterator>::Transaction> + Send, - chain_config: ChainConfig, - ) -> Result< - (Self, >::Metadata), - >::Error, - > { - // accounting for block byte length limit - let max_block_byte_len: usize = u64::from(chain_config.max_block_size) - .try_into() - .map_err(|_| >::Error::BlockBuilding)?; - let mut block_byte_len = NsTableBuilder::header_byte_len(); - - // add each tx to its namespace - let mut ns_builders = BTreeMap::::new(); - for tx in transactions.into_iter() { - // accounting for block byte length limit - block_byte_len += tx.size_in_block(!ns_builders.contains_key(&tx.namespace())); - if block_byte_len > max_block_byte_len { - tracing::warn!("transactions truncated to fit in maximum block byte length {max_block_byte_len}"); - break; - } - - let ns_builder = ns_builders.entry(tx.namespace()).or_default(); - ns_builder.append_tx(tx); - } - - // build block payload and namespace table - let mut payload = Vec::new(); - let mut ns_table_builder = NsTableBuilder::new(); - for (ns_id, ns_builder) in ns_builders { - payload.extend(ns_builder.into_bytes()); - ns_table_builder.append_entry(ns_id, payload.len()); - } - let ns_table = ns_table_builder.into_ns_table(); - let metadata = ns_table.clone(); - Ok(( - Self { - raw_payload: payload, - ns_table, - }, - metadata, - )) - } -} - -#[async_trait] -impl BlockPayload for Payload { - // TODO BlockPayload trait eliminate unneeded args, return vals of type - // `Self::Metadata` https://github.com/EspressoSystems/HotShot/issues/3300 - type Error = crate::Error; - type Transaction = Transaction; - type Instance = NodeState; - type Metadata = NsTable; - type ValidatedState = ValidatedState; - - async fn from_transactions( - transactions: impl IntoIterator + Send, - validated_state: &Self::ValidatedState, - instance_state: &Self::Instance, - ) -> Result<(Self, Self::Metadata), Self::Error> { - let validated_state_cf = validated_state.chain_config; - let instance_state_cf = 
instance_state.chain_config; - - let chain_config = if validated_state_cf.commit() == instance_state_cf.commit() { - instance_state_cf - } else { - match validated_state_cf.resolve() { - Some(cf) => cf, - None => { - instance_state - .peers - .as_ref() - .fetch_chain_config(validated_state_cf.commit()) - .await? - } - } - }; - - Self::from_transactions_sync(transactions, chain_config, instance_state) - } - - // TODO avoid cloning the entire payload here? - fn from_bytes(block_payload_bytes: &[u8], ns_table: &Self::Metadata) -> Self { - Self { - raw_payload: block_payload_bytes.to_vec(), - ns_table: ns_table.clone(), - } - } - - fn empty() -> (Self, Self::Metadata) { - let payload = Self::from_transactions_sync(vec![], Default::default(), &Default::default()) - .unwrap() - .0; - let ns_table = payload.ns_table().clone(); - (payload, ns_table) - } - - fn builder_commitment(&self, metadata: &Self::Metadata) -> BuilderCommitment { - let ns_table_bytes = self.ns_table.encode(); - - // TODO `metadata_bytes` equals `ns_table_bytes`, so we are - // double-hashing the ns_table. Why? To maintain serialization - // compatibility. 
- // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - let metadata_bytes = metadata.encode(); - - let mut digest = sha2::Sha256::new(); - digest.update((self.raw_payload.len() as u64).to_le_bytes()); - digest.update((ns_table_bytes.len() as u64).to_le_bytes()); - digest.update((metadata_bytes.len() as u64).to_le_bytes()); // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - digest.update(&self.raw_payload); - digest.update(ns_table_bytes); - digest.update(metadata_bytes); // https://github.com/EspressoSystems/espresso-sequencer/issues/1576 - BuilderCommitment::from_raw_digest(digest.finalize()) - } - - fn transactions<'a>( - &'a self, - metadata: &'a Self::Metadata, - ) -> impl 'a + Iterator { - self.enumerate(metadata).map(|(_, t)| t) - } -} - -impl QueryablePayload for Payload { - // TODO changes to QueryablePayload trait: - // https://github.com/EspressoSystems/hotshot-query-service/issues/639 - type TransactionIndex = Index; - type Iter<'a> = Iter<'a>; - type InclusionProof = TxProof; - - fn len(&self, _meta: &Self::Metadata) -> usize { - // Counting txs is nontrivial. The easiest solution is to consume an - // iterator. If performance is a concern then we could cache this count - // on construction of `Payload`. - self.iter(_meta).count() - } - - fn iter<'a>(&'a self, _meta: &'a Self::Metadata) -> Self::Iter<'a> { - Iter::new(self) - } - - fn transaction_with_proof( - &self, - _meta: &Self::Metadata, - index: &Self::TransactionIndex, - ) -> Option<(Self::Transaction, Self::InclusionProof)> { - // TODO HACK! THE RETURNED PROOF MIGHT FAIL VERIFICATION. - // https://github.com/EspressoSystems/hotshot-query-service/issues/639 - // - // Need a `VidCommon` to proceed. Need to modify `QueryablePayload` - // trait to add a `VidCommon` arg. In the meantime tests fail if I leave - // it `todo!()`, so this hack allows tests to pass. 
- let common = hotshot_types::vid::vid_scheme(10) - .disperse(&self.raw_payload) - .unwrap() - .common; - - TxProof::new(index, self, &common) - } -} - -impl std::fmt::Display for Payload { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:#?}") - } -} - -impl EncodeBytes for Payload { - fn encode(&self) -> Arc<[u8]> { - Arc::from(self.raw_payload.as_ref()) - } -} - -/// Byte length of a block payload, which includes all namespaces but *not* the -/// namespace table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct PayloadByteLen(usize); - -impl PayloadByteLen { - /// Extract payload byte length from a [`VidCommon`] and construct a new [`Self`] from it. - pub fn from_vid_common(common: &VidCommon) -> Self { - Self(usize::try_from(VidSchemeType::get_payload_byte_len(common)).unwrap()) - } - - /// Is the payload byte length declared in a [`VidCommon`] equal [`Self`]? - pub fn is_consistent(&self, common: &VidCommon) -> bool { - // failure to convert to usize implies that `common` cannot be - // consistent with `self`. 
- let expected = match usize::try_from(VidSchemeType::get_payload_byte_len(common)) { - Ok(n) => n, - Err(_) => { - tracing::warn!( - "VidCommon byte len u32 {} should convert to usize", - VidSchemeType::get_payload_byte_len(common) - ); - return false; - } - }; - - self.0 == expected - } - - pub(in crate::block::full_payload) fn as_usize(&self) -> usize { - self.0 - } -} - -#[cfg(any(test, feature = "testing"))] -impl hotshot_types::traits::block_contents::TestableBlock for Payload { - fn genesis() -> Self { - BlockPayload::empty().0 - } - - fn txn_count(&self) -> u64 { - self.len(&self.ns_table) as u64 - } -} - -#[cfg(any(test, feature = "testing"))] -impl Payload { - pub fn ns_table_mut(&mut self) -> &mut NsTable { - &mut self.ns_table - } -} diff --git a/sequencer/src/block/namespace_payload.rs b/sequencer/src/block/namespace_payload.rs deleted file mode 100644 index ecd894f86e..0000000000 --- a/sequencer/src/block/namespace_payload.rs +++ /dev/null @@ -1,12 +0,0 @@ -mod iter; -mod ns_payload; -mod ns_payload_range; -mod tx_proof; -mod types; - -pub use iter::{Index, Iter}; -pub use tx_proof::TxProof; - -pub(in crate::block) use ns_payload::{NsPayload, NsPayloadOwned}; -pub(in crate::block) use ns_payload_range::NsPayloadRange; -pub(in crate::block) use types::NsPayloadBuilder; diff --git a/sequencer/src/block/namespace_payload/iter.rs b/sequencer/src/block/namespace_payload/iter.rs deleted file mode 100644 index cf136f76eb..0000000000 --- a/sequencer/src/block/namespace_payload/iter.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::block::{ - full_payload::{NsIndex, NsIter, Payload}, - namespace_payload::types::{TxIndex, TxIter}, -}; -use serde::{Deserialize, Serialize}; -use std::iter::Peekable; - -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct Index { - ns_index: NsIndex, - tx_index: TxIndex, -} - -impl Index { - pub fn ns(&self) -> &NsIndex { - &self.ns_index - } - pub(in crate::block) fn tx(&self) -> &TxIndex { - &self.tx_index - } 
-} - -// TODO don't impl `PartialOrd` -// It's needed only for `QueryablePayload` trait: -// https://github.com/EspressoSystems/hotshot-query-service/issues/639 -impl PartialOrd for Index { - fn partial_cmp(&self, _other: &Self) -> Option { - Some(self.cmp(_other)) - } -} -// TODO don't impl `Ord` -// It's needed only for `QueryablePayload` trait: -// https://github.com/EspressoSystems/hotshot-query-service/issues/639 -impl Ord for Index { - fn cmp(&self, _other: &Self) -> std::cmp::Ordering { - unimplemented!() - } -} - -/// Cartesian product of [`NsIter`], [`TxIter`]. -pub struct Iter<'a> { - ns_iter: Peekable, - tx_iter: Option, - block: &'a Payload, -} - -impl<'a> Iter<'a> { - pub fn new(block: &'a Payload) -> Self { - Self { - ns_iter: NsIter::new(&block.ns_table().len()).peekable(), - tx_iter: None, - block, - } - } -} - -impl Iterator for Iter<'_> { - type Item = Index; - - fn next(&mut self) -> Option { - loop { - let Some(ns_index) = self.ns_iter.peek() else { - break None; // ns_iter consumed - }; - - if let Some(tx_index) = self - .tx_iter - .get_or_insert_with(|| self.block.ns_payload(ns_index).iter()) - .next() - { - break Some(Index { - ns_index: ns_index.clone(), - tx_index, - }); - } - - self.tx_iter = None; // unset `tx_iter`; it's consumed for this namespace - self.ns_iter.next(); - } - } -} diff --git a/sequencer/src/block/namespace_payload/ns_payload.rs b/sequencer/src/block/namespace_payload/ns_payload.rs deleted file mode 100644 index f2997839df..0000000000 --- a/sequencer/src/block/namespace_payload/ns_payload.rs +++ /dev/null @@ -1,137 +0,0 @@ -use crate::{ - block::namespace_payload::types::{ - FromNsPayloadBytes, NsPayloadByteLen, NsPayloadBytesRange, NumTxs, NumTxsRange, - NumTxsUnchecked, TxIndex, TxIter, TxPayloadRange, TxTableEntriesRange, - }, - NamespaceId, Transaction, -}; -use serde::{Deserialize, Serialize}; - -/// Raw binary data for a single namespace's payload. -/// -/// Any sequence of bytes is a valid [`NsPayload`]. 
-/// -/// See module-level documentation [`types`](super::types) for a full -/// specification of the binary format of a namespace. -pub(in crate::block) struct NsPayload([u8]); - -impl NsPayload { - pub fn from_bytes_slice(bytes: &[u8]) -> &NsPayload { - NsPayload::new_private(bytes) - } - pub fn as_bytes_slice(&self) -> &[u8] { - &self.0 - } - pub fn byte_len(&self) -> NsPayloadByteLen { - NsPayloadByteLen::from_usize(self.0.len()) - } - - /// Read and parse bytes from the ns payload. - /// - /// Arg `range: &R` is convertible into a `Range` via - /// [`NsPayloadBytesRange`]. The payload bytes are parsed into a `R::Output` - /// via [`FromNsPayloadBytes`]. - pub fn read<'a, R>(&'a self, range: &R) -> R::Output - where - R: NsPayloadBytesRange<'a>, - { - >::from_payload_bytes(&self.0[range.ns_payload_range()]) - } - - /// Iterator over all transactions in this namespace. - pub fn iter(&self) -> TxIter { - self.iter_from_num_txs(&self.read_num_txs()) - } - - /// Return all transactions in this namespace. The namespace ID for each - /// returned [`Transaction`] is set to `ns_id`. - pub fn export_all_txs(&self, ns_id: &NamespaceId) -> Vec { - let num_txs = self.read_num_txs(); - self.iter_from_num_txs(&num_txs) - .map(|i| self.tx_from_num_txs(ns_id, &i, &num_txs)) - .collect() - } - - /// Return a transaction from this namespace. Set its namespace ID to - /// `ns_id`. - /// - /// Return `None` if `index` is out of bounds. - pub fn export_tx(&self, ns_id: &NamespaceId, index: &TxIndex) -> Option { - let num_txs_unchecked = self.read_num_txs(); - let num_txs = NumTxs::new(&num_txs_unchecked, &self.byte_len()); - if !num_txs.in_bounds(index) { - return None; // error: tx index out of bounds - } - Some(self.tx_from_num_txs(ns_id, index, &num_txs_unchecked)) - } - - /// Private helper. (Could be pub if desired.) 
- fn read_num_txs(&self) -> NumTxsUnchecked { - self.read(&NumTxsRange::new(&self.byte_len())) - } - - /// Private helper - fn iter_from_num_txs(&self, num_txs: &NumTxsUnchecked) -> TxIter { - let num_txs = NumTxs::new(num_txs, &self.byte_len()); - TxIter::new(&num_txs) - } - - /// Private helper - fn tx_from_num_txs( - &self, - ns_id: &NamespaceId, - index: &TxIndex, - num_txs_unchecked: &NumTxsUnchecked, - ) -> Transaction { - let tx_table_entries = self.read(&TxTableEntriesRange::new(index)); - let tx_range = TxPayloadRange::new(num_txs_unchecked, &tx_table_entries, &self.byte_len()); - - // TODO don't copy the tx bytes into the return value - // https://github.com/EspressoSystems/hotshot-query-service/issues/267 - let tx_payload = self.read(&tx_range).to_payload_bytes().to_vec(); - Transaction::new(*ns_id, tx_payload) - } -} - -#[repr(transparent)] -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -#[serde(transparent)] -pub(in crate::block) struct NsPayloadOwned(#[serde(with = "base64_bytes")] Vec); - -/// Crazy boilerplate code to make it so that [`NsPayloadOwned`] is to -/// [`NsPayload`] as [`Vec`] is to `[T]`. See [How can I create newtypes for -/// an unsized type and its owned counterpart (like `str` and `String`) in safe -/// Rust? - Stack Overflow](https://stackoverflow.com/q/64977525) -mod ns_payload_owned { - use super::{NsPayload, NsPayloadOwned}; - use std::borrow::Borrow; - use std::ops::Deref; - - impl NsPayload { - // pub(super) because I want it visible everywhere in this file but I - // also want this boilerplate code quarrantined in `ns_payload_owned`. 
- pub(super) fn new_private(p: &[u8]) -> &NsPayload { - unsafe { &*(p as *const [u8] as *const NsPayload) } - } - } - - impl Deref for NsPayloadOwned { - type Target = NsPayload; - fn deref(&self) -> &NsPayload { - NsPayload::new_private(&self.0) - } - } - - impl Borrow for NsPayloadOwned { - fn borrow(&self) -> &NsPayload { - self.deref() - } - } - - impl ToOwned for NsPayload { - type Owned = NsPayloadOwned; - fn to_owned(&self) -> NsPayloadOwned { - NsPayloadOwned(self.0.to_owned()) - } - } -} diff --git a/sequencer/src/block/namespace_payload/ns_payload_range.rs b/sequencer/src/block/namespace_payload/ns_payload_range.rs deleted file mode 100644 index f2812f6fd9..0000000000 --- a/sequencer/src/block/namespace_payload/ns_payload_range.rs +++ /dev/null @@ -1,34 +0,0 @@ -use super::types::{NsPayloadByteLen, NsPayloadBytesRange}; -use std::ops::Range; - -/// Index range for a namespace payload inside a block payload. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct NsPayloadRange(Range); - -impl NsPayloadRange { - /// TODO restrict visibility? - pub fn new(start: usize, end: usize) -> Self { - Self(start..end) - } - - /// Access the underlying index range for this namespace inside a block - /// payload. - pub fn as_block_range(&self) -> Range { - self.0.clone() - } - - /// Return the byte length of this namespace. - pub fn byte_len(&self) -> NsPayloadByteLen { - NsPayloadByteLen::from_usize(self.0.len()) - } - - /// Convert a [`NsPayloadBytesRange`] into a range that's relative to the - /// entire block payload. 
- pub fn block_range<'a, R>(&self, range: &R) -> Range - where - R: NsPayloadBytesRange<'a>, - { - let range = range.ns_payload_range(); - range.start + self.0.start..range.end + self.0.start - } -} diff --git a/sequencer/src/block/namespace_payload/tx_proof.rs b/sequencer/src/block/namespace_payload/tx_proof.rs deleted file mode 100644 index ee025c0f4b..0000000000 --- a/sequencer/src/block/namespace_payload/tx_proof.rs +++ /dev/null @@ -1,253 +0,0 @@ -use crate::{ - block::{ - full_payload::{ - NsTable, {Payload, PayloadByteLen}, - }, - namespace_payload::{ - iter::Index, - types::{ - NumTxs, NumTxsRange, NumTxsUnchecked, TxIndex, TxPayloadRange, TxTableEntries, - TxTableEntriesRange, - }, - }, - }, - Transaction, -}; -use hotshot_query_service::{VidCommitment, VidCommon}; -use hotshot_types::{ - traits::EncodeBytes, - vid::{vid_scheme, SmallRangeProofType, VidSchemeType}, -}; -use jf_vid::{ - payload_prover::{PayloadProver, Statement}, - VidScheme, -}; -use serde::{Deserialize, Serialize}; - -/// Proof of correctness for transaction bytes in a block. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct TxProof { - // Naming conventions for this struct's fields: - // - `payload_x`: bytes from the payload - // - `payload_proof_x`: a proof of those bytes from the payload - tx_index: TxIndex, - - // Number of txs declared in the tx table - payload_num_txs: NumTxsUnchecked, - payload_proof_num_txs: SmallRangeProofType, - - // Tx table entries for this tx - payload_tx_table_entries: TxTableEntries, - payload_proof_tx_table_entries: SmallRangeProofType, - - // This tx's payload bytes. - // `None` if this tx has zero length. - payload_proof_tx: Option, -} - -impl TxProof { - /// Returns the [`Transaction`] indicated by `index`, along with a proof of - /// correctness for that transaction. Returns `None` on error. 
- pub fn new( - index: &Index, - payload: &Payload, - common: &VidCommon, - ) -> Option<(Transaction, Self)> { - let payload_byte_len = payload.byte_len(); - if !payload_byte_len.is_consistent(common) { - tracing::warn!( - "payload byte len {} inconsistent with common {}", - payload_byte_len, - VidSchemeType::get_payload_byte_len(common) - ); - return None; // error: payload byte len inconsistent with common - } - if !payload.ns_table().in_bounds(index.ns()) { - tracing::warn!("ns_index {:?} out of bounds", index.ns()); - return None; // error: ns index out of bounds - } - // check tx index below - - let payload_bytes_arc = payload.encode(); // pacify borrow checker - let payload_bytes = payload_bytes_arc.as_ref(); - let ns_range = payload.ns_table().ns_range(index.ns(), &payload_byte_len); - let ns_byte_len = ns_range.byte_len(); - let ns_payload = payload.read_ns_payload(&ns_range); - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .unwrap(), - ); - - // Read the tx table len from this namespace's tx table and compute a - // proof of correctness. - let num_txs_range = NumTxsRange::new(&ns_byte_len); - let payload_num_txs = ns_payload.read(&num_txs_range); - - // Check tx index. - // - // TODO the next line of code (and other code) could be easier to read - // if we make a helpers that repeat computation we've already done. - if !NumTxs::new(&payload_num_txs, &ns_byte_len).in_bounds(index.tx()) { - return None; // error: tx index out of bounds - } - - let payload_proof_num_txs = vid - .payload_proof(payload_bytes, ns_range.block_range(&num_txs_range)) - .ok()?; - - // Read the tx table entries for this tx and compute a proof of - // correctness. - let tx_table_entries_range = TxTableEntriesRange::new(index.tx()); - let payload_tx_table_entries = ns_payload.read(&tx_table_entries_range); - let payload_proof_tx_table_entries = { - vid.payload_proof(payload_bytes, ns_range.block_range(&tx_table_entries_range)) - .ok()? 
- }; - - // Read the tx payload and compute a proof of correctness. - let tx_payload_range = - TxPayloadRange::new(&payload_num_txs, &payload_tx_table_entries, &ns_byte_len); - let payload_proof_tx = { - let range = ns_range.block_range(&tx_payload_range); - if range.is_empty() { - None - } else { - Some(vid.payload_proof(payload_bytes, range).ok()?) - } - }; - - let tx = { - let ns_id = payload.ns_table().read_ns_id_unchecked(index.ns()); - let tx_payload = ns_payload - .read(&tx_payload_range) - .to_payload_bytes() - .to_vec(); - Transaction::new(ns_id, tx_payload) - }; - - Some(( - tx, - TxProof { - tx_index: index.tx().clone(), - payload_num_txs, - payload_proof_num_txs, - payload_tx_table_entries, - payload_proof_tx_table_entries, - payload_proof_tx, - }, - )) - } - - /// Verify a [`TxProof`] for `tx` against a payload commitment. Returns - /// `None` on error. - pub fn verify( - &self, - ns_table: &NsTable, - tx: &Transaction, - commit: &VidCommitment, - common: &VidCommon, - ) -> Option { - VidSchemeType::is_consistent(commit, common).ok()?; - let Some(ns_index) = ns_table.find_ns_id(&tx.namespace()) else { - tracing::info!("ns id {} does not exist", tx.namespace()); - return None; // error: ns id does not exist - }; - let ns_range = ns_table.ns_range(&ns_index, &PayloadByteLen::from_vid_common(common)); - let ns_byte_len = ns_range.byte_len(); - - if !NumTxs::new(&self.payload_num_txs, &ns_byte_len).in_bounds(&self.tx_index) { - tracing::info!("tx index {:?} out of bounds", self.tx_index); - return None; // error: tx index out of bounds - } - - let vid = vid_scheme( - VidSchemeType::get_num_storage_nodes(common) - .try_into() - .unwrap(), - ); - - // Verify proof for tx table len - { - let range = ns_range.block_range(&NumTxsRange::new(&ns_byte_len)); - if vid - .payload_verify( - Statement { - payload_subslice: &self.payload_num_txs.to_payload_bytes(), - range, - commit, - common, - }, - &self.payload_proof_num_txs, - ) - .ok()? 
- .is_err() - { - return Some(false); - } - } - - // Verify proof for tx table entries - { - let range = ns_range.block_range(&TxTableEntriesRange::new(&self.tx_index)); - if vid - .payload_verify( - Statement { - payload_subslice: &self.payload_tx_table_entries.to_payload_bytes(), - range, - commit, - common, - }, - &self.payload_proof_tx_table_entries, - ) - .ok()? - .is_err() - { - return Some(false); - } - } - - // Verify proof for tx payload - { - let range = ns_range.block_range(&TxPayloadRange::new( - &self.payload_num_txs, - &self.payload_tx_table_entries, - &ns_byte_len, - )); - - match (&self.payload_proof_tx, range.is_empty()) { - (Some(proof), false) => { - if vid - .payload_verify( - Statement { - payload_subslice: tx.payload(), - range, - commit, - common, - }, - proof, - ) - .ok()? - .is_err() - { - return Some(false); - } - } - (None, true) => {} // 0-length tx, nothing to verify - (None, false) => { - tracing::error!( - "tx verify: missing proof for nonempty tx payload range {:?}", - range - ); - return None; - } - (Some(_), true) => { - tracing::error!("tx verify: unexpected proof for empty tx payload range"); - return None; - } - } - } - - Some(true) - } -} diff --git a/sequencer/src/block/namespace_payload/types.rs b/sequencer/src/block/namespace_payload/types.rs deleted file mode 100644 index 09860f80bd..0000000000 --- a/sequencer/src/block/namespace_payload/types.rs +++ /dev/null @@ -1,429 +0,0 @@ -//! Types related to a namespace payload and its transaction table. -//! -//! All code that needs to know the binary format of a namespace payload and its -//! transaction table is restricted to this file. -//! -//! There are many newtypes in this file to facilitate transaction proofs. -//! -//! # Binary format of a namespace payload -//! -//! Any sequence of bytes is a valid [`NsPayload`]. -//! -//! A namespace payload consists of two concatenated byte sequences: -//! - `tx_table`: transaction table -//! - `tx_payloads`: transaction payloads -//! 
-//! # Transaction table -//! -//! Byte lengths for the different items that could appear in a `tx_table` are -//! specified in local private constants [`NUM_TXS_BYTE_LEN`], -//! [`TX_OFFSET_BYTE_LEN`]. -//! -//! ## Number of entries in the transaction table -//! -//! The first [`NUM_TXS_BYTE_LEN`] bytes of the `tx_table` indicate the number -//! `n` of entries in the table as a little-endian unsigned integer. If the -//! entire namespace payload byte length is smaller than [`NUM_TXS_BYTE_LEN`] -//! then the missing bytes are zero-padded. -//! -//! The bytes in the namespace payload beyond the first [`NUM_TXS_BYTE_LEN`] -//! bytes encode entries in the `tx_table`. Each entry consumes exactly -//! [`TX_OFFSET_BYTE_LEN`] bytes. -//! -//! The number `n` could be anything, including a number much larger than the -//! number of entries that could fit in the namespace payload. As such, the -//! actual number of entries in the `tx_table` is defined as the minimum of `n` -//! and the maximum number of whole `tx_table` entries that could fit in the -//! namespace payload. -//! -//! The `tx_payloads` consist of any bytes in the namespace payload beyond the -//! `tx_table`. -//! -//! ## Transaction table entry -//! -//! Each entry in the `tx_table` is exactly [`TX_OFFSET_BYTE_LEN`] bytes. These -//! bytes indicate the end-index of a transaction in the namespace payload -//! bytes. This end-index is a little-endian unsigned integer. -//! -//! This offset is relative to the end of the `tx_table` within the current -//! namespace. -//! -//! ### Example -//! -//! Suppose a block payload has 3000 bytes and 3 namespaces of 1000 bytes each. -//! Suppose the `tx_table` for final namespace in the block has byte length 100, -//! and suppose an entry in that `tx_table` indicates an end-index of `10`. The -//! actual end-index of that transaction relative to the current namespace is -//! `110`: `10` bytes for the offset plus `100` bytes for the `tx_table`. -//! 
Relative to the entire block payload, the end-index of that transaction is -//! `2110`: `10` bytes for the offset plus `100` bytes for the `tx_table` plus -//! `2000` bytes for this namespace. -//! -//! # How to deduce a transaction's byte range -//! -//! In order to extract the payload bytes of a single transaction `T` from the -//! namespace payload one needs both the start- and end-indices for `T`. -//! -//! See [`TxPayloadRange::new`] for clarification. What follows is a description -//! of what's implemented in [`TxPayloadRange::new`]. -//! -//! If `T` occupies the `i`th entry in the `tx_table` for `i>0` then the -//! start-index for `T` is defined as the end-index of the `(i-1)`th entry in -//! the table. -//! -//! Thus, both start- and end-indices for any transaction `T` can be read from a -//! contiguous, constant-size byte range in the `tx_table`. This property -//! facilitates transaction proofs. -//! -//! The start-index of the 0th entry in the table is implicitly defined to be -//! `0`. -//! -//! The start- and end-indices `(declared_start, declared_end)` declared in the -//! `tx_table` could be anything. As such, the actual start- and end-indices -//! `(start, end)` are defined so as to ensure that the byte range is -//! well-defined and in-bounds for the namespace payload: -//! ```ignore -//! end = min(declared_end, namespace_payload_byte_length) -//! start = min(declared_start, end) -//! ``` -//! -//! To get the byte range for `T` relative to the current namespace, the above -//! range is translated by the byte length of the `tx_table` *as declared in the -//! `tx_table` itself*, suitably truncated to fit within the current namespace. -//! -//! In particular, if the `tx_table` declares a huge number `n` of entries that -//! cannot fit into the namespace payload then all transactions in this -//! namespace have a zero-length byte range whose start- and end-indices are -//! both `namespace_payload_byte_length`. -//! -//! 
In a "honestly-prepared" `tx_table` the end-index of the final transaction -//! equals the byte length of the namespace payload minus the byte length of the -//! `tx_table`. (Otherwise the namespace payload might have bytes that are not -//! included in any transaction.) -//! -//! It is possible that a `tx_table` table could indicate two distinct -//! transactions whose byte ranges overlap, though no "honestly-prepared" -//! `tx_table` would do this. -use crate::block::uint_bytes::{bytes_serde_impl, usize_from_bytes, usize_to_bytes}; -use crate::Transaction; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::ops::Range; - -/// Byte lengths for the different items that could appear in a tx table. -const NUM_TXS_BYTE_LEN: usize = 4; -const TX_OFFSET_BYTE_LEN: usize = 4; - -/// Data that can be deserialized from a subslice of namespace payload bytes. -/// -/// Companion trait for [`NsPayloadBytesRange`], which specifies the subslice of -/// namespace payload bytes to read. -pub trait FromNsPayloadBytes<'a> { - /// Deserialize `Self` from namespace payload bytes. - fn from_payload_bytes(bytes: &'a [u8]) -> Self; -} - -/// Specifies a subslice of namespace payload bytes to read. -/// -/// Companion trait for [`FromNsPayloadBytes`], which holds data that can be -/// deserialized from that subslice of bytes. -pub trait NsPayloadBytesRange<'a> { - type Output: FromNsPayloadBytes<'a>; - - /// Range relative to this ns payload - fn ns_payload_range(&self) -> Range; -} - -/// Number of txs in a namespace. -/// -/// Like [`NumTxsUnchecked`] but checked against a [`NsPayloadByteLen`]. -pub struct NumTxs(usize); - -impl NumTxs { - /// Returns the minimum of: - /// - `num_txs` - /// - The maximum number of tx table entries that could fit in a namespace - /// whose byte length is `byte_len`. 
- pub fn new(num_txs: &NumTxsUnchecked, byte_len: &NsPayloadByteLen) -> Self { - Self(std::cmp::min( - // Number of txs declared in the tx table - num_txs.0, - // Max number of tx table entries that could fit in the namespace payload - byte_len.0.saturating_sub(NUM_TXS_BYTE_LEN) / TX_OFFSET_BYTE_LEN, - )) - } - - pub fn in_bounds(&self, index: &TxIndex) -> bool { - index.0 < self.0 - } -} - -/// Byte length of a namespace payload. -pub struct NsPayloadByteLen(usize); - -impl NsPayloadByteLen { - // TODO restrict visibility? - pub fn from_usize(n: usize) -> Self { - Self(n) - } -} - -/// The part of a tx table that declares the number of txs in the payload. -/// -/// "Unchecked" because this quantity might exceed the number of tx table -/// entries that could fit into the namespace that contains it. -/// -/// Use [`NumTxs`] for the actual number of txs in this namespace. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct NumTxsUnchecked(usize); -bytes_serde_impl!( - NumTxsUnchecked, - to_payload_bytes, - [u8; NUM_TXS_BYTE_LEN], - from_payload_bytes -); - -impl NumTxsUnchecked { - pub fn to_payload_bytes(&self) -> [u8; NUM_TXS_BYTE_LEN] { - usize_to_bytes::(self.0) - } -} - -impl FromNsPayloadBytes<'_> for NumTxsUnchecked { - fn from_payload_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -/// Byte range for the part of a tx table that declares the number of txs in the -/// payload. -pub struct NumTxsRange(Range); - -impl NumTxsRange { - pub fn new(byte_len: &NsPayloadByteLen) -> Self { - Self(0..NUM_TXS_BYTE_LEN.min(byte_len.0)) - } -} - -impl NsPayloadBytesRange<'_> for NumTxsRange { - type Output = NumTxsUnchecked; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// Entries from a tx table in a namespace for use in a transaction proof. -/// -/// Contains either one or two entries according to whether it was derived from -/// the first transaction in the namespace. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct TxTableEntries { - cur: usize, - prev: Option, // `None` if derived from the first transaction -} - -// This serde impl uses Vec. We could save space by using an array of -// length `TWO_ENTRIES_BYTE_LEN`, but then we need a way to distinguish -// `prev=Some(0)` from `prev=None`. -bytes_serde_impl!( - TxTableEntries, - to_payload_bytes, - Vec, - from_payload_bytes -); - -impl TxTableEntries { - const TWO_ENTRIES_BYTE_LEN: usize = 2 * TX_OFFSET_BYTE_LEN; - - pub fn to_payload_bytes(&self) -> Vec { - let mut bytes = Vec::with_capacity(Self::TWO_ENTRIES_BYTE_LEN); - if let Some(prev) = self.prev { - bytes.extend(usize_to_bytes::(prev)); - } - bytes.extend(usize_to_bytes::(self.cur)); - bytes - } -} - -impl FromNsPayloadBytes<'_> for TxTableEntries { - fn from_payload_bytes(bytes: &[u8]) -> Self { - match bytes.len() { - TX_OFFSET_BYTE_LEN => Self { - cur: usize_from_bytes::(bytes), - prev: None, - }, - Self::TWO_ENTRIES_BYTE_LEN => Self { - cur: usize_from_bytes::(&bytes[TX_OFFSET_BYTE_LEN..]), - prev: Some(usize_from_bytes::( - &bytes[..TX_OFFSET_BYTE_LEN], - )), - }, - len => panic!( - "unexpected bytes len {} should be either {} or {}", - len, - TX_OFFSET_BYTE_LEN, - Self::TWO_ENTRIES_BYTE_LEN - ), - } - } -} - -/// Byte range for entries from a tx table for use in a transaction proof. -/// -/// This range covers either one or two entries from a tx table according to -/// whether it was derived from the first transaction in the namespace. -pub struct TxTableEntriesRange(Range); - -impl TxTableEntriesRange { - pub fn new(index: &TxIndex) -> Self { - let start = if index.0 == 0 { - // Special case: the desired range includes only one entry from - // the tx table: the first entry. This entry starts immediately - // following the bytes that encode the tx table length. - NUM_TXS_BYTE_LEN - } else { - // The desired range starts at the beginning of the previous tx - // table entry. 
- (index.0 - 1) - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN) - }; - // The desired range ends at the end of this transaction's tx table entry - let end = index - .0 - .saturating_add(1) - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN); - Self(start..end) - } -} - -impl NsPayloadBytesRange<'_> for TxTableEntriesRange { - type Output = TxTableEntries; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// A transaction's payload data. -pub struct TxPayload<'a>(&'a [u8]); - -impl<'a> TxPayload<'a> { - pub fn to_payload_bytes(&self) -> &'a [u8] { - self.0 - } -} - -impl<'a> FromNsPayloadBytes<'a> for TxPayload<'a> { - fn from_payload_bytes(bytes: &'a [u8]) -> Self { - Self(bytes) - } -} - -/// Byte range for a transaction's payload data. -pub struct TxPayloadRange(Range); - -impl TxPayloadRange { - pub fn new( - num_txs: &NumTxsUnchecked, - tx_table_entries: &TxTableEntries, - byte_len: &NsPayloadByteLen, - ) -> Self { - let tx_table_byte_len = num_txs - .0 - .saturating_mul(TX_OFFSET_BYTE_LEN) - .saturating_add(NUM_TXS_BYTE_LEN); - let end = tx_table_entries - .cur - .saturating_add(tx_table_byte_len) - .min(byte_len.0); - let start = tx_table_entries - .prev - .unwrap_or(0) - .saturating_add(tx_table_byte_len) - .min(end); - Self(start..end) - } -} - -impl<'a> NsPayloadBytesRange<'a> for TxPayloadRange { - type Output = TxPayload<'a>; - - fn ns_payload_range(&self) -> Range { - self.0.clone() - } -} - -/// Index for an entry in a tx table. 
-#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct TxIndex(usize); -bytes_serde_impl!(TxIndex, to_bytes, [u8; NUM_TXS_BYTE_LEN], from_bytes); - -impl TxIndex { - pub fn to_bytes(&self) -> [u8; NUM_TXS_BYTE_LEN] { - usize_to_bytes::(self.0) - } - fn from_bytes(bytes: &[u8]) -> Self { - Self(usize_from_bytes::(bytes)) - } -} - -pub(in crate::block) struct TxIter(Range); - -impl TxIter { - pub fn new(num_txs: &NumTxs) -> Self { - Self(0..num_txs.0) - } -} - -// Simple `impl Iterator` delegates to `Range`. -impl Iterator for TxIter { - type Item = TxIndex; - - fn next(&mut self) -> Option { - self.0.next().map(TxIndex) - } -} - -/// Build an individual namespace payload one transaction at a time. -/// -/// Use [`Self::append_tx`] to add each transaction. Use [`Self::into_bytes`] -/// when you're done. The returned bytes include a well-formed tx table and all -/// tx payloads. -#[derive(Default)] -pub(in crate::block) struct NsPayloadBuilder { - tx_table_entries: Vec, - tx_bodies: Vec, -} - -impl NsPayloadBuilder { - /// Add a transaction's payload to this namespace - pub fn append_tx(&mut self, tx: Transaction) { - self.tx_bodies.extend(tx.into_payload()); - self.tx_table_entries - .extend(usize_to_bytes::(self.tx_bodies.len())); - } - - /// Serialize to bytes and consume self. - pub fn into_bytes(self) -> Vec { - let mut result = Vec::with_capacity( - NUM_TXS_BYTE_LEN + self.tx_table_entries.len() + self.tx_bodies.len(), - ); - let num_txs = NumTxsUnchecked(self.tx_table_entries.len() / TX_OFFSET_BYTE_LEN); - result.extend(num_txs.to_payload_bytes()); - result.extend(self.tx_table_entries); - result.extend(self.tx_bodies); - result - } - - /// Byte length of a tx table header. - pub const fn tx_table_header_byte_len() -> usize { - NUM_TXS_BYTE_LEN - } - - /// Byte length of a single tx table entry. 
- pub const fn tx_table_entry_byte_len() -> usize { - TX_OFFSET_BYTE_LEN - } -} diff --git a/sequencer/src/block/test.rs b/sequencer/src/block/test.rs deleted file mode 100644 index fe8f77b417..0000000000 --- a/sequencer/src/block/test.rs +++ /dev/null @@ -1,207 +0,0 @@ -use crate::{ - block::{ - full_payload::{NsProof, Payload}, - namespace_payload::TxProof, - }, - chain_config::BlockSize, - ChainConfig, NamespaceId, NodeState, Transaction, ValidatedState, -}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot::traits::BlockPayload; -use hotshot_query_service::availability::QueryablePayload; -use hotshot_types::{traits::EncodeBytes, vid::vid_scheme}; -use jf_vid::VidScheme; -use rand::RngCore; -use std::collections::HashMap; - -#[tokio::test(flavor = "multi_thread")] -async fn basic_correctness() { - // play with this - let test_cases = vec![ - vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]], // 3 non-empty namespaces - ]; - - setup_logging(); - setup_backtrace(); - let mut rng = jf_utils::test_rng(); - let valid_tests = ValidTest::many_from_tx_lengths(test_cases, &mut rng); - - let mut vid = vid_scheme(10); - - for mut test in valid_tests { - let mut all_txs = test.all_txs(); - tracing::info!("test case {} nss {} txs", test.nss.len(), all_txs.len()); - - let block = - Payload::from_transactions(test.all_txs(), &Default::default(), &Default::default()) - .await - .unwrap() - .0; - tracing::info!( - "ns_table {:?}, payload {:?}", - block.ns_table().encode(), - block.encode() - ); - - // test correct number of nss, txs - assert_eq!(block.ns_table().iter().count(), test.nss.len()); - assert_eq!(block.len(block.ns_table()), all_txs.len()); - assert_eq!(block.iter(block.ns_table()).count(), all_txs.len()); - - tracing::info!("all_txs {:?}", all_txs); - - let (vid_commit, vid_common) = { - let disperse_data = vid.disperse(block.encode()).unwrap(); - (disperse_data.commit, disperse_data.common) - }; - - // test iterate over all 
txs - for tx_index in block.iter(block.ns_table()) { - let tx = block.transaction(&tx_index).unwrap(); - tracing::info!("tx {:?}, {:?}", tx_index, tx); - - // warning: linear search for a tx - let test_tx = all_txs.remove(all_txs.iter().position(|t| t == &tx).unwrap()); - assert_eq!(tx, test_tx); - - let tx_proof2 = { - let (tx2, tx_proof) = TxProof::new(&tx_index, &block, &vid_common).unwrap(); - assert_eq!(tx, tx2); - tx_proof - }; - assert!(tx_proof2 - .verify(block.ns_table(), &tx, &vid_commit, &vid_common) - .unwrap()); - } - assert!( - all_txs.is_empty(), - "not all test txs consumed by block.iter" - ); - - // test iterate over all namespaces - for ns_index in block.ns_table().iter() { - let ns_id = block.ns_table().read_ns_id(&ns_index).unwrap(); - tracing::info!("test ns_id {ns_id}"); - - let txs = test - .nss - .remove(&ns_id) - .expect("block ns_id missing from test"); - - let ns_proof = NsProof::new(&block, &ns_index, &vid_common) - .expect("namespace_with_proof should succeed"); - - let (ns_proof_txs, ns_proof_ns_id) = ns_proof - .verify(block.ns_table(), &vid_commit, &vid_common) - .unwrap_or_else(|| panic!("namespace {} proof verification failure", ns_id)); - - assert_eq!(ns_proof_ns_id, ns_id); - assert_eq!(ns_proof_txs, txs); - } - assert!( - test.nss.is_empty(), - "not all test namespaces consumed by ns_iter" - ); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn enforce_max_block_size() { - setup_logging(); - setup_backtrace(); - let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; - let payload_byte_len_expected: usize = 119; - let ns_table_byte_len_expected: usize = 28; - - let mut rng = jf_utils::test_rng(); - let test = ValidTest::from_tx_lengths(test_case, &mut rng); - let tx_count_expected = test.all_txs().len(); - - let chain_config = ChainConfig { - max_block_size: BlockSize::from( - (payload_byte_len_expected + ns_table_byte_len_expected) as u64, - ), - ..Default::default() - }; - - // test: actual block size 
equals max block size - let instance_state = NodeState::default().with_chain_config(chain_config); - - let validated_state = ValidatedState { - chain_config: chain_config.into(), - ..Default::default() - }; - let block = Payload::from_transactions(test.all_txs(), &validated_state, &instance_state) - .await - .unwrap() - .0; - assert_eq!(block.encode().len(), payload_byte_len_expected); - assert_eq!(block.ns_table().encode().len(), ns_table_byte_len_expected); - assert_eq!(block.len(block.ns_table()), tx_count_expected); - - // test: actual block size exceeds max block size, so 1 tx is dropped - // WARN log should be emitted - - let chain_config = ChainConfig { - max_block_size: BlockSize::from( - (payload_byte_len_expected + ns_table_byte_len_expected - 1) as u64, - ), - ..Default::default() - }; - let instance_state = NodeState::default().with_chain_config(chain_config); - - let validated_state = ValidatedState { - chain_config: chain_config.into(), - ..Default::default() - }; - - let block = Payload::from_transactions(test.all_txs(), &validated_state, &instance_state) - .await - .unwrap() - .0; - assert!(block.encode().len() < payload_byte_len_expected); - assert_eq!(block.ns_table().encode().len(), ns_table_byte_len_expected); - assert_eq!(block.len(block.ns_table()), tx_count_expected - 1); -} - -// TODO lots of infra here that could be reused in other tests. 
-pub struct ValidTest { - nss: HashMap>, -} - -impl ValidTest { - pub fn from_tx_lengths(tx_lengths: Vec>, rng: &mut R) -> Self - where - R: RngCore, - { - let mut nss = HashMap::new(); - for tx_lens in tx_lengths.into_iter() { - let ns_id = NamespaceId::random(rng); - for len in tx_lens { - let ns: &mut Vec<_> = nss.entry(ns_id).or_default(); - ns.push(Transaction::new(ns_id, random_bytes(len, rng))); - } - } - Self { nss } - } - - pub fn many_from_tx_lengths(test_cases: Vec>>, rng: &mut R) -> Vec - where - R: RngCore, - { - test_cases - .into_iter() - .map(|t| Self::from_tx_lengths(t, rng)) - .collect() - } - - pub fn all_txs(&self) -> Vec { - self.nss.iter().flat_map(|(_, txs)| txs.clone()).collect() - } -} - -fn random_bytes(len: usize, rng: &mut R) -> Vec { - let mut result = vec![0; len]; - rng.fill_bytes(&mut result); - result -} diff --git a/sequencer/src/block/uint_bytes.rs b/sequencer/src/block/uint_bytes.rs deleted file mode 100644 index 2296a8182a..0000000000 --- a/sequencer/src/block/uint_bytes.rs +++ /dev/null @@ -1,231 +0,0 @@ -//! Serialization (and deserialization) of primitive unsigned integer types to -//! (and from) an arbitrary fixed-length byte array. -//! -use paste::paste; -use std::mem::size_of; - -// Use an ugly macro because it's difficult or impossible to be generic over -// primitive types such as `usize`, `u64`. -macro_rules! uint_bytes_impl { - ($T:ty) => { - paste! { - /// Serialize `n` into `BYTE_LEN` bytes in little-endian form, padding with - /// 0 as needed. - /// - /// # Panics - /// If `n` cannot fit into `BYTE_LEN` bytes. 
- pub fn [<$T _to_bytes>](n: $T) -> [u8; BYTE_LEN] { - if size_of::<$T>() > BYTE_LEN { - assert!( - [<$T _fits>](n, BYTE_LEN), - "n {n} cannot fit into {BYTE_LEN} bytes" - ); - n.to_le_bytes()[..BYTE_LEN].try_into().unwrap() // panic is impossible - } else { - // convert `n` to bytes and pad with 0 - let mut result = [0; BYTE_LEN]; - result[..size_of::<$T>()].copy_from_slice(&n.to_le_bytes()[..]); - result - } - } - - /// Deserialize `bytes` in little-endian form into a `$T`, padding with 0 - /// as needed. - /// - /// # Panics - /// If `bytes.len()` is too large to fit into a `$T`. - pub fn [<$T _from_bytes>](bytes: &[u8]) -> $T { - assert!(bytes.len() <= BYTE_LEN, "bytes len {} exceeds BYTE_LEN {BYTE_LEN}", bytes.len()); - assert!( - BYTE_LEN <= size_of::<$T>(), - "BYTE_LEN {BYTE_LEN} cannot fit into {}", - stringify!($T) - ); - let mut [<$T _bytes>] = [0; size_of::<$T>()]; - [<$T _bytes>][..bytes.len()].copy_from_slice(bytes); - $T::from_le_bytes([<$T _bytes>]) - } - - /// Return the largest `$T` value that can fit into `byte_len` bytes. - pub const fn [<$T _max_from_byte_len>](byte_len: usize) -> $T { - if byte_len >= size_of::<$T>() { - $T::MAX - } else { - // overflow cannot occur because `byte_len < size_of::<$T>()` - (1 << (byte_len * 8)) - 1 - } - } - - /// Can `n` fit into `byte_len` bytes? - pub const fn [<$T _fits>](n: $T, byte_len: usize) -> bool { - n <= [<$T _max_from_byte_len>](byte_len) - } - } - }; - } - -uint_bytes_impl!(usize); -uint_bytes_impl!(u32); - -/// Impl [`serde`] for type `$T` with methods named `$to_bytes`, `$from_bytes` -/// of the form -/// ```ignore -/// $T::$to_bytes(&self) -> $B -/// $T::$from_bytes(bytes: &[u8]) -> Self -/// ``` -/// where `$B` is any type that impls [`serde::Deserialize`] and has a method -/// `as_ref` of the form -/// ```ignore -/// $B::as_ref(&self) -> &[u8] -/// ``` -/// Typical examples of `$B` include array `[u8; N]`, slice `&[u8]`, or -/// `Vec`. -macro_rules! 
bytes_serde_impl { - ($T:ty, $to_bytes:ident, $B:ty, $from_bytes:ident) => { - impl Serialize for $T { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.$to_bytes().serialize(serializer) - } - } - - impl<'de> Deserialize<'de> for $T { - fn deserialize(deserializer: D) -> Result<$T, D::Error> - where - D: Deserializer<'de>, - { - <$B as Deserialize>::deserialize(deserializer) - .map(|bytes| <$T>::$from_bytes(bytes.as_ref())) - } - } - }; -} - -pub(super) use bytes_serde_impl; - -#[cfg(test)] -mod test { - use fluent_asserter::prelude::*; - use paste::paste; - use std::mem::size_of; - - macro_rules! uint_bytes_test_impl { - ($T:ty) => { - paste! { - use super::{[<$T _max_from_byte_len>], [<$T _to_bytes>], [<$T _from_bytes>]}; - - #[test] - fn [<$T _max_from_byte_len_correctness>]() { - // test byte lengths 0 to size_of::<$T>() - let mut bytes = [0; size_of::<$T>()]; - assert_eq!([<$T _max_from_byte_len>](0), 0); - for i in 0..bytes.len() { - bytes[i] = 0xff; - assert_eq!([<$T _max_from_byte_len>](i + 1).to_le_bytes(), bytes); - } - - // test byte lengths size_of::<$T>() to twice that length - for i in size_of::<$T>()..2 * size_of::<$T>() { - assert_eq!([<$T _max_from_byte_len>](i + 1), $T::MAX); - } - } - - #[test] - fn [<$T _to_bytes_correctness>]() { - // byte length 0 - assert_eq!([<$T _to_bytes>](0), [0; 0]); - assert_that_code!(|| [<$T _to_bytes>]::<0>(1)).panics(); - - // byte length 1 - assert_eq!([<$T _to_bytes>](0), [0; 1]); - assert_eq!([<$T _to_bytes>](255), [255; 1]); - assert_that_code!(|| [<$T _to_bytes>]::<1>(256)).panics(); - - // byte length 2 - assert_eq!([<$T _to_bytes>](0), [0; 2]); - assert_eq!([<$T _to_bytes>](65535), [255; 2]); - assert_that_code!(|| [<$T _to_bytes>]::<2>(65536)).panics(); - - // byte length size_of::<$T>() - assert_eq!([<$T _to_bytes>](0), [0; size_of::<$T>()]); - assert_eq!([<$T _to_bytes>]($T::MAX), [255; size_of::<$T>()]); - - // byte length size_of::<$T>() + 1 - assert_eq!([<$T 
_to_bytes>](0), [0; size_of::<$T>() + 1]); - let [<$T _max_bytes>] = { - let mut bytes = [255; size_of::<$T>() + 1]; - bytes[bytes.len() - 1] = 0; - bytes - }; - assert_eq!([<$T _to_bytes>]($T::MAX), [<$T _max_bytes>]); - } - - #[test] - fn [<$T _from_bytes_correctness>]() { - let bytes = [255; size_of::<$T>() + 1]; - - // It would be nice to iterate through - // `0..size_of::<$T>()` but this is not possible with - // const generics for `[<$T _from_bytes>]`. We could - // use `seq-macro` crate but it requires an integer - // literal whereas our range includes `size_of::<$T>()`. - // - // Instead we just hard code four constants: - // `0`, `1`, `size_of::<$T>() - 1`, `size_of::<$T>()`. - assert_eq!( - [<$T _from_bytes>]::<0>(&bytes[..0]), - [<$T _max_from_byte_len>](0) - ); - assert_eq!( - [<$T _from_bytes>]::<1>(&bytes[..1]), - [<$T _max_from_byte_len>](1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>() - 1}>(&bytes[..size_of::<$T>() - 1]), - [<$T _max_from_byte_len>](size_of::<$T>() - 1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>()]), - [<$T _max_from_byte_len>](size_of::<$T>()) - ); - - assert_that_code!(|| [<$T _from_bytes>]::<{size_of::<$T>() + 1}>(&bytes[..])).panics(); - } - - #[test] - fn [<$T _from_bytes_allows_smaller_byte_lens>]() { - // This test same as `xxx_from_bytes_correctness` except - // we set the const param `BYTE_LEN` to - // `size_of::<$T>()` in all cases. Why? To ensure that - // `xxx_from_bytes` allows its arg to have length - // smaller than `BYTE_LEN`. 
- let bytes = [255; size_of::<$T>() + 1]; - - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..0]), - [<$T _max_from_byte_len>](0) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..1]), - [<$T _max_from_byte_len>](1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>() - 1]), - [<$T _max_from_byte_len>](size_of::<$T>() - 1) - ); - assert_eq!( - [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..size_of::<$T>()]), - [<$T _max_from_byte_len>](size_of::<$T>()) - ); - - assert_that_code!(|| [<$T _from_bytes>]::<{size_of::<$T>()}>(&bytes[..])).panics(); - } - } - }; - } - - uint_bytes_test_impl!(usize); - uint_bytes_test_impl!(u32); -} diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 39aa46bd5c..8dff3c7448 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -58,21 +58,25 @@ mod persistence_tests { use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ data::{ - vid_commitment, vid_disperse::ADVZDisperseShare, DaProposal, EpochNumber, - QuorumProposal2, QuorumProposalWrapper, VidDisperseShare, ViewNumber, + ns_table::parse_ns_table, vid_commitment, vid_disperse::VidDisperseShare2, DaProposal2, + EpochNumber, QuorumProposal2, QuorumProposalWrapper, VidCommitment, VidDisperseShare, + ViewNumber, }, event::{EventType, HotShotAction, LeafInfo}, - message::{Proposal, UpgradeLock}, - simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate, UpgradeCertificate}, + message::{convert_proposal, Proposal, UpgradeLock}, + simple_certificate::{ + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, + }, simple_vote::{NextEpochQuorumData2, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{ block_contents::BlockHeader, node_implementation::{ConsensusTime, Versions}, EncodeBytes, }, - vid::advz::advz_scheme, + vid::avidm::{init_avidm_param, AvidMScheme}, + vote::HasViewNumber, }; - use 
jf_vid::VidScheme; + use sequencer_utils::test_utils::setup_test; use std::sync::Arc; use testing::TestablePersistence; @@ -175,22 +179,27 @@ mod persistence_tests { ); let leaf: Leaf2 = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await - .into(); + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); let signature = PubKey::sign(&privkey, &[]).unwrap(); - let mut vid = ADVZDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), - payload_commitment: Default::default(), - share: disperse.shares[0].clone(), - common: disperse.common, + payload_commitment, + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), }; let mut quorum_proposal = Proposal { data: QuorumProposalWrapper:: { @@ -198,12 +207,11 @@ mod persistence_tests { epoch: None, block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -216,50 +224,51 @@ mod persistence_tests { let vid_share0 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share0).await.unwrap(); + 
storage.append_vid2(&vid_share0).await.unwrap(); assert_eq!( storage.load_vid_share(ViewNumber::new(0)).await.unwrap(), - Some(vid_share0.clone()) + Some(convert_proposal(vid_share0.clone())) ); vid.view_number = ViewNumber::new(1); let vid_share1 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share1).await.unwrap(); + storage.append_vid2(&vid_share1).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share1.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share1.clone())) ); vid.view_number = ViewNumber::new(2); let vid_share2 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share2).await.unwrap(); + storage.append_vid2(&vid_share2).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share2.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share2.clone())) ); vid.view_number = ViewNumber::new(3); let vid_share3 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share3).await.unwrap(); + storage.append_vid2(&vid_share3).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), - Some(vid_share3.clone()) + storage.load_vid_share(vid.view_number()).await.unwrap(), + Some(convert_proposal(vid_share3.clone())) ); let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }; let da_proposal = Proposal { @@ -276,7 +285,7 @@ mod persistence_tests { ); storage - .append_da(&da_proposal, vid_commitment) + .append_da2(&da_proposal, vid_commitment) .await .unwrap(); @@ -288,7 +297,7 @@ mod 
persistence_tests { let mut da_proposal1 = da_proposal.clone(); da_proposal1.data.view_number = ViewNumber::new(1); storage - .append_da(&da_proposal1.clone(), vid_commitment) + .append_da2(&da_proposal1.clone(), vid_commitment) .await .unwrap(); @@ -303,7 +312,7 @@ mod persistence_tests { let mut da_proposal2 = da_proposal1.clone(); da_proposal2.data.view_number = ViewNumber::new(2); storage - .append_da(&da_proposal2.clone(), vid_commitment) + .append_da2(&da_proposal2.clone(), vid_commitment) .await .unwrap(); @@ -318,7 +327,7 @@ mod persistence_tests { let mut da_proposal3 = da_proposal2.clone(); da_proposal3.data.view_number = ViewNumber::new(3); storage - .append_da(&da_proposal3.clone(), vid_commitment) + .append_da2(&da_proposal3.clone(), vid_commitment) .await .unwrap(); @@ -331,8 +340,9 @@ mod persistence_tests { ); let quorum_proposal1 = quorum_proposal.clone(); + storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); @@ -344,7 +354,7 @@ mod persistence_tests { quorum_proposal.data.proposal.view_number = ViewNumber::new(1); let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); @@ -360,7 +370,7 @@ mod persistence_tests { quorum_proposal.data.proposal.justify_qc.view_number = ViewNumber::new(1); let quorum_proposal3 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal3) + .append_quorum_proposal2(&quorum_proposal3) .await .unwrap(); @@ -379,7 +389,7 @@ mod persistence_tests { // This one should stick around after GC runs. 
let quorum_proposal4 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal4) + .append_quorum_proposal2(&quorum_proposal4) .await .unwrap(); @@ -456,7 +466,7 @@ mod persistence_tests { assert_eq!( storage.load_vid_share(ViewNumber::new(3)).await.unwrap(), - Some(vid_share3.clone()) + Some(convert_proposal(vid_share3.clone())) ); let proposals = storage.load_quorum_proposals().await.unwrap(); @@ -669,16 +679,21 @@ mod persistence_tests { .into(); let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let mut vid = ADVZDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), - payload_commitment: Default::default(), - share: disperse.shares[0].clone(), - common: disperse.common, + payload_commitment, + share: shares[0].clone(), recipient_key: pubkey, + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), } .to_proposal(&privkey) .unwrap() @@ -700,20 +715,20 @@ mod persistence_tests { epoch: None, }, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let mut da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), 
view_number: ViewNumber::new(0), + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: Default::default(), @@ -739,8 +754,8 @@ mod persistence_tests { // Add proposals. for (_, _, vid, da) in &chain { tracing::info!(?da, ?vid, "insert proposal"); - storage.append_da(da, vid_commitment).await.unwrap(); - storage.append_vid(vid).await.unwrap(); + storage.append_da2(da, vid_commitment).await.unwrap(); + storage.append_vid2(vid).await.unwrap(); } // Decide 2 leaves, but fail in event processing. @@ -866,17 +881,22 @@ mod persistence_tests { Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = disperse.commit; + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = ADVZDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -911,25 +931,23 @@ mod persistence_tests { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }, signature: 
block_payload_signature, _pd: Default::default(), }; storage - .append_da( - &da_proposal, - hotshot_query_service::VidCommitment::V0(payload_commitment), - ) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); + storage.append_vid2(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); @@ -955,7 +973,7 @@ mod persistence_tests { .await .unwrap() .unwrap(), - vid_share + convert_proposal(vid_share) ); assert_eq!( storage diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index c3b0abf7a9..853142fa9a 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -6,12 +6,12 @@ use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, Leaf, Leaf2, NetworkConfig, Payload, SeqTypes, }; -use hotshot_query_service::VidCommitment; use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -27,7 +27,7 @@ use hotshot_types::{ }; use std::sync::Arc; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, fs::{self, File, OpenOptions}, io::{Read, Seek, SeekFrom, Write}, ops::RangeInclusive, @@ -36,7 +36,7 @@ use std::{ use crate::ViewNumber; -use espresso_types::{downgrade_commitment_map, downgrade_leaf, upgrade_commitment_map}; +use espresso_types::upgrade_commitment_map; /// Options for file system backed persistence. 
#[derive(Parser, Clone, Debug)] @@ -100,10 +100,20 @@ impl PersistenceOptions for Options { let store_undecided_state = self.store_undecided_state; let view_retention = self.consensus_view_retention; + let migration_path = path.join("migration"); + let migrated = if migration_path.is_file() { + let bytes = fs::read(&migration_path) + .context(format!("unable to read migration from {}", migration_path.display()))?; + bincode::deserialize(&bytes).context("malformed migration file")? + } else { + HashSet::new() + }; + Ok(Persistence { store_undecided_state, inner: Arc::new(RwLock::new(Inner { path, + migrated, view_retention, })), }) @@ -129,6 +139,7 @@ pub struct Persistence { struct Inner { path: PathBuf, view_retention: u64, + migrated: HashSet, } impl Inner { @@ -136,6 +147,10 @@ impl Inner { self.path.join("hotshot.cfg") } + fn migration(&self) -> PathBuf { + self.path.join("migration") + } + fn voted_view_path(&self) -> PathBuf { self.path.join("highest_voted_view") } @@ -145,6 +160,10 @@ impl Inner { self.path.join("decided_leaves") } + fn decided_leaf2_path(&self) -> PathBuf { + self.path.join("decided_leaves2") + } + /// The path from previous versions where there was only a single file for anchor leaves. 
fn legacy_anchor_leaf_path(&self) -> PathBuf { self.path.join("anchor_leaf") @@ -154,18 +173,34 @@ impl Inner { self.path.join("vid") } + fn vid2_dir_path(&self) -> PathBuf { + self.path.join("vid2") + } + fn da_dir_path(&self) -> PathBuf { self.path.join("da") } + fn da2_dir_path(&self) -> PathBuf { + self.path.join("da2") + } + fn undecided_state_path(&self) -> PathBuf { self.path.join("undecided_state") } + fn undecided2_state_path(&self) -> PathBuf { + self.path.join("undecided_state2") + } + fn quorum_proposals_dir_path(&self) -> PathBuf { self.path.join("quorum_proposals") } + fn quorum_proposals2_dir_path(&self) -> PathBuf { + self.path.join("quorum_proposals2") + } + fn upgrade_certificate_dir_path(&self) -> PathBuf { self.path.join("upgrade_certificate") } @@ -174,6 +209,20 @@ impl Inner { self.path.join("next_epoch_quorum_certificate") } + fn update_migration(&mut self) -> anyhow::Result<()> { + let path = self.migration(); + let bytes = bincode::serialize(&self.migrated)?; + + self.replace( + &path, + |_| Ok(true), + |mut file| { + file.write_all(&bytes)?; + Ok(()) + }, + ) + } + /// Overwrite a file if a condition is met. /// /// The file at `path`, if it exists, is opened in read mode and passed to `pred`. If `pred` @@ -225,10 +274,10 @@ impl Inner { ) -> anyhow::Result<()> { let prune_view = ViewNumber::new(decided_view.saturating_sub(self.view_retention)); - self.prune_files(self.da_dir_path(), prune_view, None, prune_intervals)?; - self.prune_files(self.vid_dir_path(), prune_view, None, prune_intervals)?; + self.prune_files(self.da2_dir_path(), prune_view, None, prune_intervals)?; + self.prune_files(self.vid2_dir_path(), prune_view, None, prune_intervals)?; self.prune_files( - self.quorum_proposals_dir_path(), + self.quorum_proposals2_dir_path(), prune_view, None, prune_intervals, @@ -236,7 +285,7 @@ impl Inner { // Save the most recent leaf as it will be our anchor point if the node restarts. 
self.prune_files( - self.decided_leaf_path(), + self.decided_leaf2_path(), prune_view, Some(decided_view), prune_intervals, @@ -287,7 +336,7 @@ impl Inner { // separate event for each leaf because it is possible we have non-consecutive leaves in our // storage, which would not be valid as a single decide with a single leaf chain. let mut leaves = BTreeMap::new(); - for (v, path) in view_files(self.decided_leaf_path())? { + for (v, path) in view_files(self.decided_leaf2_path())? { if v > view { continue; } @@ -295,7 +344,7 @@ impl Inner { let bytes = fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; let (mut leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", path.display()))?; // Include the VID share if available. @@ -316,9 +365,8 @@ impl Inner { } let info = LeafInfo { - leaf: leaf.into(), - vid_share: vid_share.map(Into::into), - + leaf, + vid_share, // Note: the following fields are not used in Decide event processing, and should be // removed. For now, we just default them. 
state: Default::default(), @@ -347,7 +395,7 @@ impl Inner { .handle_event(&Event { view_number: view, event: EventType::Decide { - qc: Arc::new(qc.to_qc2()), + qc: Arc::new(qc), leaf_chain: Arc::new(vec![leaf]), block_size: None, }, @@ -379,8 +427,8 @@ impl Inner { fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { - let dir_path = self.da_dir_path(); + ) -> anyhow::Result>>> { + let dir_path = self.da2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -390,7 +438,7 @@ impl Inner { let da_bytes = fs::read(file_path)?; - let da_proposal: Proposal> = + let da_proposal: Proposal> = bincode::deserialize(&da_bytes)?; Ok(Some(da_proposal)) } @@ -398,8 +446,8 @@ impl Inner { fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { - let dir_path = self.vid_dir_path(); + ) -> anyhow::Result>>> { + let dir_path = self.vid2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -408,31 +456,27 @@ impl Inner { } let vid_share_bytes = fs::read(file_path)?; - let vid_share: Proposal> = + let vid_share: Proposal> = bincode::deserialize(&vid_share_bytes)?; Ok(Some(vid_share)) } fn load_anchor_leaf(&self) -> anyhow::Result)>> { - if self.decided_leaf_path().is_dir() { + if self.decided_leaf2_path().is_dir() { let mut anchor: Option<(Leaf2, QuorumCertificate2)> = None; // Return the latest decided leaf. - for (_, path) in view_files(self.decided_leaf_path())? { + for (_, path) in view_files(self.decided_leaf2_path())? 
{ let bytes = fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; - let (leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + let (leaf2, qc2) = + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", path.display()))?; if let Some((anchor_leaf, _)) = &anchor { - if leaf.view_number() > anchor_leaf.view_number() { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); + if leaf2.view_number() > anchor_leaf.view_number() { anchor = Some((leaf2, qc2)); } } else { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); anchor = Some((leaf2, qc2)); } } @@ -503,7 +547,7 @@ impl SequencerPersistence for Persistence { consumer: &impl EventConsumer, ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; - let path = inner.decided_leaf_path(); + let path = inner.decided_leaf2_path(); // Ensure the anchor leaf directory exists. fs::create_dir_all(&path).context("creating anchor leaf directory")?; @@ -539,9 +583,7 @@ impl SequencerPersistence for Persistence { Ok(false) }, |mut file| { - let leaf = downgrade_leaf(info.leaf.clone()); - let qc = qc2.to_qc(); - let bytes = bincode::serialize(&(&leaf, qc))?; + let bytes = bincode::serialize(&(&info.leaf.clone(), qc2))?; file.write_all(&bytes)?; Ok(()) }, @@ -578,27 +620,27 @@ impl SequencerPersistence for Persistence { &self, ) -> anyhow::Result, BTreeMap>)>> { let inner = self.inner.read().await; - let path = inner.undecided_state_path(); + let path = inner.undecided2_state_path(); if !path.is_file() { return Ok(None); } let bytes = fs::read(&path).context("read")?; - let value: (CommitmentMap, _) = + let value: (CommitmentMap, _) = bincode::deserialize(&bytes).context("deserialize")?; - Ok(Some((upgrade_commitment_map(value.0), value.1))) + Ok(Some((value.0, value.1))) } async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_da_proposal(view) } 
async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_vid_share(view) } @@ -634,11 +676,13 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); - let dir_path = inner.vid_dir_path(); + + let dir_path = inner.vid2_dir_path(); fs::create_dir_all(dir_path.clone()).context("failed to create vid dir")?; let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( &file_path, |_| { @@ -648,6 +692,8 @@ impl SequencerPersistence for Persistence { Ok(false) }, |mut file| { + let proposal: Proposal> = + convert_proposal(proposal.clone()); let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; file.write_all(&proposal_bytes)?; Ok(()) @@ -712,19 +758,17 @@ impl SequencerPersistence for Persistence { }, ) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - if !self.store_undecided_state { return Ok(()); } let mut inner = self.inner.write().await; - let path = &inner.undecided_state_path(); + let path = &inner.undecided2_state_path(); inner.replace( path, |_| { @@ -739,15 +783,13 @@ impl SequencerPersistence for Persistence { }, ) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); fs::create_dir_all(dir_path.clone()).context("failed to create proposals dir")?; @@ -773,7 +815,7 @@ impl SequencerPersistence for Persistence { let inner = 
self.inner.read().await; // First, get the proposal directory. - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); if !dir_path.is_dir() { return Ok(Default::default()); } @@ -782,7 +824,7 @@ impl SequencerPersistence for Persistence { let mut map = BTreeMap::new(); for (view, path) in view_files(&dir_path)? { let proposal_bytes = fs::read(path)?; - let proposal: Proposal> = + let proposal: Proposal> = match bincode::deserialize(&proposal_bytes) { Ok(proposal) => proposal, Err(err) => { @@ -810,13 +852,12 @@ impl SequencerPersistence for Persistence { view: ViewNumber, ) -> anyhow::Result>> { let inner = self.inner.read().await; - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); let file_path = dir_path.join(view.to_string()).with_extension("txt"); let bytes = fs::read(file_path)?; - let proposal: Proposal> = bincode::deserialize(&bytes)?; - // TODO: rather than converting, we should store the value of QuorumProposalWrapper::with_epoch - let proposal_wrapper = convert_proposal(proposal); - Ok(proposal_wrapper) + let proposal = bincode::deserialize(&bytes)?; + + Ok(proposal) } async fn load_upgrade_certificate( @@ -893,14 +934,338 @@ impl SequencerPersistence for Persistence { )) } - async fn migrate_consensus( + async fn append_da2( &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, + proposal: &Proposal>, + _vid_commit: VidCommitment, ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 + let mut inner = self.inner.write().await; + let view_number = proposal.data.view_number().u64(); + let dir_path = inner.da2_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create da dir")?; + + let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( + &file_path, + |_| { + // Don't overwrite an existing proposal, but 
warn about it as this is likely not + // intended behavior from HotShot. + tracing::warn!(view_number, "duplicate DA proposal"); + Ok(false) + }, + |mut file| { + let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) + } + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("anchor_leaf") { + tracing::info!("decided leaves already migrated"); + return Ok(()); + } + + let new_leaf_dir = inner.decided_leaf2_path(); + + fs::create_dir_all(new_leaf_dir.clone()).context("failed to create anchor leaf 2 dir")?; + + let old_leaf_dir = inner.decided_leaf_path(); + if !old_leaf_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating decided leaves.."); + for entry in fs::read_dir(old_leaf_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; + let (leaf, qc) = bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + .context(format!("parsing decided leaf {}", path.display()))?; + + let leaf2: Leaf2 = leaf.into(); + let qc2 = qc.to_qc2(); + + let new_leaf_path = new_leaf_dir.join(view.to_string()).with_extension("txt"); + + inner.replace( + &new_leaf_path, + |_| { + tracing::warn!(view, "duplicate decided leaf"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&(&leaf2.clone(), qc2))?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "decided leaves migration progress"); + } + } + + inner.migrated.insert("anchor_leaf".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated decided leaves"); + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("da_proposal") { + tracing::info!("da proposals already migrated"); + return Ok(()); + } + + let new_da_dir = inner.da2_dir_path(); + + fs::create_dir_all(new_da_dir.clone()).context("failed to create da proposals 2 dir")?; + + let old_da_dir = inner.da_dir_path(); + if !old_da_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating da proposals.."); + + for entry in fs::read_dir(old_da_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading da proposal {}", path.display()))?; + let proposal = bincode::deserialize::>>(&bytes) + .context(format!("parsing da proposal {}", path.display()))?; + + let new_da_path = new_da_dir.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = convert_proposal(proposal); + + inner.replace( + &new_da_path, + |_| { + tracing::warn!(view, "duplicate DA proposal 2"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "DA proposals migration progress"); + } + } + + inner.migrated.insert("da_proposal".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated da proposals"); + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("vid_share") { + tracing::info!("vid shares already migrated"); + return Ok(()); + } + + let new_vid_dir = inner.vid2_dir_path(); + + fs::create_dir_all(new_vid_dir.clone()).context("failed to create vid shares 2 dir")?; + + let old_vid_dir = inner.vid_dir_path(); + if !old_vid_dir.is_dir() { + return Ok(()); + } + + tracing::warn!("migrating vid shares.."); + + for entry in fs::read_dir(old_vid_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = fs::read(&path).context(format!("reading vid share {}", path.display()))?; + let proposal = + bincode::deserialize::>>(&bytes) + .context(format!("parsing vid share {}", path.display()))?; + + let new_vid_path = new_vid_dir.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = + convert_proposal(proposal); + + inner.replace( + &new_vid_path, + |_| { + tracing::warn!(view, "duplicate VID share "); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "VID shares migration progress"); + } + } + + inner.migrated.insert("vid_share".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated vid shares"); + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + if inner.migrated.contains("undecided_state") { + tracing::info!("undecided state already migrated"); + return Ok(()); + } + + let new_undecided_state_path = &inner.undecided2_state_path(); + + let old_undecided_state_path = inner.undecided_state_path(); + + if !old_undecided_state_path.is_file() { + return Ok(()); + } + + let bytes = fs::read(&old_undecided_state_path).context("read")?; + let (leaves, state): (CommitmentMap, QuorumCertificate) = + bincode::deserialize(&bytes).context("deserialize")?; + + let leaves2 = upgrade_commitment_map(leaves); + let state2 = state.to_qc2(); + + tracing::warn!("migrating undecided state.."); + inner.replace( + new_undecided_state_path, + |_| { + // Always overwrite the previous file. 
+ Ok(true) + }, + |mut file| { + let bytes = bincode::serialize(&(leaves2, state2)) + .context("serializing undecided state2")?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + inner.migrated.insert("undecided_state".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated undecided state"); + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("quorum_proposals") { + tracing::info!("quorum proposals already migrated"); + return Ok(()); + } + + let new_quorum_proposals_dir = inner.quorum_proposals2_dir_path(); + + fs::create_dir_all(new_quorum_proposals_dir.clone()) + .context("failed to create quorum proposals 2 dir")?; + + let old_quorum_proposals_dir = inner.quorum_proposals_dir_path(); + if !old_quorum_proposals_dir.is_dir() { + tracing::info!("no existing quorum proposals found for migration"); + return Ok(()); + } + + tracing::warn!("migrating quorum proposals.."); + for entry in fs::read_dir(old_quorum_proposals_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading quorum proposal {}", path.display()))?; + let proposal = + bincode::deserialize::>>(&bytes) + .context(format!("parsing quorum proposal {}", path.display()))?; + + let new_file_path = new_quorum_proposals_dir + .join(view.to_string()) + .with_extension("txt"); + + let proposal2: Proposal> = + convert_proposal(proposal); + + inner.replace( + &new_file_path, + |_| { + tracing::warn!(view, "duplicate Quorum proposal2 "); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + + if view % 100 == 0 { + tracing::info!(view, "Quorum proposals migration progress"); + } + } + + inner.migrated.insert("quorum_proposals".to_string()); + inner.update_migration()?; + tracing::warn!("successfully migrated quorum proposals"); + Ok(()) + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } } @@ -1026,12 +1391,29 @@ mod test { use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_types::data::{vid_commitment, QuorumProposal2}; + use hotshot_types::traits::node_implementation::Versions; + + use hotshot_types::vid::advz::advz_scheme; use sequencer_utils::test_utils::setup_test; + use vbs::version::StaticVersionType; + use serde_json::json; + use std::marker::PhantomData; use super::*; use crate::persistence::testing::TestablePersistence; + use crate::BLSPubKey; + use committable::Committable; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{Header, Leaf, ValidatedState}; + + use hotshot_types::{ + simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::EncodeBytes, + }; + use 
jf_vid::VidScheme; + #[test] fn test_config_migrations_add_builder_urls() { let before = json!({ @@ -1130,6 +1512,211 @@ mod test { assert_eq!(migrate_network_config(before.clone()).unwrap(), before); } + #[tokio::test(flavor = "multi_thread")] + pub async fn test_consensus_migration() { + setup_test(); + let rows = 300; + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + let storage = opt.create().await.unwrap(); + + let inner = storage.inner.read().await; + + let decided_leaves_path = inner.decided_leaf_path(); + fs::create_dir_all(decided_leaves_path.clone()).expect("failed to create proposals dir"); + + let qp_dir_path = inner.quorum_proposals_dir_path(); + fs::create_dir_all(qp_dir_path.clone()).expect("failed to create proposals dir"); + drop(inner); + + for i in 0..rows { + let view = ViewNumber::new(i); + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let (payload, metadata) = + Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment::( + &payload_bytes, + &metadata.encode(), + 4, + ::Base::VERSION, + ); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, 
&bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + + let proposal = Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: PhantomData, + }; + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); + + let mut inner = storage.inner.write().await; + + tracing::debug!("inserting decided leaves"); + let file_path = decided_leaves_path + .join(view.to_string()) + .with_extension("txt"); + + tracing::debug!("inserting decided leaves"); + + inner + .replace( + &file_path, + |_| Ok(true), + |mut file| { + let bytes = bincode::serialize(&(&leaf.clone(), justify_qc))?; + file.write_all(&bytes)?; + Ok(()) + }, + ) + .expect("replace decided leaves"); + + let file_path = qp_dir_path.join(view.to_string()).with_extension("txt"); + + tracing::debug!("inserting qc for {view}"); + + inner + .replace( + &file_path, + |_| Ok(true), + |mut file| { + let proposal_bytes = + bincode::serialize(&proposal).context("serialize proposal")?; + + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) + .unwrap(); + + drop(inner); + let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap(); + + let vid = ADVZDisperseShare:: { + view_number: ViewNumber::new(i), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(i), + }; + + let block_payload_signature = + BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload"); + + let da_proposal = Proposal { + data: da, + signature: block_payload_signature, + _pd: Default::default(), + }; + + tracing::debug!("inserting vid for 
{view}"); + storage + .append_vid(&vid.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + + tracing::debug!("inserting da for {view}"); + storage + .append_da(&da_proposal, VidCommitment::V0(disperse.commit)) + .await + .unwrap(); + } + + storage.migrate_consensus().await.unwrap(); + let inner = storage.inner.read().await; + let decided_leaves = fs::read_dir(inner.decided_leaf2_path()).unwrap(); + let decided_leaves_count = decided_leaves + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + decided_leaves_count, rows as usize, + "decided leaves count does not match", + ); + + let da_proposals = fs::read_dir(inner.da2_dir_path()).unwrap(); + let da_proposals_count = da_proposals + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + da_proposals_count, rows as usize, + "da proposals does not match", + ); + + let vids = fs::read_dir(inner.vid2_dir_path()).unwrap(); + let vids_count = vids + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!(vids_count, rows as usize, "vid shares count does not match",); + + let qps = fs::read_dir(inner.quorum_proposals2_dir_path()).unwrap(); + let qps_count = qps + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + qps_count, rows as usize, + "quorum proposals count does not match", + ); + } + #[tokio::test(flavor = "multi_thread")] async fn test_load_quorum_proposals_invalid_extension() { setup_test(); @@ -1138,9 +1725,7 @@ mod test { let storage = Persistence::connect(&tmp).await; // Generate a couple of valid quorum proposals. 
- let leaf: Leaf2 = Leaf::genesis::(&Default::default(), &NodeState::mock()) - .await - .into(); + let leaf = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let privkey = PubKey::generated_from_seed_indexed([0; 32], 1).1; let signature = PubKey::sign(&privkey, &[]).unwrap(); let mut quorum_proposal = Proposal { @@ -1149,12 +1734,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1168,21 +1752,21 @@ mod test { // Store quorum proposals. let quorum_proposal1 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); quorum_proposal.data.proposal.view_number = ViewNumber::new(1); let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); // Change one of the file extensions. It can happen that we end up with files with the wrong // extension if, for example, the node is killed before cleaning up a swap file. fs::rename( - tmp.path().join("quorum_proposals/1.txt"), - tmp.path().join("quorum_proposals/1.swp"), + tmp.path().join("quorum_proposals2/1.txt"), + tmp.path().join("quorum_proposals2/1.swp"), ) .unwrap(); @@ -1214,12 +1798,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1231,16 +1814,16 @@ mod test { }; // First store an invalid quorum proposal. 
- fs::create_dir_all(tmp.path().join("quorum_proposals")).unwrap(); + fs::create_dir_all(tmp.path().join("quorum_proposals2")).unwrap(); fs::write( - tmp.path().join("quorum_proposals/0.txt"), + tmp.path().join("quorum_proposals2/0.txt"), "invalid data".as_bytes(), ) .unwrap(); // Store valid quorum proposal. storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 56b69bb938..c47701a3ea 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -5,14 +5,14 @@ use anyhow::bail; use async_trait::async_trait; use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, - Leaf, Leaf2, NetworkConfig, + Leaf2, NetworkConfig, }; -use hotshot_query_service::VidCommitment; use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposalWrapper, VidCommitment, + VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, @@ -99,14 +99,14 @@ impl SequencerPersistence for NoStorage { async fn load_da_proposal( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } async fn load_vid_share( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } @@ -155,14 +155,14 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result<()> { Ok(()) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, _leaves: CommitmentMap, _state: BTreeMap>, ) -> anyhow::Result<()> { Ok(()) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, _proposal: &Proposal>, ) -> anyhow::Result<()> { @@ -175,16 +175,6 @@ impl SequencerPersistence 
for NoStorage { Ok(()) } - async fn migrate_consensus( - &self, - _: fn(Leaf) -> Leaf2, - _: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - Ok(()) - } - async fn store_next_epoch_quorum_certificate( &self, _high_qc: NextEpochQuorumCertificate2, @@ -197,4 +187,38 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result>> { Ok(None) } + + async fn append_da2( + &self, + _proposal: &Proposal>, + _vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_proposal2( + &self, + _proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + Ok(()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 0ed4716cdd..e2383a81ca 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -5,7 +5,7 @@ use committable::Committable; use derivative::Derivative; use derive_more::derive::{From, Into}; use espresso_types::{ - downgrade_commitment_map, downgrade_leaf, parse_duration, parse_size, upgrade_commitment_map, + parse_duration, parse_size, upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence, StateCatchup}, BackoffParams, BlockMerkleTree, FeeMerkleTree, Leaf, Leaf2, NetworkConfig, Payload, }; @@ -27,13 +27,14 @@ use hotshot_query_service::{ Provider, }, merklized_state::MerklizedState, - VidCommitment, VidCommon, + VidCommon, }; use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, 
QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposalWrapper, VidCommitment, + VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -646,9 +647,10 @@ impl Persistence { }; let mut parent = None; - let mut rows = query("SELECT leaf, qc FROM anchor_leaf WHERE view >= $1 ORDER BY view") - .bind(from_view) - .fetch(tx.as_mut()); + let mut rows = + query("SELECT leaf, qc FROM anchor_leaf2 WHERE view >= $1 ORDER BY view") + .bind(from_view) + .fetch(tx.as_mut()); let mut leaves = vec![]; let mut final_qc = None; while let Some(row) = rows.next().await { @@ -663,9 +665,9 @@ impl Persistence { }; let leaf_data: Vec = row.get("leaf"); - let leaf = bincode::deserialize::(&leaf_data)?; + let leaf = bincode::deserialize::(&leaf_data)?; let qc_data: Vec = row.get("qc"); - let qc = bincode::deserialize::>(&qc_data)?; + let qc = bincode::deserialize::>(&qc_data)?; let height = leaf.block_header().block_number(); // Ensure we are only dealing with a consecutive chain of leaves. We don't want to @@ -701,7 +703,7 @@ impl Persistence { // Collect VID shares for the decide event. let mut vid_shares = tx .fetch_all( - query("SELECT view, data FROM vid_share where view >= $1 AND view <= $2") + query("SELECT view, data FROM vid_share2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -711,7 +713,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let vid_proposal = bincode::deserialize::< - Proposal>, + Proposal>, >(&data)?; Ok((view as u64, vid_proposal.data)) }) @@ -720,7 +722,7 @@ impl Persistence { // Collect DA proposals for the decide event. 
let mut da_proposals = tx .fetch_all( - query("SELECT view, data FROM da_proposal where view >= $1 AND view <= $2") + query("SELECT view, data FROM da_proposal2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -730,7 +732,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let da_proposal = - bincode::deserialize::>>(&data)?; + bincode::deserialize::>>(&data)?; Ok((view as u64, da_proposal.data)) }) .collect::>>()?; @@ -765,8 +767,8 @@ impl Persistence { } LeafInfo { - leaf: leaf.into(), - vid_share: vid_share.map(Into::into), + leaf, + vid_share, // Note: the following fields are not used in Decide event processing, and // should be removed. For now, we just default them. state: Default::default(), @@ -782,7 +784,7 @@ impl Persistence { view_number: to_view, event: EventType::Decide { leaf_chain: Arc::new(leaf_chain), - qc: Arc::new(final_qc.to_qc2()), + qc: Arc::new(final_qc), block_size: None, }, }) @@ -805,25 +807,25 @@ impl Persistence { // Delete the data that has been fully processed. 
tx.execute( - query("DELETE FROM vid_share where view >= $1 AND view <= $2") + query("DELETE FROM vid_share2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM da_proposal where view >= $1 AND view <= $2") + query("DELETE FROM da_proposal2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM quorum_proposals where view >= $1 AND view <= $2") + query("DELETE FROM quorum_proposals2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) .await?; tx.execute( - query("DELETE FROM quorum_certificate where view >= $1 AND view <= $2") + query("DELETE FROM quorum_certificate2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -833,7 +835,7 @@ impl Persistence { // less than the given value). This is necessary to ensure that, in case of a restart, // we can resume from the last decided leaf. tx.execute( - query("DELETE FROM anchor_leaf WHERE view >= $1 AND view < $2") + query("DELETE FROM anchor_leaf2 WHERE view >= $1 AND view < $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -896,11 +898,11 @@ impl Persistence { } const PRUNE_TABLES: &[&str] = &[ - "anchor_leaf", - "vid_share", - "da_proposal", - "quorum_proposals", - "quorum_certificate", + "anchor_leaf2", + "vid_share2", + "da_proposal2", + "quorum_proposals2", + "quorum_certificate2", ]; async fn prune_to_view(tx: &mut Transaction, view: u64) -> anyhow::Result<()> { @@ -977,14 +979,12 @@ impl SequencerPersistence for Persistence { // because we already store it separately, as part of the DA proposal. Storing it // here contributes to load on the DB for no reason, so we remove it before // serializing the leaf. 
- let mut leaf = downgrade_leaf(info.leaf.clone()); + let mut leaf = info.leaf.clone(); leaf.unfill_block_payload(); - let qc = qc2.to_qc(); - - let view = qc.view_number.u64() as i64; + let view = qc2.view_number.u64() as i64; let leaf_bytes = bincode::serialize(&leaf)?; - let qc_bytes = bincode::serialize(&qc)?; + let qc_bytes = bincode::serialize(&qc2)?; Ok((view, leaf_bytes, qc_bytes)) }) .collect::>>()?; @@ -993,7 +993,7 @@ impl SequencerPersistence for Persistence { // event consumer later fails, there is no need to abort the storage of the leaves. let mut tx = self.db.write().await?; - tx.upsert("anchor_leaf", ["view", "leaf", "qc"], ["view"], values) + tx.upsert("anchor_leaf2", ["view", "leaf", "qc"], ["view"], values) .await?; tx.commit().await?; @@ -1036,26 +1036,24 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_optional("SELECT leaf, qc FROM anchor_leaf ORDER BY view DESC LIMIT 1") + .fetch_optional("SELECT leaf, qc FROM anchor_leaf2 ORDER BY view DESC LIMIT 1") .await? else { return Ok(None); }; let leaf_bytes: Vec = row.get("leaf"); - let leaf: Leaf = bincode::deserialize(&leaf_bytes)?; - let leaf2: Leaf2 = leaf.into(); + let leaf2: Leaf2 = bincode::deserialize(&leaf_bytes)?; let qc_bytes: Vec = row.get("qc"); - let qc: QuorumCertificate = bincode::deserialize(&qc_bytes)?; - let qc2 = qc.to_qc2(); + let qc2: QuorumCertificate2 = bincode::deserialize(&qc_bytes)?; Ok(Some((leaf2, qc2))) } async fn load_anchor_view(&self) -> anyhow::Result { let mut tx = self.db.read().await?; - let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf") + let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf2") .fetch_one(tx.as_mut()) .await?; Ok(ViewNumber::new(view as u64)) @@ -1068,15 +1066,14 @@ impl SequencerPersistence for Persistence { .db .read() .await? 
- .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .fetch_optional("SELECT leaves, state FROM undecided_state2 WHERE id = 0") .await? else { return Ok(None); }; let leaves_bytes: Vec = row.get("leaves"); - let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; - let leaves2 = upgrade_commitment_map(leaves); + let leaves2: CommitmentMap = bincode::deserialize(&leaves_bytes)?; let state_bytes: Vec = row.get("state"); let state = bincode::deserialize(&state_bytes)?; @@ -1087,13 +1084,13 @@ impl SequencerPersistence for Persistence { async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() .await? .fetch_optional( - query("SELECT data FROM da_proposal where view = $1").bind(view.u64() as i64), + query("SELECT data FROM da_proposal2 where view = $1").bind(view.u64() as i64), ) .await?; @@ -1108,13 +1105,13 @@ impl SequencerPersistence for Persistence { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() .await? .fetch_optional( - query("SELECT data FROM vid_share where view = $1").bind(view.u64() as i64), + query("SELECT data FROM vid_share2 where view = $1").bind(view.u64() as i64), ) .await?; @@ -1134,7 +1131,7 @@ impl SequencerPersistence for Persistence { .db .read() .await? 
- .fetch_all("SELECT * FROM quorum_proposals") + .fetch_all("SELECT * FROM quorum_proposals2") .await?; Ok(BTreeMap::from_iter( @@ -1143,9 +1140,8 @@ impl SequencerPersistence for Persistence { let view: i64 = row.get("view"); let view_number: ViewNumber = ViewNumber::new(view.try_into()?); let bytes: Vec = row.get("data"); - let proposal: Proposal> = - bincode::deserialize(&bytes)?; - Ok((view_number, convert_proposal(proposal))) + let proposal = bincode::deserialize(&bytes)?; + Ok((view_number, proposal)) }) .collect::>>()?, )) @@ -1157,12 +1153,12 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result>> { let mut tx = self.db.read().await?; let (data,) = - query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE view = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_proposals2 WHERE view = $1 LIMIT 1") .bind(view.u64() as i64) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - let proposal = convert_proposal(proposal); + let proposal = bincode::deserialize(&data)?; + Ok(proposal) } @@ -1191,11 +1187,13 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result<()> { let view = proposal.data.view_number.u64(); let payload_hash = proposal.data.payload_commitment; - let data_bytes = bincode::serialize(proposal).unwrap(); + let proposal: Proposal> = + convert_proposal(proposal.clone()); + let data_bytes = bincode::serialize(&proposal).unwrap(); let mut tx = self.db.write().await?; tx.upsert( - "vid_share", + "vid_share2", ["view", "data", "payload_hash"], ["view"], [(view as i64, data_bytes, payload_hash.to_string())], @@ -1244,42 +1242,18 @@ impl SequencerPersistence for Persistence { tx.execute(query(&stmt).bind(view.u64() as i64)).await?; tx.commit().await } - async fn update_undecided_state( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - - if !self.store_undecided_state { - return Ok(()); - } - let 
leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; - let state_bytes = bincode::serialize(&state).context("serializing state")?; - - let mut tx = self.db.write().await?; - tx.upsert( - "undecided_state", - ["id", "leaves", "state"], - ["id"], - [(0_i32, leaves_bytes, state_bytes)], - ) - .await?; - tx.commit().await - } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let view_number = proposal.data.view_number().u64(); + let proposal_bytes = bincode::serialize(&proposal).context("serializing proposal")?; - let leaf_hash = Committable::commit(&Leaf::from_quorum_proposal(&proposal.data)); + let leaf_hash = Committable::commit(&Leaf2::from_quorum_proposal(&proposal.data)); let mut tx = self.db.write().await?; tx.upsert( - "quorum_proposals", + "quorum_proposals2", ["view", "leaf_hash", "data"], ["view"], [(view_number as i64, leaf_hash.to_string(), proposal_bytes)], @@ -1287,10 +1261,10 @@ impl SequencerPersistence for Persistence { .await?; // We also keep track of any QC we see in case we need it to recover our archival storage. 
- let justify_qc = &proposal.data.justify_qc; + let justify_qc = proposal.data.justify_qc(); let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?; tx.upsert( - "quorum_certificate", + "quorum_certificate2", ["view", "leaf_hash", "data"], ["view"], [( @@ -1343,14 +1317,483 @@ impl SequencerPersistence for Persistence { tx.commit().await } - async fn migrate_consensus( - &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'anchor_leaf'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("decided leaves already migrated"); + return Ok(()); + } + + tracing::warn!("migrating decided leaves.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf: Vec = row.try_get("leaf")?; + let qc: Vec = row.try_get("qc")?; + let leaf1: Leaf = bincode::deserialize(&leaf)?; + let qc1: QuorumCertificate = bincode::deserialize(&qc)?; + let view: i64 = row.try_get("view")?; + + let leaf2: Leaf2 = leaf1.into(); + let qc2: QuorumCertificate2 = qc1.to_qc2(); + + let leaf2_bytes = bincode::serialize(&leaf2)?; + let qc2_bytes = bincode::serialize(&qc2)?; + + values.push((view, leaf2_bytes, qc2_bytes)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO anchor_leaf2 (view, leaf, qc) "); + + 
query_builder.push_values(values.into_iter(), |mut b, (view, leaf, qc)| { + b.push_bind(view).push_bind(leaf).push_bind(qc); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + offset += batch_size; + tracing::info!("anchor leaf migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated decided leaves"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("anchor_leaf".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for anchor_leaf"); + + Ok(()) + } + + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'da_proposal'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("da proposals migration already done"); + return Ok(()); + } + + tracing::warn!("migrating da proposals.."); + + loop { + let mut tx = self.db.read().await?; + let rows = query( + "SELECT payload_hash, data FROM da_proposal ORDER BY view LIMIT $1 OFFSET $2", + ) + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let data: Vec = row.try_get("data")?; + let payload_hash: String = row.try_get("payload_hash")?; + + let da_proposal: DaProposal = bincode::deserialize(&data)?; + let da_proposal2: DaProposal2 = da_proposal.into(); + + let view = da_proposal2.view_number.u64() as i64; + let data = bincode::serialize(&da_proposal2)?; + + values.push((view, payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + 
sqlx::QueryBuilder::new("INSERT INTO da_proposal2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + tx.commit().await?; + + tracing::info!("DA proposals migration progress: {} rows", offset); + offset += batch_size; + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated da proposals"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("da_proposal".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for da_proposal"); + + Ok(()) + } + + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'vid_share'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("vid_share migration already done"); + return Ok(()); + } + + tracing::warn!("migrating vid shares.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT payload_hash, data FROM vid_share ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let data: Vec = row.try_get("data")?; + let payload_hash: String = row.try_get("payload_hash")?; + + let vid_share: ADVZDisperseShare = bincode::deserialize(&data)?; + let vid_share2: VidDisperseShare = vid_share.into(); + + let view = vid_share2.view_number().u64() as i64; + let data = bincode::serialize(&vid_share2)?; + + values.push((view, 
payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO vid_share2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + tracing::info!("VID shares migration progress: {} rows", offset); + offset += batch_size; + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated vid shares"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("vid_share".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for vid_share"); + + Ok(()) + } + + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut tx = self.db.read().await?; + + let row = tx + .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'undecided_state'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("undecided state migration already done"); + + return Ok(()); + } + + tracing::warn!("migrating undecided state.."); + + if let Some(row) = row { + let leaves_bytes: Vec = row.try_get("leaves")?; + let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; + + let leaves2 = upgrade_commitment_map(leaves); + let leaves2_bytes = bincode::serialize(&leaves2)?; + let state_bytes: Vec = row.try_get("state")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves2_bytes, state_bytes)], + ) + .await?; + tx.commit().await?; + }; + + tracing::warn!("migrated undecided 
state"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("undecided_state".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for undecided_state"); + + Ok(()) + } + + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'quorum_proposals'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("quorum proposals migration already done"); + return Ok(()); + } + + tracing::warn!("migrating quorum proposals.."); + + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf_hash, data FROM quorum_proposals ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + if rows.is_empty() { + break; + } + + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data: Vec = row.try_get("data")?; + + let quorum_proposal: Proposal> = + bincode::deserialize(&data)?; + let quorum_proposal2: Proposal> = + convert_proposal(quorum_proposal); + + let view = quorum_proposal2.data.view_number().u64() as i64; + let data = bincode::serialize(&quorum_proposal2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_proposals2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + + offset += batch_size; + 
tracing::info!("quorum proposals migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated quorum proposals"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("quorum_proposals".to_string(), true)], + ) + .await?; + tx.commit().await?; + + tracing::info!("updated epoch_migration table for quorum_proposals"); + + Ok(()) + } + + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + let batch_size: i64 = 10000; + let mut offset: i64 = 0; + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificate'", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("quorum certificates migration already done"); + return Ok(()); + } + + tracing::warn!("migrating quorum certificates.."); + loop { + let mut tx = self.db.read().await?; + let rows = + query("SELECT view, leaf_hash, data FROM quorum_certificate ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data: Vec = row.try_get("data")?; + + let qc: QuorumCertificate = bincode::deserialize(&data)?; + let qc2: QuorumCertificate2 = qc.to_qc2(); + + let view = qc2.view_number().u64() as i64; + let data = bincode::serialize(&qc2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_certificate2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + let query = query_builder.build(); + + let mut tx = 
self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + offset += batch_size; + + tracing::info!("Quorum certificates migration progress: {} rows", offset); + + if rows.len() < batch_size as usize { + break; + } + } + + tracing::warn!("migrated quorum certificates"); + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "completed"], + ["table_name"], + [("quorum_certificate".to_string(), true)], + ) + .await?; + tx.commit().await?; + tracing::info!("updated epoch_migration table for quorum_certificate"); + Ok(()) } @@ -1387,6 +1830,49 @@ impl SequencerPersistence for Persistence { }) .transpose() } + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + let data = &proposal.data; + let view = data.view_number().u64(); + let data_bytes = bincode::serialize(proposal).unwrap(); + + let mut tx = self.db.write().await?; + tx.upsert( + "da_proposal2", + ["view", "data", "payload_hash"], + ["view"], + [(view as i64, data_bytes, vid_commit.to_string())], + ) + .await?; + tx.commit().await + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + if !self.store_undecided_state { + return Ok(()); + } + + let leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; + let state_bytes = bincode::serialize(&state).context("serializing state")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves_bytes, state_bytes)], + ) + .await?; + tx.commit().await + } } #[async_trait] @@ -1402,7 +1888,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM vid_share WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM vid_share2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1411,12 +1897,12 @@ impl Provider for 
Persistence { Ok(Some((bytes,))) => bytes, Ok(None) => return None, Err(err) => { - tracing::warn!("error loading VID share: {err:#}"); + tracing::error!("error loading VID share: {err:#}"); return None; } }; - let share: Proposal> = + let share: Proposal> = match bincode::deserialize(&bytes) { Ok(share) => share, Err(err) => { @@ -1425,7 +1911,11 @@ impl Provider for Persistence { } }; - Some(share.data.common) + match share.data { + VidDisperseShare::V0(vid) => Some(vid.common), + // TODO (abdul): V1 VID does not have common field + _ => None, + } } } @@ -1442,7 +1932,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM da_proposal WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM da_proposal2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1456,11 +1946,11 @@ impl Provider for Persistence { } }; - let proposal: Proposal> = match bincode::deserialize(&bytes) + let proposal: Proposal> = match bincode::deserialize(&bytes) { Ok(proposal) => proposal, Err(err) => { - tracing::warn!("error decoding DA proposal: {err:#}"); + tracing::error!("error decoding DA proposal: {err:#}"); return None; } }; @@ -1505,10 +1995,10 @@ impl Provider> for Persistence { async fn fetch_leaf_from_proposals( tx: &mut Transaction, req: LeafRequest, -) -> anyhow::Result)>> { +) -> anyhow::Result)>> { // Look for a quorum proposal corresponding to this leaf. let Some((proposal_bytes,)) = - query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE leaf_hash = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_proposals2 WHERE leaf_hash = $1 LIMIT 1") .bind(req.expected_leaf.to_string()) .fetch_optional(tx.as_mut()) .await @@ -1519,7 +2009,7 @@ async fn fetch_leaf_from_proposals( // Look for a QC corresponding to this leaf. 
let Some((qc_bytes,)) = - query_as::<(Vec,)>("SELECT data FROM quorum_certificate WHERE leaf_hash = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_certificate2 WHERE leaf_hash = $1 LIMIT 1") .bind(req.expected_leaf.to_string()) .fetch_optional(tx.as_mut()) .await @@ -1528,12 +2018,12 @@ async fn fetch_leaf_from_proposals( return Ok(None); }; - let proposal: Proposal> = + let proposal: Proposal> = bincode::deserialize(&proposal_bytes).context("deserializing quorum proposal")?; - let qc: QuorumCertificate = + let qc: QuorumCertificate2 = bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?; - let leaf = Leaf::from_quorum_proposal(&proposal.data); + let leaf = Leaf2::from_quorum_proposal(&proposal.data); Ok(Some((leaf, qc))) } @@ -1588,21 +2078,29 @@ mod generic_tests { #[cfg(test)] mod test { + use super::*; use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; - use espresso_types::{ - traits::NullEventConsumer, Leaf, MockSequencerVersions, NodeState, ValidatedState, - }; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::vid_commitment, + data::{ + ns_table::parse_ns_table, vid_commitment, vid_disperse::VidDisperseShare2, EpochNumber, + QuorumProposal2, + }, + message::convert_proposal, simple_certificate::QuorumCertificate, + simple_vote::QuorumData, traits::{ block_contents::BlockHeader, node_implementation::Versions, signature_key::SignatureKey, EncodeBytes, }, - vid::advz::advz_scheme, + vid::{ + advz::advz_scheme, + avidm::{init_avidm_param, AvidMScheme}, + }, }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; @@ -1697,20 +2195,25 @@ mod test { // Mock up some data. 
let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = disperse.commit; + + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = ADVZDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -1718,14 +2221,14 @@ mod test { let quorum_proposal = QuorumProposalWrapper:: { proposal: QuorumProposal2:: { - epoch: None, block_header: leaf.block_header().clone(), view_number: leaf.view_number(), - justify_qc: leaf.justify_qc().to_qc2(), + justify_qc: leaf.justify_qc(), upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, next_epoch_justify_qc: None, + epoch: None, }, }; let quorum_proposal_signature = @@ -1740,10 +2243,11 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }, signature: block_payload_signature, _pd: Default::default(), @@ -1757,52 +2261,54 @@ 
mod test { .proposal .justify_qc .data - .leaf_commit = Committable::commit(&leaf.clone().into()); + .leaf_commit = Committable::commit(&leaf.clone()); let qc = next_quorum_proposal.data.justify_qc(); // Add to database. storage - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_vid2(&convert_proposal(vid_share.clone())) + .await + .unwrap(); + storage + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); // Add an extra quorum proposal so we have a QC pointing back at `leaf`. storage - .append_quorum_proposal(&next_quorum_proposal) + .append_quorum_proposal2(&next_quorum_proposal) .await .unwrap(); // Fetch it as if we were rebuilding an archive. assert_eq!( - vid_share.data.common, + None, storage - .fetch(VidCommonRequest(VidCommitment::V0( + .fetch(VidCommonRequest(VidCommitment::V1( vid_share.data.payload_commitment ))) .await - .unwrap() ); assert_eq!( leaf_payload, storage - .fetch(PayloadRequest(VidCommitment::V0( + .fetch(PayloadRequest(VidCommitment::V1( vid_share.data.payload_commitment ))) .await .unwrap() ); assert_eq!( - LeafQueryData::new(leaf.clone(), qc.clone().to_qc()).unwrap(), + LeafQueryData::new(leaf.clone(), qc.clone()).unwrap(), storage .fetch(LeafRequest::new( leaf.block_header().block_number(), Committable::commit(&leaf), - qc.clone().to_qc().commit() + qc.clone().commit() )) .await .unwrap() @@ -1828,28 +2334,26 @@ mod test { // Populate some data. 
let leaf = - Leaf::genesis::(&ValidatedState::default(), &NodeState::mock()) - .await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); - let disperse = advz_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let payload_commitment = vid_commitment::( - &leaf_payload_bytes_arc, - &leaf.block_header().metadata().encode(), - 2, - ::Base::VERSION, - ) - .unwrap_v0(); + let avidm_param = init_avidm_param(2).unwrap(); + let weights = vec![1u32; 2]; + + let ns_table = parse_ns_table(leaf_payload.byte_len().as_usize(), &leaf_payload.encode()); + let (payload_commitment, shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &leaf_payload_bytes_arc, ns_table) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid = ADVZDisperseShare:: { + let vid = VidDisperseShare2:: { view_number: data_view, payload_commitment, - share: disperse.shares[0].clone(), - common: disperse.common, + share: shares[0].clone(), recipient_key: pubkey, + epoch: None, + target_epoch: None, } .to_proposal(&privkey) .unwrap() @@ -1859,12 +2363,11 @@ mod test { epoch: None, block_header: leaf.block_header().clone(), view_number: data_view, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1883,23 +2386,24 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: data_view, + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: 
Default::default(), }; tracing::info!(?vid, ?da_proposal, ?quorum_proposal, "append data"); - storage.append_vid(&vid).await.unwrap(); + storage.append_vid2(&vid).await.unwrap(); storage - .append_da(&da_proposal, VidCommitment::V0(payload_commitment)) + .append_da2(&da_proposal, VidCommitment::V1(payload_commitment)) .await .unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); @@ -1912,7 +2416,7 @@ mod test { .unwrap(); assert_eq!( storage.load_vid_share(data_view).await.unwrap().unwrap(), - vid + convert_proposal(vid) ); assert_eq!( storage.load_da_proposal(data_view).await.unwrap().unwrap(), @@ -1962,4 +2466,220 @@ mod test { }) .await } + + #[tokio::test(flavor = "multi_thread")] + async fn test_consensus_migration() { + setup_test(); + + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + + let storage = opt.create().await.unwrap(); + + let rows = 300; + + for i in 0..rows { + let view = ViewNumber::new(i); + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let (payload, metadata) = + Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment::( + &payload_bytes, + &metadata.encode(), + 4, + ::Base::VERSION, + ); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + std::marker::PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + 
view_number: view, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + + let proposal = Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: std::marker::PhantomData, + }; + + let proposal_bytes = bincode::serialize(&proposal) + .context("serializing proposal") + .unwrap(); + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); + + let mut tx = storage.db.write().await.unwrap(); + + let qc_bytes = bincode::serialize(&justify_qc).unwrap(); + let leaf_bytes = bincode::serialize(&leaf).unwrap(); + + tx.upsert( + "anchor_leaf", + ["view", "leaf", "qc"], + ["view"], + [(i as i64, leaf_bytes, qc_bytes)], + ) + .await + .unwrap(); + tx.commit().await.unwrap(); + + let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap(); + + let vid = ADVZDisperseShare:: { + view_number: ViewNumber::new(i), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(i), + }; + + let block_payload_signature = + BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload"); + + let da_proposal = Proposal { + data: da, + signature: block_payload_signature, + _pd: Default::default(), + }; + + storage + .append_vid(&vid.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + storage + .append_da(&da_proposal, VidCommitment::V0(disperse.commit)) + .await + .unwrap(); + + let leaf_hash = Committable::commit(&leaf); + 
let mut tx = storage.db.write().await.expect("failed to start write tx"); + tx.upsert( + "quorum_proposals", + ["view", "leaf_hash", "data"], + ["view"], + [(i as i64, leaf_hash.to_string(), proposal_bytes)], + ) + .await + .expect("failed to upsert quorum proposal"); + + let justify_qc = &proposal.data.justify_qc; + let justify_qc_bytes = bincode::serialize(&justify_qc) + .context("serializing QC") + .unwrap(); + tx.upsert( + "quorum_certificate", + ["view", "leaf_hash", "data"], + ["view"], + [( + justify_qc.view_number.u64() as i64, + justify_qc.data.leaf_commit.to_string(), + &justify_qc_bytes, + )], + ) + .await + .expect("failed to upsert qc"); + + tx.commit().await.expect("failed to commit"); + } + + storage.migrate_consensus().await.unwrap(); + + let mut tx = storage.db.read().await.unwrap(); + let (anchor_leaf2_count,) = query_as::<(i64,)>("SELECT COUNT(*) from anchor_leaf2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + anchor_leaf2_count, rows as i64, + "anchor leaf count does not match rows", + ); + + let (da_proposal_count,) = query_as::<(i64,)>("SELECT COUNT(*) from da_proposal2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + da_proposal_count, rows as i64, + "da proposal count does not match rows", + ); + + let (vid_share_count,) = query_as::<(i64,)>("SELECT COUNT(*) from vid_share2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + vid_share_count, rows as i64, + "vid share count does not match rows" + ); + + let (quorum_proposals_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_proposals2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_proposals_count, rows as i64, + "quorum proposals count does not match rows", + ); + + let (quorum_certificates_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_certificate2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_certificates_count, rows as i64, + "quorum certificates count does not 
match rows", + ); + } } diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index d9232bc5cd..a5d143188a 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -196,7 +196,7 @@ where .context("timed out fetching proposal")? .context("error fetching proposal")?; self.persistence - .append_quorum_proposal(&proposal) + .append_quorum_proposal2(&proposal) .await .context("error saving fetched proposal")?; diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index 50eed617ac..e7f9160e41 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -155,8 +155,8 @@ where parent_state, instance, peers, - &parent_leaf.leaf().clone().into(), - &proposed_leaf.leaf().clone().into(), + &parent_leaf.leaf().clone(), + &proposed_leaf.leaf().clone(), ) .await .context("computing state update")?; diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs index 4d463658d4..837e81885e 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof.rs @@ -1,5 +1,5 @@ -use hotshot_query_service::VidCommitment; use hotshot_types::{ + data::VidCommitment, traits::EncodeBytes, vid::advz::{advz_scheme, ADVZCommon, ADVZScheme}, }; diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs index 6bd3849241..6b12ff2fb5 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs @@ -1,8 +1,8 @@ use futures::future; use hotshot::helpers::initialize_logging; use hotshot::traits::BlockPayload; -use hotshot_query_service::VidCommitment; use hotshot_types::{ + data::VidCommitment, traits::EncodeBytes, vid::advz::{advz_scheme, ADVZScheme}, }; @@ -87,7 +87,7 @@ async fn ns_proof() { let (ns_proof_txs, ns_proof_ns_id) = ns_proof .verify( block.ns_table(), - 
&hotshot_query_service::VidCommitment::V0(vid.commit), + &VidCommitment::V0(vid.commit), &vid.common, ) .unwrap_or_else(|| panic!("namespace {} proof verification failure", ns_id)); diff --git a/types/src/v0/impls/block/full_payload/ns_table.rs b/types/src/v0/impls/block/full_payload/ns_table.rs index bbab22eb85..574cc46854 100644 --- a/types/src/v0/impls/block/full_payload/ns_table.rs +++ b/types/src/v0/impls/block/full_payload/ns_table.rs @@ -146,11 +146,7 @@ impl NsTable { /// Read subslice range for the `index`th namespace from the namespace /// table. - pub(crate) fn ns_range( - &self, - index: &NsIndex, - payload_byte_len: &PayloadByteLen, - ) -> NsPayloadRange { + pub fn ns_range(&self, index: &NsIndex, payload_byte_len: &PayloadByteLen) -> NsPayloadRange { let end = self .read_ns_offset_unchecked(index) .min(payload_byte_len.as_usize()); diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index dc328273ea..3707ab3a57 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -64,7 +64,7 @@ impl Payload { self.read_ns_payload(&ns_payload_range) } - pub(crate) fn byte_len(&self) -> PayloadByteLen { + pub fn byte_len(&self) -> PayloadByteLen { PayloadByteLen(self.raw_payload.len()) } @@ -287,7 +287,7 @@ impl PayloadByteLen { self.0 == expected } - pub(in crate::v0::impls::block::full_payload) fn as_usize(&self) -> usize { + pub fn as_usize(&self) -> usize { self.0 } } diff --git a/types/src/v0/impls/block/test.rs b/types/src/v0/impls/block/test.rs index 1000b6ca18..225332dd51 100755 --- a/types/src/v0/impls/block/test.rs +++ b/types/src/v0/impls/block/test.rs @@ -2,8 +2,8 @@ use std::collections::BTreeMap; use hotshot::traits::BlockPayload; -use hotshot_query_service::{availability::QueryablePayload, VidCommitment}; -use hotshot_types::{traits::EncodeBytes, vid::advz::advz_scheme}; +use hotshot_query_service::availability::QueryablePayload; 
+use hotshot_types::{data::VidCommitment, traits::EncodeBytes, vid::advz::advz_scheme}; use jf_vid::VidScheme; use rand::RngCore; use sequencer_utils::test_utils::setup_test; diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 13940947e6..bc3e982bf7 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -2,10 +2,9 @@ use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; use ethers_conv::ToAlloy; -use hotshot_query_service::{ - availability::QueryableHeader, explorer::ExplorerHeader, VidCommitment, -}; +use hotshot_query_service::{availability::QueryableHeader, explorer::ExplorerHeader}; use hotshot_types::{ + data::VidCommitment, traits::{ block_contents::{BlockHeader, BuilderFee}, node_implementation::NodeType, @@ -13,10 +12,8 @@ use hotshot_types::{ BlockPayload, ValidatedState as _, }, utils::BuilderCommitment, - // vid::advz::{ADVZCommon, ADVZScheme}, }; use jf_merkle_tree::{AppendableMerkleTreeScheme, MerkleTreeScheme}; -// use jf_vid::VidScheme; use serde::{ de::{self, MapAccess, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, Serializer, diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index b5f5bd5d83..b29d8e76bb 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -11,8 +11,8 @@ use hotshot_types::{ consensus::CommitmentMap, data::{ vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, - VidCommitment, ViewNumber, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidCommitment, VidDisperseShare, ViewNumber, }, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -460,11 +460,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> 
anyhow::Result>>>; async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> anyhow::Result>>>; async fn load_upgrade_certificate( &self, ) -> anyhow::Result>>; @@ -521,12 +521,9 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { None => { tracing::info!("no saved leaf, starting from genesis leaf"); ( - hotshot_types::data::Leaf::genesis::(&genesis_validated_state, &state) - .await - .into(), - QuorumCertificate::genesis::(&genesis_validated_state, &state) - .await - .to_qc2(), + hotshot_types::data::Leaf2::genesis::(&genesis_validated_state, &state) + .await, + QuorumCertificate2::genesis::(&genesis_validated_state, &state).await, None, ) } @@ -688,12 +685,13 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { epoch: Option, action: HotShotAction, ) -> anyhow::Result<()>; - async fn update_undecided_state( + + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()>; - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()>; @@ -701,13 +699,27 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { &self, decided_upgrade_certificate: Option>, ) -> anyhow::Result<()>; - async fn migrate_consensus( - &self, - migrate_leaf: fn(Leaf) -> Leaf2, - migrate_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()>; + async fn migrate_consensus(&self) -> anyhow::Result<()> { + tracing::warn!("migrating consensus data..."); + + self.migrate_anchor_leaf().await?; + self.migrate_da_proposals().await?; + self.migrate_vid_shares().await?; + self.migrate_undecided_state().await?; + self.migrate_quorum_proposals().await?; + self.migrate_quorum_certificates().await?; + + tracing::warn!("consensus storage has been migrated to new types"); + + Ok(()) + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()>; + async fn migrate_da_proposals(&self) -> 
anyhow::Result<()>; + async fn migrate_vid_shares(&self) -> anyhow::Result<()>; + async fn migrate_undecided_state(&self) -> anyhow::Result<()>; + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()>; + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()>; async fn load_anchor_view(&self) -> anyhow::Result { match self.load_anchor_leaf().await? { @@ -724,6 +736,19 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_next_epoch_quorum_certificate( &self, ) -> anyhow::Result>>; + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()>; + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } } #[async_trait] @@ -775,6 +800,14 @@ impl Storage for Arc

{ (**self).append_da(proposal, vid_commit).await } + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: VidCommitment, + ) -> anyhow::Result<()> { + (**self).append_da2(proposal, vid_commit).await + } + async fn record_action( &self, view: ViewNumber, @@ -794,7 +827,7 @@ impl Storage for Arc

{ state: BTreeMap>, ) -> anyhow::Result<()> { (**self) - .update_undecided_state( + .update_undecided_state2( leaves .into_values() .map(|leaf| { @@ -806,16 +839,28 @@ impl Storage for Arc

{ ) .await } - async fn append_proposal( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { (**self) - .append_quorum_proposal(&convert_proposal(proposal.clone())) + .append_quorum_proposal2(&convert_proposal(proposal.clone())) .await } + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + let proposal_qp_wrapper: Proposal> = + convert_proposal(proposal.clone()); + (**self).append_quorum_proposal2(&proposal_qp_wrapper).await + } + + async fn update_high_qc2(&self, _high_qc: QuorumCertificate2) -> anyhow::Result<()> { + Ok(()) + } + async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option>, @@ -824,6 +869,14 @@ impl Storage for Arc

{ .store_upgrade_certificate(decided_upgrade_certificate) .await } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + (**self).update_undecided_state2(leaves, state).await + } } /// Data that can be deserialized from a subslice of namespace payload bytes. diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index fdac8c80e0..d2c32e598f 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -6,7 +6,7 @@ use derive_more::{From, Into}; use futures::future::BoxFuture; use hotshot_types::{ consensus::CommitmentMap, - data::{Leaf, Leaf2, QuorumProposal}, + data::{Leaf, Leaf2}, traits::node_implementation::NodeType, }; use rand::Rng; @@ -25,25 +25,6 @@ use time::{ }; use tokio::time::sleep; -pub fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO verify removal. It doesn't seem we need this check, but lets double check. - // if leaf2.drb_seed != INITIAL_DRB_SEED_INPUT && leaf2.drb_result != INITIAL_DRB_RESULT { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - pub fn upgrade_commitment_map( map: CommitmentMap>, ) -> CommitmentMap> { @@ -55,17 +36,6 @@ pub fn upgrade_commitment_map( .collect() } -pub fn downgrade_commitment_map( - map: CommitmentMap>, -) -> CommitmentMap> { - map.into_values() - .map(|leaf2| { - let leaf = downgrade_leaf(leaf2); - ( as Committable>::commit(&leaf), leaf) - }) - .collect() -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] pub enum Update { #[default]