From 508bcd01f552b9bd1f7912fc75a1a17721271099 Mon Sep 17 00:00:00 2001 From: Oleksandr Tkachenko <108659113+altkdf@users.noreply.github.com> Date: Fri, 29 Nov 2024 14:20:47 +0100 Subject: [PATCH 01/52] test: increase timeout threshold for new system performance tests (#2891) --- rs/tests/consensus/tecdsa/BUILD.bazel | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rs/tests/consensus/tecdsa/BUILD.bazel b/rs/tests/consensus/tecdsa/BUILD.bazel index 5cedc14d080..17a2019d07b 100644 --- a/rs/tests/consensus/tecdsa/BUILD.bazel +++ b/rs/tests/consensus/tecdsa/BUILD.bazel @@ -364,6 +364,7 @@ system_test_nns( "manual", ], test_driver_target = tecdsa_performance_test_template.test_driver_target, + test_timeout = "eternal", runtime_deps = GUESTOS_RUNTIME_DEPS + GRAFANA_RUNTIME_DEPS, ) @@ -387,6 +388,7 @@ system_test_nns( "manual", ], test_driver_target = tecdsa_performance_test_template.test_driver_target, + test_timeout = "eternal", runtime_deps = GUESTOS_RUNTIME_DEPS + GRAFANA_RUNTIME_DEPS, ) @@ -410,5 +412,6 @@ system_test_nns( "manual", ], test_driver_target = tecdsa_performance_test_template.test_driver_target, + test_timeout = "eternal", runtime_deps = GUESTOS_RUNTIME_DEPS + GRAFANA_RUNTIME_DEPS, ) From 71fc7f0820a8d2f36e3ae21f12131bdc44457c2f Mon Sep 17 00:00:00 2001 From: Igor Novgorodov Date: Fri, 29 Nov 2024 16:36:41 +0100 Subject: [PATCH 02/52] fix(BOUN-1318): fix typo in env file (#2903) --- ic-os/components/ic/share/ic-boundary.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ic-os/components/ic/share/ic-boundary.env b/ic-os/components/ic/share/ic-boundary.env index d82b644f66e..b4118838ccf 100644 --- a/ic-os/components/ic/share/ic-boundary.env +++ b/ic-os/components/ic/share/ic-boundary.env @@ -1,4 +1,4 @@ -LISTEN_HTTP_PORT="443" +LISTEN_HTTPS_PORT="443" TLS_CERT_PATH="/var/lib/ic/data/ic-boundary-tls.crt" TLS_PKEY_PATH="/var/lib/ic/data/ic-boundary-tls.key" TLS_ACME_CREDENTIALS_PATH="/var/lib/ic/data" From 
e742d50d31f406d8260e8a39fb056a5684415fa2 Mon Sep 17 00:00:00 2001 From: Arshavir Ter-Gabrielyan Date: Fri, 29 Nov 2024 16:49:44 +0100 Subject: [PATCH 03/52] feat(sns): Render SNS versions using hex strings in journal/json Http responses (#2902) This PR makes the JSON version of the Http-served upgrade journal easier to read by using hexadecimal encoding of byte vectors representing Wasm hashes of SNS framework canisters. --- Cargo.lock | 2 +- .../api/src/ic_sns_governance.pb.v1.rs | 38 ++++++++++++++++++- rs/sns/governance/canister/canister.rs | 13 ++++--- rs/sns/governance/canister/tests.rs | 27 +++++++------ rs/sns/governance/src/cached_upgrade_steps.rs | 9 +---- 5 files changed, 62 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9b475264f0..cad39cd54bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14762,7 +14762,7 @@ dependencies = [ "regex", "regex-syntax 0.6.29", "string_cache", - "term 0.7.0", + "term", "tiny-keccak", "unicode-xid", ] diff --git a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs index e159bfe22a5..1cbdd6bf129 100644 --- a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs +++ b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs @@ -1,3 +1,11 @@ +/// Formats the 32 bytes of a hash as a hexadecimal string. Corresponds to 64 ascii symbols. +pub fn format_full_hash(hash: &[u8]) -> String { + hash.iter() + .map(|b| format!("{:02x}", b)) + .collect::>() + .join("") +} + // This file is @generated by prost-build. /// A principal with a particular set of permissions over a neuron. #[derive( @@ -1723,6 +1731,9 @@ pub struct Governance { } /// Nested message and enum types in `Governance`. pub mod governance { + use super::format_full_hash; + use serde::ser::SerializeStruct; + /// The commands that require a neuron lock. 
#[derive( candid::CandidType, @@ -1898,6 +1909,32 @@ pub mod governance { #[prost(string, optional, tag = "4")] pub description: ::core::option::Option<::prost::alloc::string::String>, } + + impl serde::Serialize for Version { + fn serialize(self: &Version, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut version = serializer.serialize_struct("Version", 6)?; + version.serialize_field("root_wasm_hash", &format_full_hash(&self.root_wasm_hash))?; + version.serialize_field( + "governance_wasm_hash", + &format_full_hash(&self.governance_wasm_hash), + )?; + version.serialize_field("swap_wasm_hash", &format_full_hash(&self.swap_wasm_hash))?; + version.serialize_field("index_wasm_hash", &format_full_hash(&self.index_wasm_hash))?; + version.serialize_field( + "ledger_wasm_hash", + &format_full_hash(&self.ledger_wasm_hash), + )?; + version.serialize_field( + "archive_wasm_hash", + &format_full_hash(&self.archive_wasm_hash), + )?; + version.end() + } + } + /// A version of the SNS defined by the WASM hashes of its canisters. 
#[derive( candid::CandidType, @@ -1905,7 +1942,6 @@ pub mod governance { comparable::Comparable, Eq, std::hash::Hash, - serde::Serialize, Clone, PartialEq, ::prost::Message, diff --git a/rs/sns/governance/canister/canister.rs b/rs/sns/governance/canister/canister.rs index 5a031428b4a..89afd45f59e 100644 --- a/rs/sns/governance/canister/canister.rs +++ b/rs/sns/governance/canister/canister.rs @@ -44,6 +44,7 @@ use ic_sns_governance::{ }, types::{Environment, HeapGrowthPotential}, }; +use ic_sns_governance_api::pb::v1 as api; use prost::Message; use rand::{RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -604,13 +605,15 @@ ic_nervous_system_common_build_metadata::define_get_build_metadata_candid_method pub fn http_request(request: HttpRequest) -> HttpResponse { match request.path() { "/journal/json" => { - let journal_entries = &governance() + let journal = governance() .proto .upgrade_journal - .as_ref() - .expect("The upgrade journal is not initialized for this SNS.") - .entries; - serve_journal(journal_entries) + .clone() + .expect("The upgrade journal is not initialized for this SNS."); + + let journal = api::UpgradeJournal::from(journal); + + serve_journal(&journal.entries) } "/metrics" => serve_metrics(encode_metrics), "/logs" => serve_logs_v2(request, &INFO, &ERROR), diff --git a/rs/sns/governance/canister/tests.rs b/rs/sns/governance/canister/tests.rs index 401a2f6e024..15ac971e16c 100644 --- a/rs/sns/governance/canister/tests.rs +++ b/rs/sns/governance/canister/tests.rs @@ -1,5 +1,3 @@ -use std::collections::HashSet; - use super::*; use assert_matches::assert_matches; use candid_parser::utils::{service_equal, CandidSource}; @@ -8,7 +6,10 @@ use ic_sns_governance::pb::v1::{ upgrade_journal_entry::{Event, UpgradeStepsRefreshed}, DisburseMaturityInProgress, Neuron, UpgradeJournal, UpgradeJournalEntry, }; +use ic_sns_governance_api::pb::v1 as api; use maplit::btreemap; +use pretty_assertions::assert_eq; +use std::collections::HashSet; /// This is NOT 
affected by /// @@ -124,21 +125,23 @@ fn test_upgrade_journal() { root_wasm_hash: vec![0, 0, 0, 0], governance_wasm_hash: vec![0, 0, 0, 1], swap_wasm_hash: vec![0, 0, 0, 2], - index_wasm_hash: vec![0, 0, 0, 4], - ledger_wasm_hash: vec![0, 0, 0, 5], - archive_wasm_hash: vec![0, 0, 0, 6], + index_wasm_hash: vec![0, 0, 0, 3], + ledger_wasm_hash: vec![0, 0, 0, 4], + archive_wasm_hash: vec![0, 0, 0, 5], }], }), })), }], }; + let journal = api::UpgradeJournal::from(journal); + // Currently, the `/journal` Http endpoint serves the entries directly, rather than the whole // journal object. let http_response = serve_journal(&journal.entries); let expected_headers: HashSet<(_, _)> = HashSet::from_iter([ ("Content-Type".to_string(), "application/json".to_string()), - ("Content-Length".to_string(), "271".to_string()), + ("Content-Length".to_string(), "277".to_string()), ]); let (observed_headers, observed_body) = assert_matches!( http_response, @@ -170,12 +173,12 @@ fn test_upgrade_journal() { "upgrade_steps": { "versions": [ { - "root_wasm_hash": [0,0,0,0], - "governance_wasm_hash": [0,0,0,1], - "ledger_wasm_hash": [0,0,0,5], - "swap_wasm_hash": [0,0,0,2], - "archive_wasm_hash": [0,0,0,6], - "index_wasm_hash": [0,0,0,4] + "root_wasm_hash": "00000000", + "governance_wasm_hash": "00000001", + "swap_wasm_hash": "00000002", + "index_wasm_hash": "00000003", + "ledger_wasm_hash": "00000004", + "archive_wasm_hash": "00000005" } ] } diff --git a/rs/sns/governance/src/cached_upgrade_steps.rs b/rs/sns/governance/src/cached_upgrade_steps.rs index ec1b11ab0f4..0eedd402b7b 100644 --- a/rs/sns/governance/src/cached_upgrade_steps.rs +++ b/rs/sns/governance/src/cached_upgrade_steps.rs @@ -10,6 +10,7 @@ use crate::sns_upgrade::ListUpgradeStep; use crate::sns_upgrade::ListUpgradeStepsResponse; use crate::sns_upgrade::SnsCanisterType; use ic_canister_log::log; +use ic_sns_governance_api::pb::v1::format_full_hash; #[derive(Clone, Debug, PartialEq)] pub(crate) struct CachedUpgradeSteps { @@ 
-106,14 +107,6 @@ pub fn format_short_hash(hash: &[u8]) -> String { .join("") } -/// Formats the 32 bytes of a hash as a hexadecimal string. Corresponds to 64 ascii symbols. -pub fn format_full_hash(hash: &[u8]) -> String { - hash.iter() - .map(|b| format!("{:02x}", b)) - .collect::>() - .join("") -} - pub fn render_two_versions_as_markdown_table( current_version: &Version, target_version: &Version, From 3ce31d4b94fd13b8d4461ec668958335e6468474 Mon Sep 17 00:00:00 2001 From: Elias Datler <46360620+fxgst@users.noreply.github.com> Date: Fri, 29 Nov 2024 23:38:45 +0700 Subject: [PATCH 04/52] chore(PocketIC): Add comment about with_server_url (#2904) - state that `with_server_url` only attaches to a running PocketIC, it does not create a new one under that port --------- Co-authored-by: mraszyk <31483726+mraszyk@users.noreply.github.com> --- packages/pocket-ic/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/pocket-ic/src/lib.rs b/packages/pocket-ic/src/lib.rs index a2144c0b838..e647de89523 100644 --- a/packages/pocket-ic/src/lib.rs +++ b/packages/pocket-ic/src/lib.rs @@ -140,6 +140,7 @@ impl PocketIcBuilder { .await } + /// Use an already running PocketIC server. pub fn with_server_url(mut self, server_url: Url) -> Self { self.server_url = Some(server_url); self From b7bb7578988306fcf211f9f9fcc1f1436d749afa Mon Sep 17 00:00:00 2001 From: Arshavir Ter-Gabrielyan Date: Fri, 29 Nov 2024 18:23:38 +0100 Subject: [PATCH 05/52] feat(sns): Enable AdvanceSnsTargetVersion proposals on mainnet (#2906) This PR removes the restriction for the new AdvanceSnsTargetVersion proposals to be only allowed in testing, thereby activating the new feature on mainnet. 
--- rs/sns/governance/src/governance.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/rs/sns/governance/src/governance.rs b/rs/sns/governance/src/governance.rs index aebe5321854..82466e131b3 100644 --- a/rs/sns/governance/src/governance.rs +++ b/rs/sns/governance/src/governance.rs @@ -3034,9 +3034,6 @@ impl Governance { &mut self, new_target: Version, ) -> Result<(), GovernanceError> { - // TODO[NNS1-3365]: Enable the AdvanceSnsTargetVersionFeature. - self.check_test_features_enabled(); - let (_, target_version) = self .proto .validate_new_target_version(Some(new_target)) From 7a20299a249b95508443c043f5652ca142664699 Mon Sep 17 00:00:00 2001 From: oggy-dfin <89794951+oggy-dfin@users.noreply.github.com> Date: Fri, 29 Nov 2024 18:48:04 +0100 Subject: [PATCH 06/52] feat(IC-1579): TLA instrumentation for `merge_neuron` (#2341) Adds a TLA model of `merge_neurons` and links it to the code. Since the only Rust integration tests that test merge_neuron are proptests, which generate a ton of traces, introduced a limit to check only some prefix (currently, first 30) of the state pairs from the collected traces. Also made the Apalache checks a bit more parallel, by using channels to signal completion as soon as a single thread is available instead of waiting for the batch. 
--------- Co-authored-by: IDX GitHub Automation --- rs/nns/governance/src/governance.rs | 25 +- .../src/governance/ledger_helper.rs | 7 + .../src/governance/tla/claim_neuron.rs | 43 ++- .../src/governance/tla/merge_neurons.rs | 34 ++ rs/nns/governance/src/governance/tla/mod.rs | 146 +++++--- .../src/governance/tla/split_neuron.rs | 43 ++- .../governance/src/governance/tla_macros.rs | 8 + rs/nns/governance/tests/fixtures/mod.rs | 26 ++ rs/nns/governance/tests/merge_neurons.rs | 6 + rs/nns/governance/tla/Merge_Neurons.tla | 351 ++++++++++++++++++ .../governance/tla/Merge_Neurons_Apalache.tla | 52 +++ .../tla_instrumentation/src/tla_state.rs | 14 +- 12 files changed, 659 insertions(+), 96 deletions(-) create mode 100644 rs/nns/governance/src/governance/tla/merge_neurons.rs create mode 100644 rs/nns/governance/tla/Merge_Neurons.tla create mode 100644 rs/nns/governance/tla/Merge_Neurons_Apalache.tla diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index 7f4f32a0959..fd1d21c37ff 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -138,8 +138,8 @@ pub mod tla; #[cfg(feature = "tla")] pub use tla::{ - claim_neuron_desc, split_neuron_desc, tla_update_method, InstrumentationState, ToTla, - TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX, + tla_update_method, InstrumentationState, ToTla, CLAIM_NEURON_DESC, MERGE_NEURONS_DESC, + SPLIT_NEURON_DESC, TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX, }; // 70 KB (for executing NNS functions that are not canister upgrades) @@ -2670,7 +2670,7 @@ impl Governance { /// stake. /// - The amount to split minus the transfer fee is more than the minimum /// stake. 
- #[cfg_attr(feature = "tla", tla_update_method(split_neuron_desc()))] + #[cfg_attr(feature = "tla", tla_update_method(SPLIT_NEURON_DESC.clone()))] pub async fn split_neuron( &mut self, id: &NeuronId, @@ -2926,6 +2926,7 @@ impl Governance { /// it will be merged into the stake of the target neuron; if it is less /// than the transaction fee, the maturity of the source neuron will /// still be merged into the maturity of the target neuron. + #[cfg_attr(feature = "tla", tla_update_method(MERGE_NEURONS_DESC.clone()))] pub async fn merge_neurons( &mut self, id: &NeuronId, @@ -2965,6 +2966,14 @@ impl Governance { // Step 4: burn neuron fees if needed. if let Some(source_burn_fees) = effect.source_burn_fees() { + tla_log_locals! { + source_neuron_id: effect.source_neuron_id().id, + target_neuron_id: effect.target_neuron_id().id, + fees_amount: effect.source_burn_fees().map_or(0, |f| f.amount_e8s), + amount_to_target: effect.stake_transfer().map_or(0, |t| t.amount_to_target_e8s) + } + tla_log_label!("MergeNeurons_Burn"); + source_burn_fees .burn_neuron_fees_with_ledger(&*self.ledger, &mut self.neuron_store, now) .await?; @@ -2972,6 +2981,14 @@ impl Governance { // Step 5: transfer the stake if needed. if let Some(stake_transfer) = effect.stake_transfer() { + tla_log_locals! { + source_neuron_id: effect.source_neuron_id().id, + target_neuron_id: effect.target_neuron_id().id, + fees_amount: effect.source_burn_fees().map_or(0, |f| f.amount_e8s), + amount_to_target: effect.stake_transfer().map_or(0, |t| t.amount_to_target_e8s) + } + tla_log_label!("MergeNeurons_Stake"); + stake_transfer .transfer_neuron_stake_with_ledger(&*self.ledger, &mut self.neuron_store, now) .await?; @@ -6138,7 +6155,7 @@ impl Governance { /// the neuron and lock it before we make the call, we know that any /// concurrent call to mutate the same neuron will need to wait for this /// one to finish before proceeding. 
- #[cfg_attr(feature = "tla", tla_update_method(claim_neuron_desc()))] + #[cfg_attr(feature = "tla", tla_update_method(CLAIM_NEURON_DESC.clone()))] async fn claim_neuron( &mut self, subaccount: Subaccount, diff --git a/rs/nns/governance/src/governance/ledger_helper.rs b/rs/nns/governance/src/governance/ledger_helper.rs index 91eb87bd464..3c63c47e8ca 100644 --- a/rs/nns/governance/src/governance/ledger_helper.rs +++ b/rs/nns/governance/src/governance/ledger_helper.rs @@ -8,6 +8,11 @@ use crate::{ use ic_nervous_system_common::ledger::IcpLedger; use ic_nns_common::pb::v1::NeuronId; +#[cfg(feature = "tla")] +use super::tla::TLA_INSTRUMENTATION_STATE; +#[cfg(feature = "tla")] +use tla_instrumentation_proc_macros::tla_function; + /// An object that represents the burning of neuron fees. #[derive(Clone, PartialEq, Debug)] pub struct BurnNeuronFeesOperation { @@ -19,6 +24,7 @@ impl BurnNeuronFeesOperation { /// Burns the neuron fees by calling ledger and changing the neuron. Recoverable errors are /// returned while unrecoverable errors cause a panic. A neuron lock should be held before /// calling this. + #[cfg_attr(feature = "tla", tla_function)] pub async fn burn_neuron_fees_with_ledger( self, ledger: &dyn IcpLedger, @@ -80,6 +86,7 @@ pub struct NeuronStakeTransferOperation { impl NeuronStakeTransferOperation { /// Transfers the stake from one neuron to another by calling ledger and changing the neurons. /// Recoverable errors are returned while unrecoverable errors cause a panic. 
+ #[cfg_attr(feature = "tla", tla_function)] pub async fn transfer_neuron_stake_with_ledger( self, ledger: &dyn IcpLedger, diff --git a/rs/nns/governance/src/governance/tla/claim_neuron.rs b/rs/nns/governance/src/governance/tla/claim_neuron.rs index df3616d10f1..6d120715ae8 100644 --- a/rs/nns/governance/src/governance/tla/claim_neuron.rs +++ b/rs/nns/governance/src/governance/tla/claim_neuron.rs @@ -1,27 +1,30 @@ +use lazy_static::lazy_static; use tla_instrumentation::{Label, TlaConstantAssignment, ToTla, Update, VarAssignment}; use super::common::default_account; use super::{extract_common_constants, post_process_trace}; -pub fn claim_neuron_desc() -> Update { - const PID: &str = "Claim_Neuron"; - let default_locals = VarAssignment::new() - .add("account", default_account()) - .add("neuron_id", 0_u64.to_tla_value()); +lazy_static! { + pub static ref CLAIM_NEURON_DESC: Update = { + const PID: &str = "Claim_Neuron"; + let default_locals = VarAssignment::new() + .add("account", default_account()) + .add("neuron_id", 0_u64.to_tla_value()); - Update { - default_start_locals: default_locals.clone(), - default_end_locals: default_locals, - start_label: Label::new("ClaimNeuron1"), - end_label: Label::new("Done"), - process_id: PID.to_string(), - canister_name: "governance".to_string(), - post_process: |trace| { - let constants = TlaConstantAssignment { - constants: extract_common_constants(PID, trace).into_iter().collect(), - }; - post_process_trace(trace); - constants - }, - } + Update { + default_start_locals: default_locals.clone(), + default_end_locals: default_locals, + start_label: Label::new("ClaimNeuron1"), + end_label: Label::new("Done"), + process_id: PID.to_string(), + canister_name: "governance".to_string(), + post_process: |trace| { + let constants = TlaConstantAssignment { + constants: extract_common_constants(PID, trace).into_iter().collect(), + }; + post_process_trace(trace); + constants + }, + } + }; } diff --git 
a/rs/nns/governance/src/governance/tla/merge_neurons.rs b/rs/nns/governance/src/governance/tla/merge_neurons.rs new file mode 100644 index 00000000000..b150ef5a524 --- /dev/null +++ b/rs/nns/governance/src/governance/tla/merge_neurons.rs @@ -0,0 +1,34 @@ +use super::{account_to_tla, extract_common_constants, post_process_trace}; +use crate::governance::governance_minting_account; +use lazy_static::lazy_static; +use tla_instrumentation::{Label, TlaConstantAssignment, ToTla, Update, VarAssignment}; + +const PID: &str = "Merge_Neurons"; +lazy_static! { + pub static ref MERGE_NEURONS_DESC: Update = { + let default_locals = VarAssignment::new() + .add("source_neuron_id", 0_u64.to_tla_value()) + .add("target_neuron_id", 0_u64.to_tla_value()) + .add("fees_amount", 0_u64.to_tla_value()) + .add("amount_to_target", 0_u64.to_tla_value()); + Update { + default_start_locals: default_locals.clone(), + default_end_locals: default_locals, + start_label: Label::new("MergeNeurons_Start"), + end_label: Label::new("Done"), + process_id: PID.to_string(), + canister_name: "governance".to_string(), + post_process: |trace| { + let mut constants = TlaConstantAssignment { + constants: extract_common_constants(PID, trace).into_iter().collect(), + }; + post_process_trace(trace); + constants.constants.insert( + "Minting_Account_Id".to_string(), + account_to_tla(governance_minting_account()), + ); + constants + }, + } + }; +} diff --git a/rs/nns/governance/src/governance/tla/mod.rs b/rs/nns/governance/src/governance/tla/mod.rs index 32702e36edf..89ea93fbe64 100644 --- a/rs/nns/governance/src/governance/tla/mod.rs +++ b/rs/nns/governance/src/governance/tla/mod.rs @@ -1,9 +1,7 @@ -use itertools::Itertools; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::Deref, - thread, -}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::hash::Hash; +use std::sync::mpsc; +use std::thread; use super::Governance; use crate::storage::{with_stable_neuron_indexes, with_stable_neuron_store}; @@ 
-26,15 +24,18 @@ pub use common::{account_to_tla, opt_subaccount_to_tla, subaccount_to_tla}; use common::{function_domain_union, governance_account_id}; pub use store::{TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX}; -mod split_neuron; -pub use split_neuron::split_neuron_desc; mod claim_neuron; -pub use claim_neuron::claim_neuron_desc; +mod merge_neurons; +mod split_neuron; + +pub use claim_neuron::CLAIM_NEURON_DESC; +pub use merge_neurons::MERGE_NEURONS_DESC; +pub use split_neuron::SPLIT_NEURON_DESC; fn neuron_global(gov: &Governance) -> TlaValue { let neuron_map: BTreeMap = with_stable_neuron_store(|store| { gov.neuron_store.with_active_neurons_iter(|iter| { - iter.map(|n| n.deref().clone()) + iter.map(|n| (*n).clone()) .chain(store.range_neurons(std::ops::RangeFull)) .map(|neuron| { ( @@ -207,6 +208,15 @@ fn get_tla_module_path(module: &str) -> PathBuf { }) } +fn dedup_by_key(vec: &mut Vec, mut key_selector: F) +where + F: FnMut(&E) -> K, + K: Eq + Hash, +{ + let mut seen_keys = HashSet::new(); + vec.retain(|element| seen_keys.insert(key_selector(element))); +} + /// Checks a trace against the model. /// /// It's assumed that the corresponding model is called `_Apalache.tla`, where PID is the @@ -214,22 +224,34 @@ fn get_tla_module_path(module: &str) -> PathBuf { pub fn check_traces() { // Large states make Apalache time and memory consumption explode. We'll look at // improving that later, for now we introduce a hard limit on the state size, and - // skip checking states larger than the limit + // skip checking states larger than the limit. The limit is a somewhat arbitrary + // number based on what we observed in the tests. We saw that states with 1000+ atoms take + // a long time to process, whereas most manual tests yield states of size 100 or so. const STATE_SIZE_LIMIT: u64 = 500; + // Proptests generate lots of traces (and thus state pairs) that make the Apalache testing very long. 
+ // Again, limit this to some arbitrary number where checking is still reasonably fast. + // Note that this is effectively a per-test limit due to how `check_traces` is normally used. + const STATE_PAIR_COUNT_LIMIT: usize = 30; fn is_under_limit(p: &ResolvedStatePair) -> bool { p.start.size() < STATE_SIZE_LIMIT && p.end.size() < STATE_SIZE_LIMIT } fn print_stats(traces: &Vec) { + let mut total_pairs = 0; println!("Checking {} traces with TLA/Apalache", traces.len()); for t in traces { let total_len = t.state_pairs.len(); + total_pairs += total_len; let under_limit_len = t.state_pairs.iter().filter(|p| is_under_limit(p)).count(); println!( - "TLA/Apalache checks: keeping {}/{} states for update {}", + "TLA/Apalache checks: keeping {}/{} state pairs for update {}", under_limit_len, total_len, t.update.process_id ); } + println!( + "Total of {} state pairs to be checked with Apalache; will retain {}", + total_pairs, STATE_PAIR_COUNT_LIMIT + ) } let traces = { @@ -238,6 +260,26 @@ pub fn check_traces() { std::mem::take(&mut (*t)) }; + print_stats(&traces); + + let mut all_pairs = traces + .into_iter() + .flat_map(|t| { + t.state_pairs + .into_iter() + .filter(is_under_limit) + .map(move |p| (t.update.clone(), t.constants.clone(), p)) + }) + .collect(); + + // A quick check that we don't have any duplicate state pairs. We assume the constants should + // be the same anyways and look at just the process ID and the state sthemselves. 
+ dedup_by_key(&mut all_pairs, |(u, _c, p)| { + (u.process_id.clone(), p.start.clone(), p.end.clone()) + }); + + all_pairs.truncate(STATE_PAIR_COUNT_LIMIT); + set_java_path(); let apalache = std::env::var("TLA_APALACHE_BIN") @@ -248,42 +290,40 @@ pub fn check_traces() { panic!("bad apalache bin from 'TLA_APALACHE_BIN': '{:?}'", apalache); } - print_stats(&traces); - - let chunk_size = 20; - let all_pairs = traces.into_iter().flat_map(|t| { - t.state_pairs - .into_iter() - .filter(is_under_limit) - .map(move |p| (t.update.clone(), t.constants.clone(), p)) - }); - let chunks = all_pairs.chunks(chunk_size); - for chunk in &chunks { - let mut handles = vec![]; - for (update, constants, pair) in chunk { - let apalache = apalache.clone(); - let constants = constants.clone(); - let pair = pair.clone(); - // NOTE: We adopt the convention to reuse the 'process_id" as the tla module name - let tla_module = format!("{}_Apalache.tla", update.process_id); - let tla_module = get_tla_module_path(&tla_module); - - let handle = thread::spawn(move || { - check_tla_code_link( - &apalache, - PredicateDescription { - tla_module, - transition_predicate: "Next".to_string(), - predicate_parameters: Vec::new(), - }, - pair, - constants, - ) - }); - handles.push(handle); + // A poor man's parallel_map; process up to MAX_THREADS state pairs in parallel. Use mpsc channels + // to signal threads becoming available. 
+ const MAX_THREADS: usize = 20; + let mut running_threads = 0; + let (thread_freed_tx, thread_freed_rx) = mpsc::channel::<()>(); + for (i, (update, constants, pair)) in all_pairs.iter().enumerate() { + println!("Checking state pair #{}", i + 1); + if running_threads >= MAX_THREADS { + thread_freed_rx + .recv() + .expect("Error while waiting for the thread completion signal"); + running_threads -= 1; } - for handle in handles { - handle.join().unwrap().unwrap_or_else(|e| { + + let thread_freed_rx = thread_freed_tx.clone(); + let apalache = apalache.clone(); + let constants = constants.clone(); + let pair = pair.clone(); + // NOTE: We adopt the convention to reuse the 'process_id" as the tla module name + let tla_module = format!("{}_Apalache.tla", update.process_id); + let tla_module = get_tla_module_path(&tla_module); + + running_threads += 1; + let _handle = thread::spawn(move || { + let _ = check_tla_code_link( + &apalache, + PredicateDescription { + tla_module, + transition_predicate: "Next".to_string(), + predicate_parameters: Vec::new(), + }, + pair, + constants, + ).map_err(|e| { println!("Possible divergence from the TLA model detected when interacting with the ledger!"); println!("If you did not expect to change the interaction between governance and the ledger, reconsider whether your change is safe. 
You can find additional data on the step that triggered the error below."); println!("If you are confident that your change is correct, please contact the #formal-models Slack channel and describe the problem."); @@ -293,6 +333,16 @@ pub fn check_traces() { println!("Apalache returned:\n{:#?}", e.apalache_error); panic!("Apalache check failed") }); - } + thread_freed_rx + .send(()) + .expect("Couldn't send the thread completion signal"); + }); + } + + while running_threads > 0 { + thread_freed_rx + .recv() + .expect("Error while waiting for the thread completion signal"); + running_threads -= 1; } } diff --git a/rs/nns/governance/src/governance/tla/split_neuron.rs b/rs/nns/governance/src/governance/tla/split_neuron.rs index 80c1327d07b..f4555a0c1d5 100644 --- a/rs/nns/governance/src/governance/tla/split_neuron.rs +++ b/rs/nns/governance/src/governance/tla/split_neuron.rs @@ -1,3 +1,4 @@ +use lazy_static::lazy_static; use tla_instrumentation::{ Label, ResolvedStatePair, TlaConstantAssignment, ToTla, Update, VarAssignment, }; @@ -5,27 +6,29 @@ use tla_instrumentation::{ use super::common::{default_account, governance_account_id}; use super::{extract_common_constants, post_process_trace}; -pub fn split_neuron_desc() -> Update { - const PID: &str = "Split_Neuron"; - let default_locals = VarAssignment::new() - .add("sn_amount", 0_u64.to_tla_value()) - .add("sn_parent_neuron_id", 0_u64.to_tla_value()) - .add("sn_child_neuron_id", 0_u64.to_tla_value()) - .add("sn_child_account_id", default_account()); +const PID: &str = "Split_Neuron"; +lazy_static! 
{ + pub static ref SPLIT_NEURON_DESC: Update = { + let default_locals = VarAssignment::new() + .add("sn_amount", 0_u64.to_tla_value()) + .add("sn_parent_neuron_id", 0_u64.to_tla_value()) + .add("sn_child_neuron_id", 0_u64.to_tla_value()) + .add("sn_child_account_id", default_account()); - Update { - default_start_locals: default_locals.clone(), - default_end_locals: default_locals, - start_label: Label::new("SplitNeuron1"), - end_label: Label::new("Done"), - process_id: PID.to_string(), - canister_name: "governance".to_string(), - post_process: |trace| { - let constants = extract_split_neuron_constants(PID, trace); - post_process_trace(trace); - constants - }, - } + Update { + default_start_locals: default_locals.clone(), + default_end_locals: default_locals, + start_label: Label::new("SplitNeuron1"), + end_label: Label::new("Done"), + process_id: PID.to_string(), + canister_name: "governance".to_string(), + post_process: |trace| { + let constants = extract_split_neuron_constants(PID, trace); + post_process_trace(trace); + constants + }, + } + }; } fn extract_split_neuron_constants(pid: &str, trace: &[ResolvedStatePair]) -> TlaConstantAssignment { diff --git a/rs/nns/governance/src/governance/tla_macros.rs b/rs/nns/governance/src/governance/tla_macros.rs index 7b3de21c65e..b0e6b552523 100644 --- a/rs/nns/governance/src/governance/tla_macros.rs +++ b/rs/nns/governance/src/governance/tla_macros.rs @@ -28,3 +28,11 @@ macro_rules! tla_get_globals { tla::get_tla_globals($self) }; } + +#[macro_export] +macro_rules! 
tla_log_label { + ($label:expr) => { + #[cfg(feature = "tla")] + tla_instrumentation::tla_log_label!($label); + }; +} diff --git a/rs/nns/governance/tests/fixtures/mod.rs b/rs/nns/governance/tests/fixtures/mod.rs index c6034f13da9..9b2d0c724d7 100755 --- a/rs/nns/governance/tests/fixtures/mod.rs +++ b/rs/nns/governance/tests/fixtures/mod.rs @@ -46,6 +46,12 @@ use std::{ sync::{Arc, Mutex}, }; +#[cfg(feature = "tla")] +use ic_nns_governance::governance::tla::{ + self, account_to_tla, Destination, ToTla, TLA_INSTRUMENTATION_STATE, +}; +use ic_nns_governance::{tla_log_request, tla_log_response}; + pub mod environment_fixture; const DEFAULT_TEST_START_TIMESTAMP_SECONDS: u64 = 999_111_000_u64; @@ -442,6 +448,18 @@ impl IcpLedger for NNSFixture { "Issuing ledger transfer from account {} (subaccount {}) to account {} amount {} fee {}", from_account, from_subaccount.as_ref().map_or_else(||"None".to_string(), ToString::to_string), to_account, amount_e8s, fee_e8s ); + tla_log_request!( + "WaitForTransfer", + Destination::new("ledger"), + "Transfer", + tla::TlaValue::Record(BTreeMap::from([ + ("amount".to_string(), amount_e8s.to_tla_value()), + ("fee".to_string(), fee_e8s.to_tla_value()), + ("from".to_string(), account_to_tla(from_account)), + ("to".to_string(), account_to_tla(to_account)), + ])) + ); + let accounts = &mut self.nns_state.try_lock().unwrap().ledger.accounts; let from_e8s = accounts @@ -462,6 +480,14 @@ impl IcpLedger for NNSFixture { *accounts.entry(to_account).or_default() += amount_e8s; + tla_log_response!( + Destination::new("ledger"), + tla::TlaValue::Variant { + tag: "TransferOk".to_string(), + value: Box::new(tla::TlaValue::Constant("UNIT".to_string())) + } + ); + Ok(0) } diff --git a/rs/nns/governance/tests/merge_neurons.rs b/rs/nns/governance/tests/merge_neurons.rs index e4d70df042a..0b43b3d1189 100644 --- a/rs/nns/governance/tests/merge_neurons.rs +++ b/rs/nns/governance/tests/merge_neurons.rs @@ -19,6 +19,11 @@ use ic_nns_governance::{ }; use 
proptest::prelude::{proptest, TestCaseError}; +#[cfg(feature = "tla")] +use ic_nns_governance::governance::tla::{check_traces as tla_check_traces, TLA_TRACES_LKEY}; +#[cfg(feature = "tla")] +use tla_instrumentation_proc_macros::with_tla_trace_check; + // Using a `pub mod` works around spurious dead code warnings; see // https://github.com/rust-lang/rust/issues/46379 pub mod fixtures; @@ -167,6 +172,7 @@ fn do_test_merge_neurons( proptest! { #[test] +#[cfg_attr(feature = "tla", with_tla_trace_check)] fn test_merge_neurons_small( n1_stake in 0u64..50_000, n1_maturity in 0u64..500_000_000, diff --git a/rs/nns/governance/tla/Merge_Neurons.tla b/rs/nns/governance/tla/Merge_Neurons.tla new file mode 100644 index 00000000000..cb556d7e172 --- /dev/null +++ b/rs/nns/governance/tla/Merge_Neurons.tla @@ -0,0 +1,351 @@ +---- MODULE Merge_Neurons ---- +EXTENDS TLC, Sequences, Integers, FiniteSets, Variants + +(* +@typeAlias: proc = Str; +@typeAlias: account = Str; +@typeAlias: neuronId = Int; +@typeAlias: methodCall = Transfer({ from: $account, to: $account, amount: Int, fee: Int}) | AccountBalance({ account: $account }); +@typeAlias: methodResponse = Fail(UNIT) | TransferOk(UNIT) | BalanceQueryOk(Int); +@typeAlias: neurons = $neuronId -> {cached_stake: Int, account: $account, maturity: Int, fees: Int}; +*) + +_type_alias_dummy == TRUE + +CONSTANTS + Minting_Account_Id, + Merge_Neurons_Process_Ids, + TRANSACTION_FEE + +TRANSFER_OK == "Ok" +TRANSFER_FAIL == "Err" +DUMMY_ACCOUNT == "" + +\* @type: ($neurons, $neuronId) => Int; +Minted_Stake(neuron, neuron_id) == LET diff == neuron[neuron_id].cached_stake - neuron[neuron_id].fees IN + IF diff > 0 THEN diff ELSE 0 +request(caller, request_args) == [caller |-> caller, method_and_args |-> request_args] +transfer(from, to, amount, fee) == Variant("Transfer", [from |-> from, to |-> to, amount |-> amount, fee |-> fee]) + +\* @type: ($neurons, $neuronId, Int) => $neurons; +Decrease_Stake(neuron, neuron_id, amount) == [neuron EXCEPT 
![neuron_id].cached_stake = @ - amount] +\* @type: ($neurons, $neuronId, Int) => $neurons; +Increase_Stake(neuron, neuron_id, amount) == [neuron EXCEPT ![neuron_id].cached_stake = @ + amount] +\* @type: ($neurons, $neuronId, Int) => $neurons; +Update_Fees(neuron, neuron_id, fees_amount) == [neuron EXCEPT + ![neuron_id].cached_stake = Minted_Stake(neuron, neuron_id), + ![neuron_id].fees = 0 ] +\* @type: ($neurons, $neuronId, Int) => $neurons; +Decrease_Maturity(neuron, neuron_id, amount) == [neuron EXCEPT ![neuron_id].maturity = @ - amount] +\* @type: ($neurons, $neuronId, Int) => $neurons; +Increase_Maturity(neuron, neuron_id, amount) == [neuron EXCEPT ![neuron_id].maturity = @ + amount] + + +(* --algorithm Merge_Neurons { + +variables + + neuron \in [{} -> {}]; + \* Used to decide whether we should refresh or claim a neuron + neuron_id_by_account \in [{} -> {}]; + \* The set of currently locked neurons + locks = {}; + \* The queue of messages sent from the governance canister to the ledger canister + governance_to_ledger = <<>>; + ledger_to_governance = {}; + +macro reset_mn_vars() { + source_neuron_id := 0; + target_neuron_id := 0; + fees_amount := 0; + amount_to_target := 0; +} + +macro send_request(caller_id, request_args) { + governance_to_ledger := Append(governance_to_ledger, request(caller_id, request_args)) +}; + +macro adjust_maturities(neuron_changes) { + neuron := Decrease_Maturity( + Increase_Maturity(neuron_changes, target_nid, neuron[source_nid].maturity), + source_nid, neuron[source_nid].maturity); +} + +macro finish() { + locks := locks \ {source_neuron_id, target_neuron_id }; + reset_mn_vars(); + goto Done; +} + +\* This assumes that att, source_nid and target_nid are implicitly available +macro maybe_transfer_stake(neuron_changes) { + if(att > 0) { + source_neuron_id := source_nid; + target_neuron_id := target_nid; + locks := locks \union { source_neuron_id, target_neuron_id }; + amount_to_target := att; + neuron := Decrease_Stake(neuron_changes, 
source_neuron_id, amount_to_target + TRANSACTION_FEE); + send_request(self, + transfer(neuron[source_neuron_id].account, + neuron[target_neuron_id].account, + amount_to_target, + TRANSACTION_FEE) + ); + goto MergeNeurons_Stake_WaitForTransfer; + } else { + adjust_maturities(neuron_changes); + finish(); + } +} + +process ( Merge_Neurons \in Merge_Neurons_Process_Ids ) + variable + \* These model the parameters of the call + source_neuron_id = 0; + target_neuron_id = 0; + + \* internal variables + fees_amount = 0; + amount_to_target = 0; + { + MergeNeurons_Start: + either { + \* Simulate calls that just fail early (e.g, due to failing checks) and + \* don't change the state. + \* Not so useful for model checking, but needed to follow the code traces. + goto Done; + } or with(source_nid \in DOMAIN(neuron) \ locks; target_nid \in DOMAIN(neuron) \ locks) { + \* We block this branch of the process from starting where an error would be returned + \* in the implementation + \* Note that the sequential taking of locks in the implementation already implies that + \* source_nid != target_nid, as the locks are taken sequentially there. + \* Here, also we manually ensure that these neuron IDs differ. + await source_nid /= target_nid; + + \* total stake cannot equal 0 + await (neuron[source_nid].cached_stake - neuron[source_nid].fees) + + (neuron[target_nid].cached_stake - neuron[target_nid].fees) /= 0; + + + \* here the impl has a few guards: + \* - caller must be controller of both source and target. 
+ \* - source must be younger than target + \* - kyc_verified must match for both source and target + \* - not_for_profit must match for both source and target + \* - source neuron cannot be dedicated to community fund + + with(fa = neuron[source_nid].fees; + att = LET diff == Minted_Stake(neuron, source_nid) - TRANSACTION_FEE + IN IF diff > 0 THEN diff ELSE 0 + ) { + if(fa > TRANSACTION_FEE) { + \* This is a bit braindead, but seems to be the best way to work around PlusCal limitations. + \* Since we might unlock the neurons in the same message handler if we don't try to + \* burn the fees, and since the stupid PlusCal limitation prevents us from updating + \* the locks variable twice in the same block, we work around this by only locking + \* here conditionally. We'll also explicitly add the neurons to the locks again + \* if we want to transfer stake later on. This will be idempotent if they're already + \* locked, so it will behave the same as the implementation. + \* And same for the other local variables, they may get reset if the whole + \* update completes in a just a single message execution + source_neuron_id := source_nid; + target_neuron_id := target_nid; + locks := locks \union {source_neuron_id, target_neuron_id}; + fees_amount := fa; + amount_to_target := att; + + \* The burning fee is 0, even though we check that the amounts are less than the transaction fee + send_request(self, transfer(neuron[source_neuron_id].account, Minting_Account_Id, fees_amount, 0)); + } + else { + \* There's some code duplication here, but modeling the Rust control flow + \* in PlusCal is tricky, especially as we now have to match the labels 1-1 + \* with the code link checker. 
+ maybe_transfer_stake(neuron); + }; + } + }; + MergeNeurons_Burn_WaitForTransfer: + \* Note that we're here only because the fees amount was larger than the + \* transaction fee (otherwise, the goto above would have taken us to MergeNeurons_Stake_WaitForTransfer/MergeNeurons_End) + with(answer \in { resp \in ledger_to_governance: resp.caller = self}) { + ledger_to_governance := ledger_to_governance \ {answer}; + if(answer.response = Variant("Fail", UNIT)) { + finish(); + } + else { + \* The with here introduces some context to the macro, which reassigns + \* source/target_neuron_id to themselves + with(source_nid = source_neuron_id; target_nid = target_neuron_id; att = amount_to_target) { + maybe_transfer_stake(Update_Fees(neuron, source_neuron_id, fees_amount)); + } + }; + }; + + MergeNeurons_Stake_WaitForTransfer: + with(answer \in { resp \in ledger_to_governance: resp.caller = self}) { + ledger_to_governance := ledger_to_governance \ {answer}; + if(answer.response = Variant("Fail", UNIT)) { + neuron := Increase_Stake(neuron, source_neuron_id, amount_to_target + TRANSACTION_FEE); + finish(); + } else { + with(source_nid = source_neuron_id; target_nid = target_neuron_id) { + adjust_maturities(Increase_Stake(neuron, target_neuron_id, amount_to_target)); + finish(); + } + }; + }; + } +} +*) +\* BEGIN TRANSLATION (chksum(pcal) = "e673c31c" /\ chksum(tla) = "225bd637") +VARIABLES pc, neuron, neuron_id_by_account, locks, governance_to_ledger, + ledger_to_governance, source_neuron_id, target_neuron_id, + fees_amount, amount_to_target + +vars == << pc, neuron, neuron_id_by_account, locks, governance_to_ledger, + ledger_to_governance, source_neuron_id, target_neuron_id, + fees_amount, amount_to_target >> + +ProcSet == (Merge_Neurons_Process_Ids) + +Init == (* Global variables *) + /\ neuron \in [{} -> {}] + /\ neuron_id_by_account \in [{} -> {}] + /\ locks = {} + /\ governance_to_ledger = <<>> + /\ ledger_to_governance = {} + (* Process Merge_Neurons *) + /\ 
source_neuron_id = [self \in Merge_Neurons_Process_Ids |-> 0] + /\ target_neuron_id = [self \in Merge_Neurons_Process_Ids |-> 0] + /\ fees_amount = [self \in Merge_Neurons_Process_Ids |-> 0] + /\ amount_to_target = [self \in Merge_Neurons_Process_Ids |-> 0] + /\ pc = [self \in ProcSet |-> "MergeNeurons_Start"] + +MergeNeurons_Start(self) == /\ pc[self] = "MergeNeurons_Start" + /\ \/ /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED <> + \/ /\ \E source_nid \in DOMAIN(neuron) \ locks: + \E target_nid \in DOMAIN(neuron) \ locks: + /\ source_nid /= target_nid + /\ (neuron[source_nid].cached_stake - neuron[source_nid].fees) + + (neuron[target_nid].cached_stake - neuron[target_nid].fees) /= 0 + /\ LET fa == neuron[source_nid].fees IN + LET att == LET diff == Minted_Stake(neuron, source_nid) - TRANSACTION_FEE + IN IF diff > 0 THEN diff ELSE 0 IN + IF fa > TRANSACTION_FEE + THEN /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = source_nid] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = target_nid] + /\ locks' = (locks \union {source_neuron_id'[self], target_neuron_id'[self]}) + /\ fees_amount' = [fees_amount EXCEPT ![self] = fa] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = att] + /\ governance_to_ledger' = Append(governance_to_ledger, request(self, (transfer(neuron[source_neuron_id'[self]].account, Minting_Account_Id, fees_amount'[self], 0)))) + /\ pc' = [pc EXCEPT ![self] = "MergeNeurons_Burn_WaitForTransfer"] + /\ UNCHANGED neuron + ELSE /\ IF att > 0 + THEN /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = source_nid] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = target_nid] + /\ locks' = (locks \union { source_neuron_id'[self], target_neuron_id'[self] }) + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = att] + /\ neuron' = Decrease_Stake(neuron, source_neuron_id'[self], amount_to_target'[self] + TRANSACTION_FEE) + /\ governance_to_ledger' = Append(governance_to_ledger, request(self, 
(transfer(neuron'[source_neuron_id'[self]].account, + neuron'[target_neuron_id'[self]].account, + amount_to_target'[self], + TRANSACTION_FEE)))) + /\ pc' = [pc EXCEPT ![self] = "MergeNeurons_Stake_WaitForTransfer"] + /\ UNCHANGED fees_amount + ELSE /\ neuron' = Decrease_Maturity( + Increase_Maturity(neuron, target_nid, neuron[source_nid].maturity), + source_nid, neuron[source_nid].maturity) + /\ locks' = locks \ {source_neuron_id[self], target_neuron_id[self] } + /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = 0] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = 0] + /\ fees_amount' = [fees_amount EXCEPT ![self] = 0] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED governance_to_ledger + /\ UNCHANGED << neuron_id_by_account, + ledger_to_governance >> + +MergeNeurons_Burn_WaitForTransfer(self) == /\ pc[self] = "MergeNeurons_Burn_WaitForTransfer" + /\ \E answer \in { resp \in ledger_to_governance: resp.caller = self}: + /\ ledger_to_governance' = ledger_to_governance \ {answer} + /\ IF answer.response = Variant("Fail", UNIT) + THEN /\ locks' = locks \ {source_neuron_id[self], target_neuron_id[self] } + /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = 0] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = 0] + /\ fees_amount' = [fees_amount EXCEPT ![self] = 0] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED << neuron, + governance_to_ledger >> + ELSE /\ LET source_nid == source_neuron_id[self] IN + LET target_nid == target_neuron_id[self] IN + LET att == amount_to_target[self] IN + IF att > 0 + THEN /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = source_nid] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = target_nid] + /\ locks' = (locks \union { source_neuron_id'[self], target_neuron_id'[self] }) + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = att] + /\ neuron' 
= Decrease_Stake((Update_Fees(neuron, source_neuron_id'[self], fees_amount[self])), source_neuron_id'[self], amount_to_target'[self] + TRANSACTION_FEE) + /\ governance_to_ledger' = Append(governance_to_ledger, request(self, (transfer(neuron'[source_neuron_id'[self]].account, + neuron'[target_neuron_id'[self]].account, + amount_to_target'[self], + TRANSACTION_FEE)))) + /\ pc' = [pc EXCEPT ![self] = "MergeNeurons_Stake_WaitForTransfer"] + /\ UNCHANGED fees_amount + ELSE /\ neuron' = Decrease_Maturity( + Increase_Maturity((Update_Fees(neuron, source_neuron_id[self], fees_amount[self])), target_nid, neuron[source_nid].maturity), + source_nid, neuron[source_nid].maturity) + /\ locks' = locks \ {source_neuron_id[self], target_neuron_id[self] } + /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = 0] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = 0] + /\ fees_amount' = [fees_amount EXCEPT ![self] = 0] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED governance_to_ledger + /\ UNCHANGED neuron_id_by_account + +MergeNeurons_Stake_WaitForTransfer(self) == /\ pc[self] = "MergeNeurons_Stake_WaitForTransfer" + /\ \E answer \in { resp \in ledger_to_governance: resp.caller = self}: + /\ ledger_to_governance' = ledger_to_governance \ {answer} + /\ IF answer.response = Variant("Fail", UNIT) + THEN /\ neuron' = Increase_Stake(neuron, source_neuron_id[self], amount_to_target[self] + TRANSACTION_FEE) + /\ locks' = locks \ {source_neuron_id[self], target_neuron_id[self] } + /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = 0] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = 0] + /\ fees_amount' = [fees_amount EXCEPT ![self] = 0] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + ELSE /\ LET source_nid == source_neuron_id[self] IN + LET target_nid == target_neuron_id[self] IN + /\ neuron' = Decrease_Maturity( + 
Increase_Maturity((Increase_Stake(neuron, target_neuron_id[self], amount_to_target[self])), target_nid, neuron[source_nid].maturity), + source_nid, neuron[source_nid].maturity) + /\ locks' = locks \ {source_neuron_id[self], target_neuron_id[self] } + /\ source_neuron_id' = [source_neuron_id EXCEPT ![self] = 0] + /\ target_neuron_id' = [target_neuron_id EXCEPT ![self] = 0] + /\ fees_amount' = [fees_amount EXCEPT ![self] = 0] + /\ amount_to_target' = [amount_to_target EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED << neuron_id_by_account, + governance_to_ledger >> + +Merge_Neurons(self) == MergeNeurons_Start(self) + \/ MergeNeurons_Burn_WaitForTransfer(self) + \/ MergeNeurons_Stake_WaitForTransfer(self) + +(* Allow infinite stuttering to prevent deadlock on termination. *) +Terminating == /\ \A self \in ProcSet: pc[self] = "Done" + /\ UNCHANGED vars + +Next == (\E self \in Merge_Neurons_Process_Ids: Merge_Neurons(self)) + \/ Terminating + +Spec == Init /\ [][Next]_vars + +Termination == <>(\A self \in ProcSet: pc[self] = "Done") + +\* END TRANSLATION + +==== diff --git a/rs/nns/governance/tla/Merge_Neurons_Apalache.tla b/rs/nns/governance/tla/Merge_Neurons_Apalache.tla new file mode 100644 index 00000000000..4e029383977 --- /dev/null +++ b/rs/nns/governance/tla/Merge_Neurons_Apalache.tla @@ -0,0 +1,52 @@ +---- MODULE Merge_Neurons_Apalache ---- + +EXTENDS TLC, Variants + + +\* CODE_LINK_INSERT_CONSTANTS + +(* + +CONSTANTS + + \* @type: Set($proc); + Merge_Neurons_Process_Ids + +CONSTANTS + \* The transfer fee charged by the ledger canister + \* @type: Int; + TRANSACTION_FEE +*) + +VARIABLES + \* @type: $neuronId -> {cached_stake: Int, account: $account, maturity: Int, fees: Int}; + neuron, + \* @type: $account -> $neuronId; + neuron_id_by_account, + \* @type: Set($neuronId); + locks, + \* @type: Seq({caller : $proc, method_and_args: $methodCall }); + governance_to_ledger, + \* @type: Set({caller: $proc, response: $methodResponse }); + 
ledger_to_governance, + \* @type: $proc -> Str; + pc, + \* @type: $proc -> $neuronId; + source_neuron_id, + \* @type: $proc -> $neuronId; + target_neuron_id, + \* @type: $proc -> Int; + fees_amount, + \* @type: $proc -> Int; + amount_to_target + +\* @type: Set($neuronId) => $neuronId; +FRESH_NEURON_ID(existing_neurons) == CHOOSE nid \in (Neuron_Ids \ existing_neurons): TRUE + +MOD == INSTANCE Merge_Neurons + +Next == [MOD!Next]_MOD!vars + +==== + +==== diff --git a/rs/tla_instrumentation/tla_instrumentation/src/tla_state.rs b/rs/tla_instrumentation/tla_instrumentation/src/tla_state.rs index 108dda76d35..d2fbd2b3611 100644 --- a/rs/tla_instrumentation/tla_instrumentation/src/tla_state.rs +++ b/rs/tla_instrumentation/tla_instrumentation/src/tla_state.rs @@ -48,11 +48,17 @@ impl VarAssignment { Possible causes: 1. A local variable is set both after the last await and in default_locals. This is the most likely cause if the stack trace includes tla_log_method_return. -2. A local variable of the same name is set in multiple functions in the call stack."#, +2. A local variable of the same name is set in multiple functions in the call stack. 
+States are: +{:?} +and +{:?}"#, self.0 .keys() .collect::>() - .intersection(&other.0.keys().collect::>()) + .intersection(&other.0.keys().collect::>()), + self, + other ); let mut new_locals = self.0.clone(); new_locals.extend(other.0); @@ -60,7 +66,7 @@ Possible causes: } } -#[derive(Clone, Default)] +#[derive(Clone, Default, PartialEq, Eq, Hash)] pub struct GlobalState(pub VarAssignment); impl GlobalState { @@ -100,7 +106,7 @@ impl std::fmt::Debug for GlobalState { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Label(String); impl Label { From 25c1bb0227d9970f5673b908817d7c4962b29911 Mon Sep 17 00:00:00 2001 From: Arshavir Ter-Gabrielyan Date: Fri, 29 Nov 2024 18:50:36 +0100 Subject: [PATCH 07/52] feat(sns): Use human readable date-time in AdvanceSnsTargetVersion proposal rendering (#2896) This PR improves the rendering of AdvanceSnsTargetVersion proposals by using a human-readable date-time format rather than raw Unix timestamps. Also, a potentially confusing remark is dropped from the disclaimer. 
--------- Co-authored-by: Yusef Habib Co-authored-by: IDX GitHub Automation --- Cargo.lock | 1 + rs/sns/governance/BUILD.bazel | 1 + rs/sns/governance/Cargo.toml | 2 +- rs/sns/governance/src/proposal.rs | 30 ++++++++++++++++--- .../proposal/advance_sns_target_version.rs | 25 +++++++++++++++- 5 files changed, 53 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cad39cd54bc..564a235be64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11856,6 +11856,7 @@ dependencies = [ "strum", "strum_macros", "tempfile", + "time", "tokio", "tokio-test", ] diff --git a/rs/sns/governance/BUILD.bazel b/rs/sns/governance/BUILD.bazel index 39adaf9000f..0b98210b77b 100644 --- a/rs/sns/governance/BUILD.bazel +++ b/rs/sns/governance/BUILD.bazel @@ -61,6 +61,7 @@ DEPENDENCIES = [ "@crate_index//:serde", "@crate_index//:serde_bytes", "@crate_index//:strum", + "@crate_index//:time", ] MACRO_DEPENDENCIES = [ diff --git a/rs/sns/governance/Cargo.toml b/rs/sns/governance/Cargo.toml index d66babf4a6a..86db0a34d3c 100644 --- a/rs/sns/governance/Cargo.toml +++ b/rs/sns/governance/Cargo.toml @@ -27,7 +27,6 @@ path = "tests/proposal.rs" [dependencies] build-info = { workspace = true } - async-trait = { workspace = true } base64 = { workspace = true } candid = { workspace = true } @@ -81,6 +80,7 @@ serde = { workspace = true } serde_bytes = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } +time = { workspace = true } [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] ic-types = { path = "../../types/types" } diff --git a/rs/sns/governance/src/proposal.rs b/rs/sns/governance/src/proposal.rs index e82d3e77591..c1a30b86f86 100644 --- a/rs/sns/governance/src/proposal.rs +++ b/rs/sns/governance/src/proposal.rs @@ -52,6 +52,7 @@ use std::{ convert::TryFrom, fmt::Write, }; +use time; /// The maximum number of bytes in an SNS proposal's title. 
pub const PROPOSAL_TITLE_BYTES_MAX: usize = 256; @@ -1716,6 +1717,21 @@ fn validate_and_render_manage_dapp_canister_settings( } } +/// Attempts to format `` as a human-readable string. +/// +/// For example: +/// ``` +/// assert_eq!(format_timestamp(1732896850), Some("2024-11-29 16:14:10 UTC".to_string())); +/// ``` +fn format_timestamp(timestamp_seconds: u64) -> Option { + let timestamp_seconds = i64::try_from(timestamp_seconds).ok()?; + let dt_offset = time::OffsetDateTime::from_unix_timestamp(timestamp_seconds).ok()?; + let format = + time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] UTC") + .ok()?; + dt_offset.format(&format).ok() +} + /// Attempts to validate an `AdvanceSnsTargetVersion` action and render its human-readable text. /// Invalidates the action in the following cases: /// - There are no pending upgrades. @@ -1737,7 +1753,14 @@ fn validate_and_render_advance_sns_target_version_proposal( let (upgrade_steps, target_version) = governance_proto .validate_new_target_version(advance_sns_target_version.new_target.clone())?; - let valid_timestamp_seconds = upgrade_steps.approximate_time_of_validity_timestamp_seconds(); + let time_of_validity = { + let timestamp_seconds = upgrade_steps.approximate_time_of_validity_timestamp_seconds(); + // This fallback should not occur unless `timestamp_seconds` is outside of the range + // from +1970-01-01 00:00:00 UTC (0) + // till +9999-12-31 23:59:59 UTC (253402300799). 
+ format_timestamp(timestamp_seconds) + .unwrap_or_else(|| format!("timestamp {} seconds", timestamp_seconds)) + }; let current_target_versions_render = render_two_versions_as_markdown_table(upgrade_steps.current(), &target_version); @@ -1753,9 +1776,8 @@ fn validate_and_render_advance_sns_target_version_proposal( ### Upgrade steps\n\n\ {upgrade_steps}\n\n\ ### Monitoring the upgrade process\n\n\ - Please note: the upgrade steps above (valid around timestamp {valid_timestamp_seconds} \ - seconds) might change during this proposal's voting period. Such changes are unlikely and \ - are subject to NNS community's approval.\n\n\ + Please note: the upgrade steps mentioned above (valid around {time_of_validity}) \ + might change during this proposal's voting period.\n\n\ The **upgrade journal** provides up-to-date information on this SNS's upgrade process:\n\n\ {upgrade_journal_url_render}" ); diff --git a/rs/sns/governance/src/proposal/advance_sns_target_version.rs b/rs/sns/governance/src/proposal/advance_sns_target_version.rs index d98bf263ae7..628b69de1ee 100644 --- a/rs/sns/governance/src/proposal/advance_sns_target_version.rs +++ b/rs/sns/governance/src/proposal/advance_sns_target_version.rs @@ -5,6 +5,7 @@ use crate::pb::v1::Governance as GovernancePb; use crate::types::test_helpers::NativeEnvironment; use futures::FutureExt; use ic_test_utilities_types::ids::canister_test_id; +use pretty_assertions::assert_eq; fn sns_version_for_tests() -> Version { Version { @@ -65,6 +66,28 @@ fn standard_governance_proto_for_tests(deployed_version: Option) -> Gov } } +#[test] +fn test_format_timestamp() { + for (expected, timestamp_seconds) in [ + (Some("1970-01-01 00:00:00 UTC"), 0), + (Some("2024-11-29 16:14:10 UTC"), 1732896850), + (Some("2038-01-19 03:14:07 UTC"), i32::MAX as u64), + (Some("4571-09-24 08:52:47 UTC"), 82102668767), + (Some("9999-12-31 23:59:59 UTC"), 253402300799), + (None, 253402300800), + (None, i64::MAX as u64), + (None, u64::MAX), + ] { + let observed = 
format_timestamp(timestamp_seconds); + assert_eq!( + observed, + expected.map(|s| s.to_string()), + "unexpected result from format_timestamp({})", + timestamp_seconds, + ); + } +} + #[test] fn test_validate_and_render_advance_target_version_action() { // Prepare the world. @@ -176,7 +199,7 @@ fn test_validate_and_render_advance_target_version_action() { ### Monitoring the upgrade process -Please note: the upgrade steps above (valid around timestamp 0 seconds) might change during this proposal's voting period. Such changes are unlikely and are subject to NNS community's approval. +Please note: the upgrade steps mentioned above (valid around 1970-01-01 00:00:00 UTC) might change during this proposal's voting period. The **upgrade journal** provides up-to-date information on this SNS's upgrade process: From 5ce01d0a871d5162d0b2ff0b585d71ef2e644ac9 Mon Sep 17 00:00:00 2001 From: gregorydemay <112856886+gregorydemay@users.noreply.github.com> Date: Sat, 30 Nov 2024 12:58:03 +0100 Subject: [PATCH 08/52] fix(cketh): Undo breaking change in `get_minter_info` (#2907) Fix an undesired breaking changed introduced by #2747 : 1. The fields `eth_helper_contract_address` and `erc20_helper_contract_address` in `get_minter_info` were wrongly reused to point to the new helper smart contract [0x18901044688D3756C35Ed2b36D93e6a5B8e00E68](https://etherscan.io/address/0x18901044688D3756C35Ed2b36D93e6a5B8e00E68) that supports deposit with subaccounts and that was added as part of proposal [134264](https://dashboard.internetcomputer.org/proposal/134264). 2. This broke clients that relied on that information to make deposit of ETH or ERC-20 because the new helper smart contract has a different ABI. 
This is visible by such a [transaction](https://etherscan.io/tx/0x0968b25814221719bf966cf4bbd2de8290ed2ab42c049d451d64e46812d1574e), where the transaction tried to call the method `deposit` (`0xb214faa5`) that does exist on the [deprecated ETH helper smart contract](https://etherscan.io/address/0x7574eB42cA208A4f6960ECCAfDF186D627dCC175) but doesn't on the new contract (it should have been `depositEth` (`0x17c819c4`)). 3. The fix simply consists in reverting the changes regarding the values of the fields `eth_helper_contract_address` and `erc20_helper_contract_address` in `get_minter_info` (so that they point back to [0x7574eB42cA208A4f6960ECCAfDF186D627dCC175](https://etherscan.io/address/0x7574eB42cA208A4f6960ECCAfDF186D627dCC175) and [0x6abDA0438307733FC299e9C229FD3cc074bD8cC0](https://etherscan.io/address/0x6abDA0438307733FC299e9C229FD3cc074bD8cC0), respectively) and adding new fields to contain the state of the log scraping (address and last scraped block number) for the new helper smart contract. --------- Co-authored-by: Thomas Locher --- rs/ethereum/cketh/minter/cketh_minter.did | 6 +++ rs/ethereum/cketh/minter/src/endpoints.rs | 2 + rs/ethereum/cketh/minter/src/main.rs | 4 ++ .../minter/src/state/eth_logs_scraping/mod.rs | 21 ++++----- .../src/state/eth_logs_scraping/tests.rs | 21 ++++++--- rs/ethereum/cketh/minter/tests/ckerc20.rs | 46 ++++++++++++++++++- rs/ethereum/cketh/minter/tests/cketh.rs | 4 ++ 7 files changed, 83 insertions(+), 21 deletions(-) diff --git a/rs/ethereum/cketh/minter/cketh_minter.did b/rs/ethereum/cketh/minter/cketh_minter.did index 2f938ae190f..98ee76dea3b 100644 --- a/rs/ethereum/cketh/minter/cketh_minter.did +++ b/rs/ethereum/cketh/minter/cketh_minter.did @@ -172,6 +172,9 @@ type MinterInfo = record { // Address of the ERC20 helper smart contract erc20_helper_contract_address: opt text; + // Address of the ETH or ERC20 deposit with subaccount helper smart contract. 
+ deposit_with_subaccount_helper_contract_address: opt text; + // Information of supported ERC20 tokens. supported_ckerc20_tokens: opt vec CkErc20Token; @@ -201,6 +204,9 @@ type MinterInfo = record { // Last scraped block number for logs of the ERC20 helper contract. last_erc20_scraped_block_number: opt nat; + // Last scraped block number for logs of the deposit with subaccount helper contract. + last_deposit_with_subaccount_scraped_block_number: opt nat; + // Canister ID of the ckETH ledger. cketh_ledger_id: opt principal; diff --git a/rs/ethereum/cketh/minter/src/endpoints.rs b/rs/ethereum/cketh/minter/src/endpoints.rs index aafff92a3d2..5ebfbbfe98b 100644 --- a/rs/ethereum/cketh/minter/src/endpoints.rs +++ b/rs/ethereum/cketh/minter/src/endpoints.rs @@ -67,6 +67,7 @@ pub struct MinterInfo { pub smart_contract_address: Option, pub eth_helper_contract_address: Option, pub erc20_helper_contract_address: Option, + pub deposit_with_subaccount_helper_contract_address: Option, pub supported_ckerc20_tokens: Option>, pub minimum_withdrawal_amount: Option, pub ethereum_block_height: Option, @@ -76,6 +77,7 @@ pub struct MinterInfo { pub erc20_balances: Option>, pub last_eth_scraped_block_number: Option, pub last_erc20_scraped_block_number: Option, + pub last_deposit_with_subaccount_scraped_block_number: Option, pub cketh_ledger_id: Option, pub evm_rpc_id: Option, } diff --git a/rs/ethereum/cketh/minter/src/main.rs b/rs/ethereum/cketh/minter/src/main.rs index 47664bf1218..32bb99986f9 100644 --- a/rs/ethereum/cketh/minter/src/main.rs +++ b/rs/ethereum/cketh/minter/src/main.rs @@ -227,6 +227,8 @@ async fn get_minter_info() -> MinterInfo { last_eth_scraped_block_number, erc20_helper_contract_address, last_erc20_scraped_block_number, + deposit_with_subaccount_helper_contract_address, + last_deposit_with_subaccount_scraped_block_number, } = s.log_scrapings.info(); MinterInfo { @@ -234,6 +236,7 @@ async fn get_minter_info() -> MinterInfo { smart_contract_address: 
eth_helper_contract_address.clone(), eth_helper_contract_address, erc20_helper_contract_address, + deposit_with_subaccount_helper_contract_address, supported_ckerc20_tokens, minimum_withdrawal_amount: Some(s.cketh_minimum_withdrawal_amount.into()), ethereum_block_height: Some(s.ethereum_block_height.into()), @@ -249,6 +252,7 @@ async fn get_minter_info() -> MinterInfo { erc20_balances, last_eth_scraped_block_number, last_erc20_scraped_block_number, + last_deposit_with_subaccount_scraped_block_number, cketh_ledger_id: Some(s.cketh_ledger_id), evm_rpc_id: s.evm_rpc_id, } diff --git a/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/mod.rs b/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/mod.rs index 92737856e96..cfc017acfd7 100644 --- a/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/mod.rs +++ b/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/mod.rs @@ -69,28 +69,21 @@ impl LogScrapings { let last_scraped_block_number = Some(Nat::from(state.last_scraped_block_number())); (contract_address, last_scraped_block_number) }; - let ( - deposit_with_subaccount_contract_address, - deposit_with_subaccount_last_scraped_block_number, - ) = to_info(self.get(LogScrapingId::EthOrErc20DepositWithSubaccount)); - if deposit_with_subaccount_contract_address.is_some() { - return LogScrapingInfo { - eth_helper_contract_address: deposit_with_subaccount_contract_address.clone(), - last_eth_scraped_block_number: deposit_with_subaccount_last_scraped_block_number - .clone(), - erc20_helper_contract_address: deposit_with_subaccount_contract_address, - last_erc20_scraped_block_number: deposit_with_subaccount_last_scraped_block_number, - }; - } let (eth_helper_contract_address, last_eth_scraped_block_number) = to_info(self.get(LogScrapingId::EthDepositWithoutSubaccount)); let (erc20_helper_contract_address, last_erc20_scraped_block_number) = to_info(self.get(LogScrapingId::Erc20DepositWithoutSubaccount)); + let ( + deposit_with_subaccount_helper_contract_address, + 
last_deposit_with_subaccount_scraped_block_number, + ) = to_info(self.get(LogScrapingId::EthOrErc20DepositWithSubaccount)); LogScrapingInfo { eth_helper_contract_address, last_eth_scraped_block_number, erc20_helper_contract_address, last_erc20_scraped_block_number, + deposit_with_subaccount_helper_contract_address, + last_deposit_with_subaccount_scraped_block_number, } } } @@ -202,4 +195,6 @@ pub struct LogScrapingInfo { pub last_eth_scraped_block_number: Option, pub erc20_helper_contract_address: Option, pub last_erc20_scraped_block_number: Option, + pub deposit_with_subaccount_helper_contract_address: Option, + pub last_deposit_with_subaccount_scraped_block_number: Option, } diff --git a/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/tests.rs b/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/tests.rs index 8fe4e235682..4eeec72e5b2 100644 --- a/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/tests.rs +++ b/rs/ethereum/cketh/minter/src/state/eth_logs_scraping/tests.rs @@ -1,7 +1,7 @@ use crate::state::eth_logs_scraping::{LogScrapingId, LogScrapingInfo, LogScrapings}; #[test] -fn should_use_info_from_deposit_with_subaccount_when_contract_address_present() { +fn should_not_change_other_field_when_deposit_with_subaccount_present() { const LAST_SCRAPED_BLOCK_NUMBER: u32 = 21_235_426_u32; const ETH_HELPER_SMART_CONTRACT: &str = "0x7574eB42cA208A4f6960ECCAfDF186D627dCC175"; const ERC20_HELPER_SMART_CONTRACT: &str = "0x6abDA0438307733FC299e9C229FD3cc074bD8cC0"; @@ -15,6 +15,9 @@ fn should_use_info_from_deposit_with_subaccount_when_contract_address_present() LogScrapingInfo { last_eth_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER.into()), last_erc20_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER.into()), + last_deposit_with_subaccount_scraped_block_number: Some( + LAST_SCRAPED_BLOCK_NUMBER.into() + ), ..Default::default() } ); @@ -32,13 +35,18 @@ fn should_use_info_from_deposit_with_subaccount_when_contract_address_present() ) .unwrap(); + let 
info_before = scrapings.info(); assert_eq!( - scrapings.info(), + info_before, LogScrapingInfo { eth_helper_contract_address: Some(ETH_HELPER_SMART_CONTRACT.to_string()), last_eth_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER.into()), erc20_helper_contract_address: Some(ERC20_HELPER_SMART_CONTRACT.to_string()), last_erc20_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER.into()), + deposit_with_subaccount_helper_contract_address: None, + last_deposit_with_subaccount_scraped_block_number: Some( + LAST_SCRAPED_BLOCK_NUMBER.into() + ), } ); @@ -58,14 +66,13 @@ fn should_use_info_from_deposit_with_subaccount_when_contract_address_present() assert_eq!( scrapings.info(), LogScrapingInfo { - eth_helper_contract_address: Some( + deposit_with_subaccount_helper_contract_address: Some( DEPOSIT_WITH_SUBACCOUNT_HELPER_SMART_CONTRACT.to_string() ), - last_eth_scraped_block_number: Some((LAST_SCRAPED_BLOCK_NUMBER + 1).into()), - erc20_helper_contract_address: Some( - DEPOSIT_WITH_SUBACCOUNT_HELPER_SMART_CONTRACT.to_string() + last_deposit_with_subaccount_scraped_block_number: Some( + (LAST_SCRAPED_BLOCK_NUMBER + 1).into() ), - last_erc20_scraped_block_number: Some((LAST_SCRAPED_BLOCK_NUMBER + 1).into()), + ..info_before } ); } diff --git a/rs/ethereum/cketh/minter/tests/ckerc20.rs b/rs/ethereum/cketh/minter/tests/ckerc20.rs index 31fed9353ab..024359b5922 100644 --- a/rs/ethereum/cketh/minter/tests/ckerc20.rs +++ b/rs/ethereum/cketh/minter/tests/ckerc20.rs @@ -25,7 +25,8 @@ use ic_cketh_test_utils::{ format_ethereum_address_to_eip_55, CkEthSetup, CKETH_MINIMUM_WITHDRAWAL_AMOUNT, DEFAULT_DEPOSIT_BLOCK_NUMBER, DEFAULT_DEPOSIT_FROM_ADDRESS, DEFAULT_DEPOSIT_LOG_INDEX, DEFAULT_DEPOSIT_TRANSACTION_HASH, DEFAULT_ERC20_DEPOSIT_LOG_INDEX, - DEFAULT_ERC20_DEPOSIT_TRANSACTION_HASH, DEFAULT_USER_SUBACCOUNT, EFFECTIVE_GAS_PRICE, + DEFAULT_ERC20_DEPOSIT_TRANSACTION_HASH, DEFAULT_USER_SUBACCOUNT, + DEPOSIT_WITH_SUBACCOUNT_HELPER_CONTRACT_ADDRESS, EFFECTIVE_GAS_PRICE, 
ERC20_HELPER_CONTRACT_ADDRESS, ETH_HELPER_CONTRACT_ADDRESS, GAS_USED, LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL, MINTER_ADDRESS, }; @@ -1681,6 +1682,7 @@ fn should_retrieve_minter_info() { erc20_helper_contract_address: Some(format_ethereum_address_to_eip_55( ERC20_HELPER_CONTRACT_ADDRESS )), + deposit_with_subaccount_helper_contract_address: None, supported_ckerc20_tokens: Some(supported_ckerc20_tokens), minimum_withdrawal_amount: Some(Nat::from(CKETH_MINIMUM_WITHDRAWAL_AMOUNT)), ethereum_block_height: Some(Finalized), @@ -1690,12 +1692,54 @@ fn should_retrieve_minter_info() { erc20_balances: Some(erc20_balances), last_eth_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into()), last_erc20_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into()), + last_deposit_with_subaccount_scraped_block_number: Some( + LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into() + ), cketh_ledger_id: Some(ckerc20.cketh_ledger_id()), evm_rpc_id: ckerc20.cketh.evm_rpc_id.map(Principal::from), } ); } +// If a contract needs to be changed, the contract ABI must be preserved, +// otherwise this will be a breaking change for clients (e.g. front-ends). 
+#[test] +#[allow(deprecated)] +fn should_not_change_address_of_other_contracts_when_adding_new_contract() { + let ckerc20 = CkErc20Setup::default(); + let info_at_start = ckerc20.cketh.get_minter_info(); + assert_eq!( + info_at_start, + MinterInfo { + smart_contract_address: Some(format_ethereum_address_to_eip_55( + ETH_HELPER_CONTRACT_ADDRESS + )), + eth_helper_contract_address: Some(format_ethereum_address_to_eip_55( + ETH_HELPER_CONTRACT_ADDRESS + )), + erc20_helper_contract_address: Some(format_ethereum_address_to_eip_55( + ERC20_HELPER_CONTRACT_ADDRESS + )), + ..info_at_start.clone() + } + ); + + let ckerc20 = ckerc20.add_support_for_subaccount(); + + assert_eq!( + ckerc20.get_minter_info(), + MinterInfo { + deposit_with_subaccount_helper_contract_address: Some( + format_ethereum_address_to_eip_55(DEPOSIT_WITH_SUBACCOUNT_HELPER_CONTRACT_ADDRESS) + ), + last_deposit_with_subaccount_scraped_block_number: Some( + LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into() + ), + ..info_at_start + } + ) +} + #[test] fn should_scrape_from_last_scraped_after_upgrade() { let mut ckerc20 = CkErc20Setup::default().add_supported_erc20_tokens(); diff --git a/rs/ethereum/cketh/minter/tests/cketh.rs b/rs/ethereum/cketh/minter/tests/cketh.rs index f21655343eb..3463d2436c4 100644 --- a/rs/ethereum/cketh/minter/tests/cketh.rs +++ b/rs/ethereum/cketh/minter/tests/cketh.rs @@ -1087,6 +1087,7 @@ fn should_retrieve_minter_info() { ETH_HELPER_CONTRACT_ADDRESS )), erc20_helper_contract_address: None, + deposit_with_subaccount_helper_contract_address: None, supported_ckerc20_tokens: None, minimum_withdrawal_amount: Some(Nat::from(CKETH_MINIMUM_WITHDRAWAL_AMOUNT)), ethereum_block_height: Some(Finalized), @@ -1096,6 +1097,9 @@ fn should_retrieve_minter_info() { erc20_balances: None, last_eth_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into()), last_erc20_scraped_block_number: Some(LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into()), + last_deposit_with_subaccount_scraped_block_number: 
Some( + LAST_SCRAPED_BLOCK_NUMBER_AT_INSTALL.into() + ), cketh_ledger_id: Some(cketh.ledger_id.into()), evm_rpc_id: cketh.evm_rpc_id.map(Principal::from), } From a700085f0575a440cb44941da69cfa2efeea3ab9 Mon Sep 17 00:00:00 2001 From: gregorydemay <112856886+gregorydemay@users.noreply.github.com> Date: Sat, 30 Nov 2024 16:06:27 +0100 Subject: [PATCH 09/52] chore(cketh): proposal to fix breaking changes in `get_minter_info` (#2908) --- .../mainnet/minter_upgrade_2024_11_30.md | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 rs/ethereum/cketh/mainnet/minter_upgrade_2024_11_30.md diff --git a/rs/ethereum/cketh/mainnet/minter_upgrade_2024_11_30.md b/rs/ethereum/cketh/mainnet/minter_upgrade_2024_11_30.md new file mode 100644 index 00000000000..09a907a5fad --- /dev/null +++ b/rs/ethereum/cketh/mainnet/minter_upgrade_2024_11_30.md @@ -0,0 +1,51 @@ +# Proposal to upgrade the ckETH minter canister + +Repository: `https://github.com/dfinity/ic.git` + +Git hash: `5ce01d0a871d5162d0b2ff0b585d71ef2e644ac9` + +New compressed Wasm hash: `8a5c77ddafee85bee18e3fa76c11922ed5b7bd11f81c2d66b578b0c1b00f5b23` + +Upgrade args hash: `0fee102bd16b053022b69f2c65fd5e2f41d150ce9c214ac8731cfaf496ebda4e` + +Target canister: `sv3dd-oaaaa-aaaar-qacoa-cai` + +Previous ckETH minter proposal: https://dashboard.internetcomputer.org/proposal/134264 + +--- + +## Motivation + +Fix an undesired breaking change introduced by proposal [134264](https://dashboard.internetcomputer.org/proposal/134264): + +1. The fields `eth_helper_contract_address` and `erc20_helper_contract_address` in `get_minter_info` were wrongly reused to point to the new helper smart contract [0x18901044688D3756C35Ed2b36D93e6a5B8e00E68](https://etherscan.io/address/0x18901044688D3756C35Ed2b36D93e6a5B8e00E68) that supports deposit with subaccounts and that was added as part of proposal [134264](https://dashboard.internetcomputer.org/proposal/134264). +2. 
This broke clients that relied on that information to make deposits of ETH or ERC-20 because the new helper smart contract has a different ABI. This is visible in such a [transaction](https://etherscan.io/tx/0x0968b25814221719bf966cf4bbd2de8290ed2ab42c049d451d64e46812d1574e), where the transaction tried to call the method `deposit` (`0xb214faa5`) that does exist on the [deprecated ETH helper smart contract](https://etherscan.io/address/0x7574eB42cA208A4f6960ECCAfDF186D627dCC175) but doesn't on the new contract (it should have been `depositEth` (`0x17c819c4`)). +3. The fix simply consists of reverting the changes regarding the values of the fields `eth_helper_contract_address` and `erc20_helper_contract_address` in `get_minter_info` (so that they point back to [0x7574eB42cA208A4f6960ECCAfDF186D627dCC175](https://etherscan.io/address/0x7574eB42cA208A4f6960ECCAfDF186D627dCC175) and [0x6abDA0438307733FC299e9C229FD3cc074bD8cC0](https://etherscan.io/address/0x6abDA0438307733FC299e9C229FD3cc074bD8cC0), respectively) and adding new fields to contain the state of the log scraping (address and last scraped block number) for the new helper smart contract. + +## Upgrade args + +``` +git fetch +git checkout 5ce01d0a871d5162d0b2ff0b585d71ef2e644ac9 +cd rs/ethereum/cketh/minter +didc encode '()' | xxd -r -p | sha256sum +``` + +## Release Notes + +``` +git log --format='%C(auto) %h %s' 2181ddf2a690ca0262d2d9d0511b093bfa350ece..5ce01d0a871d5162d0b2ff0b585d71ef2e644ac9 -- rs/ethereum/cketh/minter +5ce01d0a8 fix(cketh): Undo breaking change in `get_minter_info` (#2907) +68d1088d5 chore(XC-48): extract cketh minter minicbor encoder and decoders into a separate library (#2769) + ``` + +## Wasm Verification + +Verify that the hash of the gzipped WASM matches the proposed hash. 
+ +``` +git fetch +git checkout 5ce01d0a871d5162d0b2ff0b585d71ef2e644ac9 +"./ci/container/build-ic.sh" "--canisters" +sha256sum ./artifacts/canisters/ic-cketh-minter.wasm.gz +``` From a8d2104557d796755ab58bc7121beb7cc37bb471 Mon Sep 17 00:00:00 2001 From: Rostislav Rumenov Date: Mon, 2 Dec 2024 10:54:32 +0100 Subject: [PATCH 10/52] chore: split out parsing the read state response into a separate crate (#2899) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The intent is to make the deprecation of the canister-client easier so production code doesn't depend on that unsafe/testonly library --------- Co-authored-by: IDX GitHub Automation Co-authored-by: Mathias Björkqvist --- Cargo.lock | 18 +- Cargo.toml | 1 + rs/canister_client/BUILD.bazel | 2 +- rs/canister_client/Cargo.toml | 1 + .../read_state_response_parser/BUILD.bazel | 32 ++ .../read_state_response_parser/Cargo.toml | 20 ++ .../read_state_response_parser/src/lib.rs | 297 ++++++++++++++++++ rs/canister_client/src/agent.rs | 6 +- rs/canister_client/src/cbor.rs | 293 +---------------- rs/canister_client/src/lib.rs | 4 +- rs/http_endpoints/public/BUILD.bazel | 1 + rs/http_endpoints/public/Cargo.toml | 1 + rs/http_endpoints/public/tests/test.rs | 3 +- rs/rosetta-api/icp/BUILD.bazel | 2 +- rs/rosetta-api/icp/Cargo.toml | 4 +- rs/rosetta-api/icp/src/ledger_client.rs | 15 +- 16 files changed, 396 insertions(+), 304 deletions(-) create mode 100644 rs/canister_client/read_state_response_parser/BUILD.bazel create mode 100644 rs/canister_client/read_state_response_parser/Cargo.toml create mode 100644 rs/canister_client/read_state_response_parser/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 564a235be64..07b789eb94a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6065,6 +6065,7 @@ dependencies = [ "ic-crypto-tree-hash", "ic-management-canister-types", "ic-protobuf", + "ic-read-state-response-parser", "ic-test-utilities", "ic-test-utilities-types", "ic-types", @@ -8465,6 
+8466,7 @@ dependencies = [ "ic-metrics", "ic-pprof", "ic-protobuf", + "ic-read-state-response-parser", "ic-registry-client-helpers", "ic-registry-keys", "ic-registry-provisional-whitelist", @@ -10907,6 +10909,20 @@ dependencies = [ "turmoil", ] +[[package]] +name = "ic-read-state-response-parser" +version = "0.9.0" +dependencies = [ + "ic-canonical-state", + "ic-certification 0.9.0", + "ic-certification-test-utils", + "ic-crypto-tree-hash", + "ic-types", + "serde", + "serde_cbor", + "tree-deserializer", +] + [[package]] name = "ic-recovery" version = "0.1.0" @@ -11587,7 +11603,6 @@ dependencies = [ "hex", "ic-agent", "ic-base-types", - "ic-canister-client", "ic-crypto-ed25519", "ic-crypto-sha2", "ic-crypto-tree-hash", @@ -11611,6 +11626,7 @@ dependencies = [ "ic-nns-governance-init", "ic-nns-handler-root", "ic-nns-test-utils", + "ic-read-state-response-parser", "ic-rosetta-test-utils", "ic-types", "icp-ledger", diff --git a/Cargo.toml b/Cargo.toml index 9bd04ae4d02..b310f474e87 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ members = [ "rs/boundary_node/rate_limits/canister_client", "rs/boundary_node/systemd_journal_gatewayd_shim", "rs/canister_client", + "rs/canister_client/read_state_response_parser", "rs/canister_client/sender", "rs/cross-chain/proposal-cli", "rs/cycles_account_manager", diff --git a/rs/canister_client/BUILD.bazel b/rs/canister_client/BUILD.bazel index 7f200a2c24a..8a824ed05e5 100644 --- a/rs/canister_client/BUILD.bazel +++ b/rs/canister_client/BUILD.bazel @@ -9,7 +9,6 @@ package(default_visibility = [ "//rs/registry/admin:__pkg__", "//rs/registry/nns_data_provider:__pkg__", "//rs/replay:__pkg__", - "//rs/rosetta-api:__subpackages__", "//rs/rust_canisters/canister_test:__pkg__", "//rs/tests:__subpackages__", "//rs/workload_generator:__pkg__", @@ -17,6 +16,7 @@ package(default_visibility = [ DEPENDENCIES = [ # Keep sorted. 
+ "//rs/canister_client/read_state_response_parser", "//rs/canister_client/sender", "//rs/canonical_state", "//rs/certification", diff --git a/rs/canister_client/Cargo.toml b/rs/canister_client/Cargo.toml index dc12b41729c..c285522c6d0 100644 --- a/rs/canister_client/Cargo.toml +++ b/rs/canister_client/Cargo.toml @@ -20,6 +20,7 @@ ic-crypto-secp256k1 = { path = "../crypto/secp256k1" } ic-crypto-tree-hash = { path = "../crypto/tree_hash" } ic-management-canister-types = { path = "../types/management_canister_types" } ic-protobuf = { path = "../protobuf" } +ic-read-state-response-parser = { path = "./read_state_response_parser" } ic-types = { path = "../types/types" } itertools = { workspace = true } prost = { workspace = true } diff --git a/rs/canister_client/read_state_response_parser/BUILD.bazel b/rs/canister_client/read_state_response_parser/BUILD.bazel new file mode 100644 index 00000000000..70b27051fc2 --- /dev/null +++ b/rs/canister_client/read_state_response_parser/BUILD.bazel @@ -0,0 +1,32 @@ +load("@rules_rust//rust:defs.bzl", "rust_library", "rust_test") + +package(default_visibility = ["//visibility:public"]) + +DEPENDENCIES = [ + # Keep sorted. + "//rs/canonical_state", + "//rs/certification", + "//rs/crypto/tree_hash", + "//rs/tree_deserializer", + "//rs/types/types", + "@crate_index//:serde", + "@crate_index//:serde_cbor", +] + +DEV_DEPENDENCIES = [ + # Keep sorted. 
+ "//rs/certification/test-utils", +] + +rust_library( + name = "read_state_response_parser", + srcs = glob(["src/**/*.rs"]), + crate_name = "ic_read_state_response_parser", + deps = DEPENDENCIES, +) + +rust_test( + name = "tests", + crate = ":read_state_response_parser", + deps = DEV_DEPENDENCIES, +) diff --git a/rs/canister_client/read_state_response_parser/Cargo.toml b/rs/canister_client/read_state_response_parser/Cargo.toml new file mode 100644 index 00000000000..d44e4b514d2 --- /dev/null +++ b/rs/canister_client/read_state_response_parser/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "ic-read-state-response-parser" +version.workspace = true +authors.workspace = true +edition.workspace = true +description.workspace = true +documentation.workspace = true + +[dependencies] +ic-canonical-state = { path = "../../canonical_state" } +ic-certification = { path = "../../certification" } +ic-crypto-tree-hash = { path = "../../crypto/tree_hash" } +ic-types = { path = "../../types/types" } +serde = { workspace = true } +serde_cbor = { workspace = true } +tree-deserializer = { path = "../../tree_deserializer" } + +[dev-dependencies] +ic-certification-test-utils = { path = "../../certification/test-utils" } + diff --git a/rs/canister_client/read_state_response_parser/src/lib.rs b/rs/canister_client/read_state_response_parser/src/lib.rs new file mode 100644 index 00000000000..f12c41ce6b0 --- /dev/null +++ b/rs/canister_client/read_state_response_parser/src/lib.rs @@ -0,0 +1,297 @@ +use ic_canonical_state::encoding::types::SubnetMetrics; +use ic_crypto_tree_hash::{LabeledTree, LookupStatus, MixedHashTree}; +use ic_types::{ + crypto::threshold_sig::ThresholdSigPublicKey, + messages::{HttpReadStateResponse, MessageId}, + CanisterId, SubnetId, +}; +use serde::Deserialize; +use serde_cbor::value::Value as CBOR; +use std::collections::BTreeMap; +use std::convert::TryFrom; + +// An auxiliary structure that mirrors the request statuses +// encoded in a certificate, starting from 
the root of the tree. +#[derive(Debug, Deserialize)] +struct RequestStatuses { + request_status: Option>, +} + +#[derive(Eq, PartialEq, Debug, Deserialize)] +pub struct RequestStatus { + pub status: String, + pub reply: Option>, + pub reject_message: Option, +} + +impl RequestStatus { + fn unknown() -> Self { + RequestStatus { + status: "unknown".to_string(), + reply: None, + reject_message: None, + } + } +} + +/// Given a CBOR response from a `read_state` and a `request_id` extracts +/// the `RequestStatus` if available. +pub fn parse_read_state_response( + request_id: &MessageId, + effective_canister_id: &CanisterId, + root_pk: Option<&ThresholdSigPublicKey>, + message: CBOR, +) -> Result { + let response = serde_cbor::value::from_value::(message) + .map_err(|source| format!("decoding to HttpReadStateResponse failed: {}", source))?; + + let certificate = match root_pk { + Some(pk) => { + ic_certification::verify_certificate(&response.certificate, effective_canister_id, pk) + .map_err(|source| format!("verifying certificate failed: {}", source))? + } + None => serde_cbor::from_slice(response.certificate.as_slice()) + .map_err(|source| format!("decoding Certificate failed: {}", source))?, + }; + + match certificate + .tree + .lookup(&[&b"request_status"[..], request_id.as_ref()]) + { + LookupStatus::Found(_) => (), + // TODO(MR-249): return an error in the Unknown case once the replica + // implements absence proofs. + LookupStatus::Absent | LookupStatus::Unknown => return Ok(RequestStatus::unknown()), + } + + // Parse the tree. 
+ let tree = LabeledTree::try_from(certificate.tree) + .map_err(|e| format!("parsing tree in certificate failed: {:?}", e))?; + + let request_statuses = + RequestStatuses::deserialize(tree_deserializer::LabeledTreeDeserializer::new(&tree)) + .map_err(|err| format!("deserializing request statuses failed: {:?}", err))?; + + Ok(match request_statuses.request_status { + Some(mut request_status_map) => request_status_map + .remove(request_id) + .unwrap_or_else(RequestStatus::unknown), + None => RequestStatus::unknown(), + }) +} + +/// Given a CBOR response from a subnet `read_state` and a `subnet_id` extracts +/// the `SubnetMetrics` if available. +pub fn parse_subnet_read_state_response( + subnet_id: &SubnetId, + root_pk: Option<&ThresholdSigPublicKey>, + message: CBOR, +) -> Result { + let response = serde_cbor::value::from_value::(message) + .map_err(|source| format!("decoding to HttpReadStateResponse failed: {}", source))?; + + let certificate = match root_pk { + Some(pk) => ic_certification::verify_certificate_for_subnet_read_state( + &response.certificate, + subnet_id, + pk, + ) + .map_err(|source| format!("verifying certificate failed: {}", source))?, + None => serde_cbor::from_slice(response.certificate.as_slice()) + .map_err(|source| format!("decoding Certificate failed: {}", source))?, + }; + + let subnet_metrics_leaf = + match certificate + .tree + .lookup(&[&b"subnet"[..], subnet_id.get().as_ref(), &b"metrics"[..]]) + { + LookupStatus::Found(subnet_metrics_leaf) => subnet_metrics_leaf.clone(), + LookupStatus::Absent | LookupStatus::Unknown => return Ok(SubnetMetrics::default()), + }; + + match subnet_metrics_leaf { + MixedHashTree::Leaf(bytes) => { + let subnet_metrics: SubnetMetrics = serde_cbor::from_slice(&bytes) + .map_err(|err| format!("deserializing subnet_metrics failed: {:?}", err))?; + Ok(subnet_metrics) + } + tree => Err(format!("Expected subnet metrics leaf but found {:?}", tree)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
ic_certification_test_utils::CertificateBuilder; + use ic_certification_test_utils::CertificateData; + use ic_crypto_tree_hash::Digest; + use ic_crypto_tree_hash::Label; + use ic_types::messages::Blob; + use ic_types::CanisterId; + use serde::Serialize; + + fn to_self_describing_cbor(e: &T) -> serde_cbor::Result> { + let mut serialized_bytes = Vec::new(); + let mut serializer = serde_cbor::Serializer::new(&mut serialized_bytes); + serializer.self_describe()?; + e.serialize(&mut serializer)?; + Ok(serialized_bytes) + } + + #[test] + fn test_parse_read_state_response_unknown() { + let labeled_tree = LabeledTree::try_from(MixedHashTree::Labeled( + "time".into(), + Box::new(MixedHashTree::Leaf(vec![1])), + )) + .unwrap(); + let data = CertificateData::CustomTree(labeled_tree); + let (certificate, root_pk, _) = CertificateBuilder::new(data).build(); + + let certificate_cbor: Vec = to_self_describing_cbor(&certificate).unwrap(); + + let response = HttpReadStateResponse { + certificate: Blob(certificate_cbor), + }; + + let response_cbor: Vec = to_self_describing_cbor(&response).unwrap(); + + let response: CBOR = serde_cbor::from_slice(response_cbor.as_slice()).unwrap(); + + let request_id: MessageId = MessageId::from([0; 32]); + assert_eq!( + parse_read_state_response(&request_id, &CanisterId::from(1), Some(&root_pk), response), + Ok(RequestStatus::unknown()) + ); + } + + #[test] + fn test_parse_read_state_response_replied() { + let tree = MixedHashTree::Fork(Box::new(( + MixedHashTree::Labeled( + "request_status".into(), + Box::new(MixedHashTree::Labeled( + vec![ + 184, 255, 145, 192, 128, 156, 132, 76, 67, 213, 87, 237, 189, 136, 206, + 184, 254, 192, 233, 210, 142, 173, 27, 123, 112, 187, 82, 222, 130, 129, + 245, 41, + ] + .into(), + Box::new(MixedHashTree::Fork(Box::new(( + MixedHashTree::Labeled( + "reply".into(), + Box::new(MixedHashTree::Leaf(vec![68, 73, 68, 76, 0, 0])), + ), + MixedHashTree::Labeled( + "status".into(), + 
Box::new(MixedHashTree::Leaf(b"replied".to_vec())), + ), + )))), + )), + ), + MixedHashTree::Labeled("time".into(), Box::new(MixedHashTree::Leaf(vec![1]))), + ))); + let labeled_tree = LabeledTree::try_from(tree).unwrap(); + let data = CertificateData::CustomTree(labeled_tree); + let (certificate, root_pk, _) = CertificateBuilder::new(data).build(); + + let certificate_cbor: Vec = to_self_describing_cbor(&certificate).unwrap(); + + let response = HttpReadStateResponse { + certificate: Blob(certificate_cbor), + }; + + let response_cbor: Vec = to_self_describing_cbor(&response).unwrap(); + + let response: CBOR = serde_cbor::from_slice(response_cbor.as_slice()).unwrap(); + + // Request ID that exists. + let request_id: MessageId = MessageId::from([ + 184, 255, 145, 192, 128, 156, 132, 76, 67, 213, 87, 237, 189, 136, 206, 184, 254, 192, + 233, 210, 142, 173, 27, 123, 112, 187, 82, 222, 130, 129, 245, 41, + ]); + + assert_eq!( + parse_read_state_response( + &request_id, + &CanisterId::from(1), + Some(&root_pk), + response.clone() + ), + Ok(RequestStatus { + status: "replied".to_string(), + reply: Some(vec![68, 73, 68, 76, 0, 0]), + reject_message: None + }), + ); + + // Request ID that doesn't exist. + let request_id: MessageId = MessageId::from([0; 32]); + assert_eq!( + parse_read_state_response(&request_id, &CanisterId::from(1), Some(&root_pk), response), + Ok(RequestStatus::unknown()) + ); + } + + #[test] + fn test_parse_read_state_response_pruned() { + fn mklabeled(l: impl Into