From 3d9dda21fb2c7601e543041443cda174928d95b3 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 30 Jan 2024 13:57:59 +0800 Subject: [PATCH 01/10] Add `engine_clientVersionV1` structs --- beacon_node/execution_layer/src/engine_api.rs | 113 +++++++++++++++++- .../execution_layer/src/engine_api/http.rs | 28 +++-- .../src/engine_api/json_structures.rs | 33 +++++ .../execution_layer/src/test_utils/mod.rs | 1 + 4 files changed, 159 insertions(+), 16 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 511b38892c6..0d59393166f 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,9 +1,9 @@ use crate::engines::ForkchoiceState; use crate::http::{ - ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, - ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_CLIENT_VERSION_V1, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, + ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -584,6 +584,7 @@ pub struct EngineCapabilities { pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, + pub client_version_v1: bool, } impl EngineCapabilities { @@ -622,7 +623,111 @@ impl EngineCapabilities { if self.get_payload_v3 { response.push(ENGINE_GET_PAYLOAD_V3); } + if self.client_version_v1 { + response.push(ENGINE_CLIENT_VERSION_V1); + } response } } + +#[derive(Clone, Debug, PartialEq)] +pub enum ClientCode { + Besu, + EtherumJS, + Erigon, + GoEthereum, + Grandine, + Lighthouse, + Lodestar, + Nethermind, + Nimbus, + Teku, + Prysm, + Reth, + Unknown(String), +} + +impl std::fmt::Display for ClientCode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + ClientCode::Besu => "BU", + ClientCode::EtherumJS => "EJ", + ClientCode::Erigon => "EG", + ClientCode::GoEthereum => "GE", + ClientCode::Grandine => "GR", + ClientCode::Lighthouse => "LH", + ClientCode::Lodestar => "LS", + ClientCode::Nethermind => "NM", + ClientCode::Nimbus => "NB", + ClientCode::Teku => "TK", + ClientCode::Prysm => "PM", + ClientCode::Reth => "RH", + ClientCode::Unknown(code) => &code, + }; + write!(f, "{}", s) + } +} + +impl TryFrom for ClientCode { + type Error = String; + + fn try_from(code: String) -> Result { + match code.as_str() { + "BU" => Ok(Self::Besu), + "EJ" => Ok(Self::EtherumJS), + "EG" => Ok(Self::Erigon), + "GE" => Ok(Self::GoEthereum), + "GR" => Ok(Self::Grandine), + "LH" => Ok(Self::Lighthouse), + "LS" => Ok(Self::Lodestar), + "NM" => Ok(Self::Nethermind), + "NB" => Ok(Self::Nimbus), + "TK" => Ok(Self::Teku), + "PM" => Ok(Self::Prysm), + "RH" => Ok(Self::Reth), + string => { + if string.len() == 2 { + Ok(Self::Unknown(code)) + } else { + Err(format!("Invalid client code: {}", code)) + } + } + } + } +} + +#[derive(Clone, Debug)] +pub struct CommitPrefix(pub String); + +impl TryFrom for CommitPrefix { + type Error = String; + + fn try_from(value: String) -> Result { + // Ensure length is exactly 8 characters + 
if value.len() != 8 { + return Err("Input must be exactly 8 characters long".to_string()); + } + + // Ensure all characters are valid hex digits + if value.chars().all(|c| c.is_digit(16)) { + Ok(CommitPrefix(value.to_lowercase())) + } else { + Err("Input must contain only hexadecimal characters".to_string()) + } + } +} + +impl std::fmt::Display for CommitPrefix { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +#[derive(Clone, Debug)] +pub struct ClientVersionV1 { + pub code: ClientCode, + pub client_name: String, + pub version: String, + pub commit: CommitPrefix, +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 0604f15c43b..57f0f6d20d1 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -51,6 +51,9 @@ pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_CLIENT_VERSION_V1: &str = "engine_clientVersionV1"; +pub const ENGINE_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); + /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; /// This code is returned by all clients when a method is not supported @@ -69,6 +72,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_CLIENT_VERSION_V1, ]; /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
@@ -546,22 +550,21 @@ pub mod deposit_methods {
     }
 }
 
-#[derive(Clone, Debug)]
-pub struct CapabilitiesCacheEntry {
-    engine_capabilities: EngineCapabilities,
-    fetch_time: Instant,
+pub struct CachedResponse<T: Clone> {
+    pub data: T,
+    pub fetch_time: Instant,
 }
 
-impl CapabilitiesCacheEntry {
-    pub fn new(engine_capabilities: EngineCapabilities) -> Self {
+impl<T: Clone> CachedResponse<T> {
+    pub fn new(data: T) -> Self {
         Self {
-            engine_capabilities,
+            data,
             fetch_time: Instant::now(),
         }
     }
 
-    pub fn engine_capabilities(&self) -> EngineCapabilities {
-        self.engine_capabilities
+    pub fn data(&self) -> T {
+        self.data.clone()
     }
 
     pub fn age(&self) -> Duration {
@@ -578,7 +581,7 @@ pub struct HttpJsonRpc {
     pub client: Client,
     pub url: SensitiveUrl,
     pub execution_timeout_multiplier: u32,
-    pub engine_capabilities_cache: Mutex<Option<CapabilitiesCacheEntry>>,
+    pub engine_capabilities_cache: Mutex<Option<CachedResponse<EngineCapabilities>>>,
     auth: Option<Auth>,
 }
 
@@ -1017,6 +1020,7 @@ impl HttpJsonRpc {
             get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1),
             get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2),
             get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3),
+            client_version_v1: capabilities.contains(ENGINE_CLIENT_VERSION_V1),
         })
     }
 
@@ -1040,10 +1044,10 @@ impl HttpJsonRpc {
         let mut lock = self.engine_capabilities_cache.lock().await;
 
         if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) {
-            Ok(lock.engine_capabilities())
+            Ok(lock.data())
         } else {
             let engine_capabilities = self.exchange_capabilities().await?;
-            *lock = Some(CapabilitiesCacheEntry::new(engine_capabilities));
+            *lock = Some(CachedResponse::new(engine_capabilities));
             Ok(engine_capabilities)
         }
     }
diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs
index feb6c99748c..151e693e5b7 100644
--- a/beacon_node/execution_layer/src/engine_api/json_structures.rs
+++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs
@@ -676,3 +676,36 @@ pub mod serde_logs_bloom {
             .map_err(|e| serde::de::Error::custom(format!("invalid logs bloom: {:?}", e)))
     }
 }
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct JsonClientVersionV1 {
+    pub code: String,
+    pub client_name: String,
+    pub version: String,
+    pub commit: String,
+}
+
+impl From<ClientVersionV1> for JsonClientVersionV1 {
+    fn from(client_version: ClientVersionV1) -> Self {
+        Self {
+            code: client_version.code.to_string(),
+            client_name: client_version.client_name,
+            version: client_version.version,
+            commit: client_version.commit.to_string(),
+        }
+    }
+}
+
+impl TryFrom<JsonClientVersionV1> for ClientVersionV1 {
+    type Error = String;
+
+    fn try_from(json: JsonClientVersionV1) -> Result<Self, Self::Error> {
+        Ok(Self {
+            code: json.code.try_into()?,
+            client_name: json.client_name,
+            version: json.version,
+            commit: json.commit.try_into()?,
+        })
+    }
+}
diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs
index 425329a520a..c54687e47b2 100644
--- a/beacon_node/execution_layer/src/test_utils/mod.rs
+++ b/beacon_node/execution_layer/src/test_utils/mod.rs
@@ -49,6 +49,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
     get_payload_v1: true,
     get_payload_v2: true,
     get_payload_v3: true,
+    client_version_v1: true,
 };
 
 mod execution_block_generator;

From 875e5503f82380f9e344a146fb194afbbed1ae4a Mon Sep 17 00:00:00 2001
From: Mark Mackey
Date: Wed, 31 Jan 2024 14:10:43 +0800
Subject: [PATCH 02/10] Implement `engine_clientVersionV1`

---
 Cargo.lock                                    |  1 +
beacon_node/execution_layer/Cargo.toml | 1 + beacon_node/execution_layer/src/engine_api.rs | 3 +- .../execution_layer/src/engine_api/http.rs | 76 ++++++++++++++++++- beacon_node/execution_layer/src/engines.rs | 57 ++++++++++---- beacon_node/execution_layer/src/lib.rs | 20 +++++ common/lighthouse_version/src/lib.rs | 18 +++++ 7 files changed, 158 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 558c3eb811a..d320bd2d7f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3024,6 +3024,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", + "lighthouse_version", "lru", "parking_lot 0.12.1", "pretty_reqwest_error", diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index ace8e24a8e4..28cd16e4ef9 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -52,3 +52,4 @@ arc-swap = "1.6.0" eth2_network_config = { workspace = true } alloy-rlp = "0.3" alloy-consensus = { git = "https://github.com/alloy-rs/alloy.git", rev = "974d488bab5e21e9f17452a39a4bfa56677367b2" } +lighthouse_version = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 0d59393166f..16a380d9ca0 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -56,13 +56,14 @@ pub enum Error { ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, TransitionConfigurationMismatch, - PayloadConversionLogicFlaw, SszError(ssz_types::Error), DeserializeWithdrawals(ssz_types::Error), BuilderApi(builder_client::Error), IncorrectStateVariant, RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), + MethodNotSupported(String), + InvalidClientVersion(String), RlpDecoderError(rlp::DecoderError), } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 57f0f6d20d1..f980641525e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,6 +3,8 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; +use lazy_static::lazy_static; +use lighthouse_version::{COMMIT_PREFIX, VERSION}; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; @@ -75,6 +77,19 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_CLIENT_VERSION_V1, ]; +lazy_static! { + /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 + /// for two reasons: + /// 1. This saves the overhead of converting into Json for every engine call + /// 2. The Json version lacks error checking so we can avoid calling `unwrap()` + pub static ref LIGHTHOUSE_JSON_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { + code: ClientCode::Lighthouse.to_string(), + client_name: "Lighthouse".to_string(), + version: VERSION.replace("Lighthouse/", ""), + commit: COMMIT_PREFIX.to_string(), + }; +} + /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. 
pub mod deposit_log { use ssz::Decode; @@ -582,6 +597,7 @@ pub struct HttpJsonRpc { pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, pub engine_capabilities_cache: Mutex>>, + pub engine_version_cache: Mutex>>, auth: Option, } @@ -595,6 +611,7 @@ impl HttpJsonRpc { url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), engine_capabilities_cache: Mutex::new(None), + engine_version_cache: Mutex::new(None), auth: None, }) } @@ -609,6 +626,7 @@ impl HttpJsonRpc { url, execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1), engine_capabilities_cache: Mutex::new(None), + engine_version_cache: Mutex::new(None), auth: Some(auth), }) } @@ -1043,7 +1061,10 @@ impl HttpJsonRpc { ) -> Result { let mut lock = self.engine_capabilities_cache.lock().await; - if let Some(lock) = lock.as_ref().filter(|entry| !entry.older_than(age_limit)) { + if let Some(lock) = lock + .as_ref() + .filter(|cached_response| !cached_response.older_than(age_limit)) + { Ok(lock.data()) } else { let engine_capabilities = self.exchange_capabilities().await?; @@ -1052,6 +1073,59 @@ impl HttpJsonRpc { } } + /// This method fetches the response from the engine without checking + /// any caches or storing the result in the cache. It is better to use + /// `get_engine_version(Some(Duration::ZERO))` if you want to force + /// fetching from the EE as this will cache the result. + pub async fn client_version_v1(&self) -> Result { + let params = json!([*LIGHTHOUSE_JSON_CLIENT_VERSION]); + + let response: JsonClientVersionV1 = self + .rpc_request( + ENGINE_CLIENT_VERSION_V1, + params, + ENGINE_CLIENT_VERSION_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + response.try_into().map_err(Error::InvalidClientVersion) + } + + pub async fn clear_engine_version_cache(&self) { + *self.engine_version_cache.lock().await = None; + } + + /// Returns the execution engine version resulting from a call to + /// engine_clientVersionV1. If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. 
+    ///
+    /// Set `age_limit` to `None` to always return the cached result
+    /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE
+    pub async fn get_engine_version(
+        &self,
+        age_limit: Option<Duration>,
+    ) -> Result<ClientVersionV1, Error> {
+        // check engine capabilities first (avoids holding two locks at once)
+        let engine_capabilities = self.get_engine_capabilities(None).await?;
+        if !engine_capabilities.client_version_v1 {
+            return Err(Error::RequiredMethodUnsupported(ENGINE_CLIENT_VERSION_V1));
+        }
+        let mut lock = self.engine_version_cache.lock().await;
+        if let Some(lock) = lock
+            .as_ref()
+            .filter(|cached_response| !cached_response.older_than(age_limit))
+        {
+            Ok(lock.data())
+        } else {
+            let engine_version = self.client_version_v1().await?;
+            *lock = Some(CachedResponse::new(engine_version.clone()));
+            Ok(engine_version)
+        }
+    }
+
     // automatically selects the latest version of
     // new_payload that the execution engine supports
     pub async fn new_payload(
diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs
index bc8e4e31404..faecce9f980 100644
--- a/beacon_node/execution_layer/src/engines.rs
+++ b/beacon_node/execution_layer/src/engines.rs
@@ -4,7 +4,7 @@ use crate::engine_api::{
     EngineCapabilities, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes,
     PayloadId,
 };
-use crate::HttpJsonRpc;
+use crate::{ClientVersionV1, HttpJsonRpc};
 use lru::LruCache;
 use slog::{debug, error, info, warn, Logger};
 use std::future::Future;
@@ -21,7 +21,7 @@ use types::ExecutionBlockHash;
 ///
 /// Since the size of each value is small (~800 bytes) a large number is used for safety.
 const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512);
-const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
+const CACHED_RESPONSE_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes
 
 /// Stores the remembered state of a engine.
 #[derive(Copy, Clone, PartialEq, Debug, Eq, Default)]
@@ -34,11 +34,11 @@ enum EngineStateInternal {
 }
 
 #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
-enum CapabilitiesCacheAction {
+enum ResponseCacheAction {
     #[default]
     None,
-    Update,
-    Clear,
+    Update, // Update cached responses
+    Clear,  // Clear cached responses
 }
 
 /// A subset of the engine state to inform other services if the engine is online or offline.
@@ -266,12 +266,12 @@ impl Engine { ); } state.update(EngineStateInternal::Synced); - (**state, CapabilitiesCacheAction::Update) + (**state, ResponseCacheAction::Update) } Err(EngineApiError::IsSyncing) => { let mut state = self.state.write().await; state.update(EngineStateInternal::Syncing); - (**state, CapabilitiesCacheAction::Update) + (**state, ResponseCacheAction::Update) } Err(EngineApiError::Auth(err)) => { error!( @@ -282,7 +282,7 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::AuthFailed); - (**state, CapabilitiesCacheAction::Clear) + (**state, ResponseCacheAction::Clear) } Err(e) => { error!( @@ -293,28 +293,37 @@ impl Engine { let mut state = self.state.write().await; state.update(EngineStateInternal::Offline); - // need to clear the engine capabilities cache if we detect the - // execution engine is offline as it is likely the engine is being - // updated to a newer version with new capabilities - (**state, CapabilitiesCacheAction::Clear) + // need to clear cached responses if we detect the execution engine + // is offline as it is likely the engine is being updated to a newer + // version which might also have new capabilities + (**state, ResponseCacheAction::Clear) } }; // do this after dropping state lock guard to avoid holding two locks at once match cache_action { - CapabilitiesCacheAction::None => {} - CapabilitiesCacheAction::Update => { + ResponseCacheAction::None => {} + ResponseCacheAction::Update => { if let Err(e) = self - .get_engine_capabilities(Some(CACHED_ENGINE_CAPABILITIES_AGE_LIMIT)) + .get_engine_capabilities(Some(CACHED_RESPONSE_AGE_LIMIT)) .await { warn!(self.log, "Error during exchange capabilities"; "error" => ?e, ) + } else { + // no point in running this if there was an error fetching the capabilities + // as it will just result in an error again + let _ = self + .get_engine_version(Some(CACHED_RESPONSE_AGE_LIMIT)) + .await; } } - CapabilitiesCacheAction::Clear => self.api.clear_exchange_capabilties_cache().await, + ResponseCacheAction::Clear => { + self.api.clear_exchange_capabilties_cache().await; + self.api.clear_engine_version_cache().await; + } } debug!( @@ -340,6 +349,22 @@ impl Engine { self.api.get_engine_capabilities(age_limit).await } + /// Returns the execution engine version resulting from a call to + /// engine_clientVersionV1. If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_version( + &self, + age_limit: Option, + ) -> Result { + self.api.get_engine_version(age_limit).await + } + /// Run `func` on the node regardless of the node's current state. /// /// ## Note diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 69b84adbb8f..9e19be56a72 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1523,6 +1523,26 @@ impl ExecutionLayer { .map_err(Error::EngineError) } + /// Returns the execution engine version resulting from a call to + /// engine_clientVersionV1. 
If the version cache is not populated, or if it + /// is populated with a cached result of age >= `age_limit`, this method will + /// fetch the result from the execution engine and populate the cache before + /// returning it. Otherwise it will return the cached result from an earlier + /// call. + /// + /// Set `age_limit` to `None` to always return the cached result + /// Set `age_limit` to `Some(Duration::ZERO)` to force fetching from EE + pub async fn get_engine_version( + &self, + age_limit: Option, + ) -> Result { + self.engine() + .request(|engine| engine.get_engine_version(age_limit)) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Used during block production to determine if the merge has been triggered. /// /// ## Specification diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index aa5eea035cd..61515fc40ff 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -21,6 +21,24 @@ pub const VERSION: &str = git_version!( fallback = "Lighthouse/v5.1.2" ); +/// Returns the first eight characters of the latest commit hash for this build. +/// +/// No indication is given if the tree is dirty. This is part of the standard +/// for reporting the client version to the execution engine. +pub const COMMIT_PREFIX: &str = git_version!( + args = [ + "--always", + "--abbrev=8", + // NOTE: using --match instead of --exclude for compatibility with old Git + "--match=thiswillnevermatchlol" + ], + prefix = "", + suffix = "", + cargo_prefix = "", + cargo_suffix = "", + fallback = "00000000" +); + /// Returns `VERSION`, but with platform information appended to the end. /// /// ## Example From 3a791b4028d24836ec7b7305b3448cf2d692762d Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 7 Feb 2024 13:38:06 +0800 Subject: [PATCH 03/10] Update to latest spec changes --- beacon_node/execution_layer/src/engine_api.rs | 2 +- .../execution_layer/src/engine_api/http.rs | 20 +++++++++++-------- .../src/engine_api/json_structures.rs | 6 +++--- beacon_node/execution_layer/src/engines.rs | 2 +- beacon_node/execution_layer/src/lib.rs | 2 +- 5 files changed, 18 insertions(+), 14 deletions(-) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 16a380d9ca0..9a535649b16 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -728,7 +728,7 @@ impl std::fmt::Display for CommitPrefix { #[derive(Clone, Debug)] pub struct ClientVersionV1 { pub code: ClientCode, - pub client_name: String, + pub name: String, pub version: String, pub commit: CommitPrefix, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index f980641525e..8d97108a7a6 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -84,7 +84,7 @@ lazy_static! { /// 2. 
The Json version lacks error checking so we can avoid calling `unwrap()` pub static ref LIGHTHOUSE_JSON_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { code: ClientCode::Lighthouse.to_string(), - client_name: "Lighthouse".to_string(), + name: "Lighthouse".to_string(), version: VERSION.replace("Lighthouse/", ""), commit: COMMIT_PREFIX.to_string(), }; @@ -597,7 +597,7 @@ pub struct HttpJsonRpc { pub url: SensitiveUrl, pub execution_timeout_multiplier: u32, pub engine_capabilities_cache: Mutex>>, - pub engine_version_cache: Mutex>>, + pub engine_version_cache: Mutex>>>, auth: Option, } @@ -1077,10 +1077,10 @@ impl HttpJsonRpc { /// any caches or storing the result in the cache. It is better to use /// `get_engine_version(Some(Duration::ZERO))` if you want to force /// fetching from the EE as this will cache the result. - pub async fn client_version_v1(&self) -> Result { + pub async fn get_client_version_v1(&self) -> Result, Error> { let params = json!([*LIGHTHOUSE_JSON_CLIENT_VERSION]); - let response: JsonClientVersionV1 = self + let response: Vec = self .rpc_request( ENGINE_CLIENT_VERSION_V1, params, @@ -1088,7 +1088,11 @@ impl HttpJsonRpc { ) .await?; - response.try_into().map_err(Error::InvalidClientVersion) + response + .into_iter() + .map(TryInto::try_into) + .collect::, _>>() + .map_err(Error::InvalidClientVersion) } pub async fn clear_engine_version_cache(&self) { @@ -1096,7 +1100,7 @@ impl HttpJsonRpc { } /// Returns the execution engine version resulting from a call to - /// engine_clientVersionV1. If the version cache is not populated, or if it + /// engine_getClientVersionV1. If the version cache is not populated, or if it /// is populated with a cached result of age >= `age_limit`, this method will /// fetch the result from the execution engine and populate the cache before /// returning it. 
Otherwise it will return the cached result from an earlier @@ -1107,7 +1111,7 @@ impl HttpJsonRpc { pub async fn get_engine_version( &self, age_limit: Option, - ) -> Result { + ) -> Result, Error> { // check engine capabilities first (avoids holding two locks at once) let engine_capabilities = self.get_engine_capabilities(None).await?; if !engine_capabilities.client_version_v1 { @@ -1120,7 +1124,7 @@ impl HttpJsonRpc { { Ok(lock.data()) } else { - let engine_version = self.client_version_v1().await?; + let engine_version = self.get_client_version_v1().await?; *lock = Some(CachedResponse::new(engine_version.clone())); Ok(engine_version) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 151e693e5b7..a9487872e4c 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -681,7 +681,7 @@ pub mod serde_logs_bloom { #[serde(rename_all = "camelCase")] pub struct JsonClientVersionV1 { pub code: String, - pub client_name: String, + pub name: String, pub version: String, pub commit: String, } @@ -690,7 +690,7 @@ impl From for JsonClientVersionV1 { fn from(client_version: ClientVersionV1) -> Self { Self { code: client_version.code.to_string(), - client_name: client_version.client_name, + name: client_version.name, version: client_version.version, commit: client_version.commit.to_string(), } @@ -703,7 +703,7 @@ impl TryFrom for ClientVersionV1 { fn try_from(json: JsonClientVersionV1) -> Result { Ok(Self { code: json.code.try_into()?, - client_name: json.client_name, + name: json.name, version: json.version, commit: json.commit.try_into()?, }) diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index faecce9f980..75d0b872cef 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -361,7 +361,7 @@ impl Engine { pub async fn get_engine_version( &self, age_limit: Option, - ) -> Result { + ) -> Result, EngineApiError> { self.api.get_engine_version(age_limit).await } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 9e19be56a72..904238f8045 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1535,7 +1535,7 @@ impl ExecutionLayer { pub async fn get_engine_version( &self, age_limit: Option, - ) -> Result { + ) -> Result, Error> { self.engine() .request(|engine| engine.get_engine_version(age_limit)) .await From 7d91a9f8fa654ba7c1f1c0b9d00bb1c74c58c482 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Sat, 24 Feb 2024 16:36:38 +1000 Subject: [PATCH 04/10] Implement GraffitiCalculator Service --- Cargo.lock | 2 + beacon_node/beacon_chain/Cargo.toml | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 17 +- beacon_node/beacon_chain/src/builder.rs | 24 +- .../beacon_chain/src/graffiti_calculator.rs | 250 ++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/client/src/builder.rs | 9 +- beacon_node/client/src/config.rs | 8 +- beacon_node/execution_layer/src/engine_api.rs | 47 +++- beacon_node/execution_layer/src/lib.rs | 26 +- beacon_node/src/config.rs | 24 +- 11 files changed, 354 insertions(+), 56 deletions(-) create mode 100644 beacon_node/beacon_chain/src/graffiti_calculator.rs diff --git a/Cargo.lock b/Cargo.lock index d320bd2d7f3..da847f55388 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -879,6 +879,7 @@ version = 
"0.2.0" dependencies = [ "bitvec 1.0.1", "bls", + "clap", "derivative", "environment", "eth1", @@ -898,6 +899,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", + "lighthouse_version", "logging", "lru", "maplit", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 43c2c896f71..26c117284a5 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,6 +19,7 @@ environment = { workspace = true } serde_json = { workspace = true } [dependencies] +clap = { workspace = true } serde_json = { workspace = true } eth2_network_config = { workspace = true } merkle_proof = { workspace = true } @@ -31,6 +32,7 @@ operation_pool = { workspace = true } rayon = { workspace = true } serde = { workspace = true } ethereum_serde_utils = { workspace = true } +lighthouse_version = { workspace = true } slog = { workspace = true } sloggers = { workspace = true } slot_clock = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 39089c0b0d3..7ba9030f428 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -30,6 +30,7 @@ use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; +use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; use crate::historical_blocks::HistoricalBlockError; use crate::light_client_finality_update_verification::{ @@ -474,7 +475,7 @@ pub struct BeaconChain { /// Logging to CLI, etc. pub(crate) log: Logger, /// Arbitrary bytes included in the blocks. - pub(crate) graffiti: Graffiti, + pub(crate) graffiti_calculator: GraffitiCalculator, /// Optional slasher. pub slasher: Option>>, /// Provides monitoring of a set of explicitly defined validators. @@ -4754,6 +4755,10 @@ impl BeaconChain { // // Perform the state advance and block-packing functions. let chain = self.clone(); + let graffiti = self + .graffiti_calculator + .get_graffiti(validator_graffiti) + .await; let mut partial_beacon_block = self .task_executor .spawn_blocking_handle( @@ -4763,7 +4768,7 @@ impl BeaconChain { state_root_opt, produce_at_slot, randao_reveal, - validator_graffiti, + graffiti, builder_boost_factor, block_production_version, ) @@ -4861,7 +4866,7 @@ impl BeaconChain { state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, - validator_graffiti: Option, + graffiti: Graffiti, builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { @@ -4963,12 +4968,6 @@ impl BeaconChain { } drop(unagg_import_timer); - // Override the beacon node's graffiti with graffiti from the validator, if present. 
- let graffiti = match validator_graffiti { - Some(graffiti) => graffiti, - None => self.graffiti, - }; - let attestation_packing_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index dd4b612f60b..d897fe63959 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -7,6 +7,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; +use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::head_tracker::HeadTracker; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; @@ -39,8 +40,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, - Hash256, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, + Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -96,7 +97,7 @@ pub struct BeaconChainBuilder { spec: ChainSpec, chain_config: ChainConfig, log: Option, - graffiti: Graffiti, + beacon_graffiti: GraffitiOrigin, slasher: Option>>, // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. @@ -139,7 +140,7 @@ where spec: TEthSpec::default_spec(), chain_config: ChainConfig::default(), log: None, - graffiti: Graffiti::default(), + beacon_graffiti: GraffitiOrigin::default(), slasher: None, pending_io_batch: vec![], trusted_setup: None, @@ -652,9 +653,9 @@ where self } - /// Sets the `graffiti` field. - pub fn graffiti(mut self, graffiti: Graffiti) -> Self { - self.graffiti = graffiti; + /// Sets the `beacon_graffiti` field. 
+ pub fn beacon_graffiti(mut self, beacon_graffiti: GraffitiOrigin) -> Self { + self.beacon_graffiti = beacon_graffiti; self } @@ -936,7 +937,7 @@ where observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), eth1_chain: self.eth1_chain, - execution_layer: self.execution_layer, + execution_layer: self.execution_layer.clone(), genesis_validators_root, genesis_time, canonical_head, @@ -968,7 +969,12 @@ where .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, log: log.clone(), - graffiti: self.graffiti, + graffiti_calculator: GraffitiCalculator::new( + self.beacon_graffiti, + self.execution_layer, + slot_clock.slot_duration() * TEthSpec::slots_per_epoch() as u32, + log.clone(), + ), slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs new file mode 100644 index 00000000000..ae0d28217cd --- /dev/null +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -0,0 +1,250 @@ +use crate::BeaconChain; +use crate::BeaconChainTypes; +use clap::ArgMatches; +use execution_layer::{http::ENGINE_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; +use serde::{Deserialize, Serialize}; +use slog::{crit, debug, error, warn, Logger}; +use slot_clock::SlotClock; +use std::{fmt::Debug, time::Duration}; +use task_executor::TaskExecutor; +use types::{EthSpec, Graffiti, GRAFFITI_BYTES_LEN}; + +const ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE: u32 = 6; // 6 epochs +const ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE: u32 = 2; // 2 epochs +const ENGINE_VERSION_CACHE_PRELOAD_STARTUP_DELAY: Duration = Duration::from_secs(60); + +/// Represents the source and content of graffiti for block production, excluding +/// inputs from the validator client and execution engine. Graffiti is categorized +/// as either user-specified or calculated to facilitate decisions on graffiti +/// selection. +#[derive(Clone, Copy, Serialize, Deserialize)] +pub enum GraffitiOrigin { + UserSpecified(Graffiti), + Calculated(Graffiti), +} + +impl GraffitiOrigin { + pub fn new(cli_args: &ArgMatches) -> Result { + let mut bytes = [0u8; GRAFFITI_BYTES_LEN]; + + if let Some(graffiti) = cli_args.value_of("graffiti") { + if graffiti.len() > GRAFFITI_BYTES_LEN { + return Err(format!( + "Your graffiti is too long! {} bytes maximum!", + GRAFFITI_BYTES_LEN + )); + } + bytes[..graffiti.len()].copy_from_slice(graffiti.as_bytes()); + Ok(Self::UserSpecified(Graffiti::from(bytes))) + } else if cli_args.is_present("private") { + // When 'private' flag is present, use a zero-initialized bytes array. 
+ Ok(Self::UserSpecified(Graffiti::from(bytes))) + } else { + // Use the default lighthouse graffiti if no user-specified graffiti flags are present + Ok(Self::default()) + } + } + + pub fn graffiti(&self) -> Graffiti { + match self { + GraffitiOrigin::UserSpecified(graffiti) => *graffiti, + GraffitiOrigin::Calculated(graffiti) => *graffiti, + } + } +} + +impl Default for GraffitiOrigin { + fn default() -> Self { + let version_bytes = lighthouse_version::VERSION.as_bytes(); + let trimmed_len = std::cmp::min(version_bytes.len(), GRAFFITI_BYTES_LEN); + let mut bytes = [0u8; GRAFFITI_BYTES_LEN]; + bytes[..trimmed_len].copy_from_slice(&version_bytes[..trimmed_len]); + Self::Calculated(Graffiti::from(bytes)) + } +} + +impl Debug for GraffitiOrigin { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.graffiti().fmt(f) + } +} + +pub struct GraffitiCalculator { + pub beacon_graffiti: GraffitiOrigin, + execution_layer: Option>, + pub epoch_duration: Duration, + log: Logger, +} + +impl GraffitiCalculator { + pub fn new( + beacon_graffiti: GraffitiOrigin, + execution_layer: Option>, + epoch_duration: Duration, + log: Logger, + ) -> Self { + Self { + beacon_graffiti, + execution_layer, + epoch_duration, + log, + } + } + + /// Returns the appropriate graffiti to use for block production, prioritizing + /// sources in the following order: + /// 1. Graffiti specified by the validator client. + /// 2. Graffiti specified by the user via beacon node CLI options. + /// 3. The EL & CL client version string, applicable when the EL supports version specification. + /// 4. The default lighthouse version string, used if the EL lacks version specification support. + pub async fn get_graffiti(&self, validator_graffiti: Option) -> Graffiti { + if let Some(graffiti) = validator_graffiti { + return graffiti; + } + + match self.beacon_graffiti { + GraffitiOrigin::UserSpecified(graffiti) => graffiti, + GraffitiOrigin::Calculated(default_graffiti) => { + let Some(execution_layer) = self.execution_layer.as_ref() else { + // Return default graffiti if there is no execution layer. This + // shouldn't occur if we're actually producing blocks. + crit!(self.log, "No execution layer available for graffiti calculation during block production!"); + return default_graffiti; + }; + + // The engine version cache refresh service ensures this will almost always retrieve this data from the + // cache instead of making a request to the execution engine. A cache miss would only occur if lighthouse + // has recently started or the EL recently went offline. + let engine_versions_response = execution_layer + .get_engine_version(Some( + self.epoch_duration * ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE, + )) + .await; + let engine_versions = match engine_versions_response { + Err(el_error) => { + if el_error.is_method_unsupported(ENGINE_CLIENT_VERSION_V1) { + debug!( + self.log, + "Using default lighthouse graffiti: EL does not support {} method", + ENGINE_CLIENT_VERSION_V1 + ); + } else { + warn!(self.log, "Failed to determine execution engine version for graffiti"; "error" => format!("{:?}", el_error)); + } + return default_graffiti; + } + Ok(engine_versions) => engine_versions, + }; + + let Some(engine_version) = engine_versions.first() else { + // Some kind of error occurred, return default graffiti. 
+ error!( + self.log, + "Got empty engine version response from execution layer" + ); + return default_graffiti; + }; + if engine_versions.len() != 1 { + // More than one version implies lighthouse is connected to + // an EL multiplexer. We don't support modifying the graffiti + // with these configurations. + warn!(self.log, "Multiplexer detected, using default graffiti"); + return default_graffiti; + } + + let lighthouse_commit_prefix = CommitPrefix::try_from(lighthouse_version::COMMIT_PREFIX.to_string()) + .unwrap_or_else(|error_message| { + // This really shouldn't happen but we want to definitly log if it does + crit!(self.log, "Failed to parse lighthouse commit prefix"; "error" => error_message); + CommitPrefix("00000000".to_string()) + }); + + engine_version.calculate_graffiti(lighthouse_commit_prefix) + } + } + } +} + +pub fn start_engine_version_cache_refresh_service( + chain: &BeaconChain, + executor: TaskExecutor, +) { + let Some(el_ref) = chain.execution_layer.as_ref() else { + debug!( + chain.log, + "No execution layer configured, not starting engine version cache refresh service" + ); + return; + }; + + let execution_layer = el_ref.clone(); + let log = chain.log.clone(); + let slot_clock = chain.slot_clock.clone(); + let epoch_duration = chain.graffiti_calculator.epoch_duration; + executor.spawn( + async move { + engine_version_cache_refresh_service::( + execution_layer, + slot_clock, + epoch_duration, + log, + ) + .await + }, + "engine_version_cache_refresh_service", + ); +} + +async fn engine_version_cache_refresh_service( + execution_layer: ExecutionLayer, + slot_clock: T::SlotClock, + epoch_duration: Duration, + log: Logger, +) { + // Preload the engine version cache after a brief delay to allow for EL initialization. + // This initial priming ensures cache readiness before the service's regular update cycle begins. + tokio::time::sleep(ENGINE_VERSION_CACHE_PRELOAD_STARTUP_DELAY).await; + if let Err(e) = execution_layer.get_engine_version(None).await { + debug!(log, "Failed to preload engine version cache"; "error" => format!("{:?}", e)); + } + + // this service should run 3/8 of the way through the epoch + let epoch_delay = (epoch_duration * 3) / 8; + // the duration of 1 epoch less than the total duration between firing of this service + let partial_firing_delay = + epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE.saturating_sub(1); + loop { + match slot_clock.duration_to_next_epoch(T::EthSpec::slots_per_epoch()) { + Some(duration_to_next_epoch) => { + let firing_delay = partial_firing_delay + duration_to_next_epoch + epoch_delay; + tokio::time::sleep(firing_delay).await; + + debug!( + log, + "Engine version cache refresh service firing"; + ); + + if let Err(el_error) = execution_layer.get_engine_version(None).await { + if el_error.is_method_unsupported(ENGINE_CLIENT_VERSION_V1) { + debug!( + log, + "EL does not support {} method. Sleeping twice as long before retry", + ENGINE_CLIENT_VERSION_V1 + ); + tokio::time::sleep( + epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE, + ) + .await; + } else { + debug!(log, "Failed to populate engine version cache"; "error" => format!("{:?}", el_error)); + } + } + } + None => { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
+ tokio::time::sleep(slot_clock.slot_duration()).await; + } + }; + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 529f269be10..58a16d770f9 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -27,6 +27,7 @@ pub mod events; pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; +pub mod graffiti_calculator; mod head_tracker; pub mod historical_blocks; pub mod kzg_utils; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 243dd132408..6194984c04a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -6,6 +6,7 @@ use crate::notifier::spawn_notifier; use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; +use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; @@ -163,7 +164,7 @@ where let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); let chain_config = config.chain.clone(); - let graffiti = config.graffiti; + let beacon_graffiti = config.beacon_graffiti; let store = store.ok_or("beacon_chain_start_method requires a store")?; let runtime_context = @@ -202,7 +203,7 @@ where MigratorConfig::default().epochs_per_migration(chain_config.epochs_per_migration), ) .chain_config(chain_config) - .graffiti(graffiti) + .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) .validator_monitor_config(config.validator_monitor.clone()); @@ -947,6 +948,10 @@ where runtime_context.executor.clone(), beacon_chain.clone(), ); + start_engine_version_cache_refresh_service( + beacon_chain.as_ref(), + runtime_context.executor.clone(), + ); start_attestation_simulator_service( beacon_chain.task_executor.clone(), beacon_chain.clone(), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 48ad77abc58..c61007b1f5c 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::validator_monitor::ValidatorMonitorConfig; use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; @@ -9,7 +10,6 @@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use std::time::Duration; -use types::Graffiti; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -58,8 +58,8 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, - /// Graffiti to be inserted everytime we create a block. - pub graffiti: Graffiti, + /// Graffiti to be inserted everytime we create a block if the validator doesn't specify. 
+ pub beacon_graffiti: GraffitiOrigin, pub validator_monitor: ValidatorMonitorConfig, #[serde(skip)] /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined @@ -99,7 +99,7 @@ impl Default for Config { eth1: <_>::default(), execution_layer: None, trusted_setup: None, - graffiti: Graffiti::default(), + beacon_graffiti: GraffitiOrigin::default(), http_api: <_>::default(), http_metrics: <_>::default(), monitoring_api: None, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 9a535649b16..68498493cc5 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -25,6 +25,7 @@ pub use types::{ Withdrawal, Withdrawals, }; use types::{ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, KzgProofs}; +use types::{Graffiti, GRAFFITI_BYTES_LEN}; pub mod auth; pub mod http; @@ -62,7 +63,6 @@ pub enum Error { IncorrectStateVariant, RequiredMethodUnsupported(&'static str), UnsupportedForkVariant(String), - MethodNotSupported(String), InvalidClientVersion(String), RlpDecoderError(rlp::DecoderError), } @@ -664,7 +664,7 @@ impl std::fmt::Display for ClientCode { ClientCode::Teku => "TK", ClientCode::Prysm => "PM", ClientCode::Reth => "RH", - ClientCode::Unknown(code) => &code, + ClientCode::Unknown(code) => code, }; write!(f, "{}", s) } @@ -705,14 +705,22 @@ impl TryFrom for CommitPrefix { type Error = String; fn try_from(value: String) -> Result { - // Ensure length is exactly 8 characters - if value.len() != 8 { - return Err("Input must be exactly 8 characters long".to_string()); + // Check if the input starts with '0x' and strip it if it does + let hex_str = value + .strip_prefix("0x") + .map(|stripped| stripped.to_string()) + .unwrap_or(value); + + // Ensure length is exactly 8 characters after '0x' removal + if hex_str.len() != 8 { + return Err( + "Input must be exactly 8 characters long (excluding any '0x' prefix)".to_string(), + ); } // Ensure all characters are valid hex digits - if value.chars().all(|c| c.is_digit(16)) { - Ok(CommitPrefix(value.to_lowercase())) + if hex_str.chars().all(|c| c.is_ascii_hexdigit()) { + Ok(CommitPrefix(hex_str.to_lowercase())) } else { Err("Input must contain only hexadecimal characters".to_string()) } @@ -732,3 +740,28 @@ pub struct ClientVersionV1 { pub version: String, pub commit: CommitPrefix, } + +impl ClientVersionV1 { + pub fn calculate_graffiti(&self, lighthouse_commit_prefix: CommitPrefix) -> Graffiti { + let graffiti_string = format!( + "{}{}LH{}", + self.code, + self.commit + .0 + .get(..4) + .map_or_else(|| self.commit.0.as_str(), |s| s) + .to_uppercase(), + lighthouse_commit_prefix + .0 + .get(..4) + .unwrap_or("0000") + .to_uppercase(), + ); + let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + let bytes_to_copy = std::cmp::min(graffiti_string.len(), GRAFFITI_BYTES_LEN); + graffiti_bytes[..bytes_to_copy] + .copy_from_slice(&graffiti_string.as_bytes()[..bytes_to_copy]); + + Graffiti::from(graffiti_bytes) + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 904238f8045..f6c33cea0d0 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use auth::{strip_prefix, Auth, JwtKey}; pub use block_hash::calculate_execution_block_hash; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; -use engine_api::Error as ApiError; +pub use engine_api::Error as ApiError; 
pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; @@ -158,6 +158,24 @@ impl From for Error { } } +impl From for Error { + fn from(e: EngineError) -> Self { + match e { + // This removes an unnecessary layer of indirection. + // TODO (mark): consider refactoring these error enums + EngineError::Api { error } => Error::ApiError(error), + _ => Error::EngineError(Box::new(e)), + } + } +} + +impl Error { + // returns true if this error is caused by the execution engine not supporting a method + pub fn is_method_unsupported(&self, method: &'static str) -> bool { + matches!(self, Error::ApiError(ApiError::RequiredMethodUnsupported(m)) if *m == method) + } +} + pub enum BlockProposalContentsType { Full(BlockProposalContents>), Blinded(BlockProposalContents>), @@ -1519,8 +1537,7 @@ impl ExecutionLayer { self.engine() .request(|engine| engine.get_engine_capabilities(age_limit)) .await - .map_err(Box::new) - .map_err(Error::EngineError) + .map_err(Into::into) } /// Returns the execution engine version resulting from a call to @@ -1539,8 +1556,7 @@ impl ExecutionLayer { self.engine() .request(|engine| engine.get_engine_version(age_limit)) .await - .map_err(Box::new) - .map_err(Error::EngineError) + .map_err(Into::into) } /// Used during block production to determine if the merge has been triggered. diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8f369818c09..040f6017f47 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,6 +2,7 @@ use beacon_chain::chain_config::{ DisallowedReOrgOffsets, ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; +use beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_chain::TrustedSetup; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; @@ -16,7 +17,6 @@ use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; -use std::cmp; use std::cmp::max; use std::fmt::Debug; use std::fs; @@ -26,7 +26,7 @@ use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; -use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; +use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; /// Gets the fully-initialized global client. /// @@ -566,24 +566,8 @@ pub fn get_config( client_config.chain.genesis_backfill = true; } - let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { - if graffiti.len() > GRAFFITI_BYTES_LEN { - return Err(format!( - "Your graffiti is too long! 
{} bytes maximum!", - GRAFFITI_BYTES_LEN - )); - } - - graffiti.as_bytes() - } else if cli_args.is_present("private") { - b"" - } else { - lighthouse_version::VERSION.as_bytes() - }; - - let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN); - client_config.graffiti.0[..trimmed_graffiti_len] - .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); + let beacon_graffiti = GraffitiOrigin::new(cli_args)?; + client_config.beacon_graffiti = beacon_graffiti; if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { let mut split = wss_checkpoint.split(':'); From 4f9210ed536aff0074b29d1d10e3501f53802b2e Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Sun, 25 Feb 2024 16:23:59 +1000 Subject: [PATCH 05/10] Added Unit Tests for GraffitiCalculator --- .../beacon_chain/src/graffiti_calculator.rs | 211 +++++++++++++++--- beacon_node/execution_layer/src/engine_api.rs | 25 +-- .../execution_layer/src/engine_api/http.rs | 19 +- beacon_node/execution_layer/src/lib.rs | 9 +- .../src/test_utils/handle_rpc.rs | 5 +- .../execution_layer/src/test_utils/mod.rs | 13 +- lighthouse/tests/beacon_node.rs | 41 +++- 7 files changed, 252 insertions(+), 71 deletions(-) diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index ae0d28217cd..a8818750e1c 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -1,7 +1,7 @@ use crate::BeaconChain; use crate::BeaconChainTypes; use clap::ArgMatches; -use execution_layer::{http::ENGINE_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; +use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; @@ -115,32 +115,25 @@ impl GraffitiCalculator { // The engine version cache refresh service ensures this will almost always retrieve this data from the // cache instead of making a request to the execution engine. A cache miss would only occur if lighthouse // has recently started or the EL recently went offline. - let engine_versions_response = execution_layer + let engine_versions = match execution_layer .get_engine_version(Some( self.epoch_duration * ENGINE_VERSION_AGE_LIMIT_EPOCH_MULTIPLE, )) - .await; - let engine_versions = match engine_versions_response { + .await + { + Ok(engine_versions) => engine_versions, Err(el_error) => { - if el_error.is_method_unsupported(ENGINE_CLIENT_VERSION_V1) { - debug!( - self.log, - "Using default lighthouse graffiti: EL does not support {} method", - ENGINE_CLIENT_VERSION_V1 - ); - } else { - warn!(self.log, "Failed to determine execution engine version for graffiti"; "error" => format!("{:?}", el_error)); - } + warn!(self.log, "Failed to determine execution engine version for graffiti"; "error" => ?el_error); return default_graffiti; } - Ok(engine_versions) => engine_versions, }; let Some(engine_version) = engine_versions.first() else { - // Some kind of error occurred, return default graffiti. - error!( + // Got an empty array which indicates the EL doesn't support the method + debug!( self.log, - "Got empty engine version response from execution layer" + "Using default lighthouse graffiti: EL does not support {} method", + ENGINE_GET_CLIENT_VERSION_V1; ); return default_graffiti; }; @@ -148,16 +141,19 @@ impl GraffitiCalculator { // More than one version implies lighthouse is connected to // an EL multiplexer. 
We don't support modifying the graffiti // with these configurations. - warn!(self.log, "Multiplexer detected, using default graffiti"); + warn!( + self.log, + "Execution Engine multiplexer detected, using default graffiti" + ); return default_graffiti; } let lighthouse_commit_prefix = CommitPrefix::try_from(lighthouse_version::COMMIT_PREFIX.to_string()) - .unwrap_or_else(|error_message| { - // This really shouldn't happen but we want to definitly log if it does - crit!(self.log, "Failed to parse lighthouse commit prefix"; "error" => error_message); - CommitPrefix("00000000".to_string()) - }); + .unwrap_or_else(|error_message| { + // This really shouldn't happen but we want to definitly log if it does + crit!(self.log, "Failed to parse lighthouse commit prefix"; "error" => error_message); + CommitPrefix("00000000".to_string()) + }); engine_version.calculate_graffiti(lighthouse_commit_prefix) } @@ -224,19 +220,21 @@ async fn engine_version_cache_refresh_service( "Engine version cache refresh service firing"; ); - if let Err(el_error) = execution_layer.get_engine_version(None).await { - if el_error.is_method_unsupported(ENGINE_CLIENT_VERSION_V1) { - debug!( - log, - "EL does not support {} method. Sleeping twice as long before retry", - ENGINE_CLIENT_VERSION_V1 - ); - tokio::time::sleep( - epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE, - ) - .await; - } else { - debug!(log, "Failed to populate engine version cache"; "error" => format!("{:?}", el_error)); + match execution_layer.get_engine_version(None).await { + Err(e) => warn!(log, "Failed to populate engine version cache"; "error" => ?e), + Ok(versions) => { + if versions.is_empty() { + // Empty array indicates the EL doesn't support the method + debug!( + log, + "EL does not support {} method. Sleeping twice as long before retry", + ENGINE_GET_CLIENT_VERSION_V1 + ); + tokio::time::sleep( + epoch_duration * ENGINE_VERSION_CACHE_REFRESH_EPOCH_MULTIPLE, + ) + .await; + } } } } @@ -248,3 +246,144 @@ async fn engine_version_cache_refresh_service( }; } } + +#[cfg(test)] +mod tests { + use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType}; + use crate::ChainConfig; + use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; + use execution_layer::EngineCapabilities; + use lazy_static::lazy_static; + use slog::info; + use std::time::Duration; + use types::{ChainSpec, Graffiti, Keypair, MinimalEthSpec, GRAFFITI_BYTES_LEN}; + + const VALIDATOR_COUNT: usize = 48; + lazy_static! { + /// A cached set of keys. 
+ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + } + + fn get_harness( + validator_count: usize, + spec: ChainSpec, + chain_config: Option, + ) -> BeaconChainHarness> { + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec) + .chain_config(chain_config.unwrap_or_default()) + .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(logging::test_logger()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness + } + + #[tokio::test] + async fn check_graffiti_without_el_version_support() { + let spec = test_spec::(); + let harness = get_harness(VALIDATOR_COUNT, spec, None); + // modify execution engine so it doesn't support engine_getClientVersionV1 method + let mock_execution_layer = harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .set_engine_capabilities(EngineCapabilities { + get_client_version_v1: false, + ..DEFAULT_ENGINE_CAPABILITIES + }); + // refresh capabilities cache + harness + .chain + .execution_layer + .as_ref() + .unwrap() + .get_engine_capabilities(Some(Duration::ZERO)) + .await + .unwrap(); + + let version_bytes = std::cmp::min( + lighthouse_version::VERSION.as_bytes().len(), + GRAFFITI_BYTES_LEN, + ); + // grab the slice of the graffiti that corresponds to the lighthouse version + let graffiti_slice = + &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes]; + + // convert graffiti bytes slice to ascii for easy debugging if this test should fail + let graffiti_str = + std::str::from_utf8(graffiti_slice).expect("bytes should convert nicely to ascii"); + + info!(harness.chain.log, "results"; "lighthouse_version" => lighthouse_version::VERSION, "graffiti_str" => graffiti_str); + println!("lighthouse_version: '{}'", lighthouse_version::VERSION); + println!("graffiti_str: '{}'", graffiti_str); + + assert!(lighthouse_version::VERSION.starts_with(graffiti_str)); + } + + #[tokio::test] + async fn check_graffiti_with_el_version_support() { + let spec = test_spec::(); + let harness = get_harness(VALIDATOR_COUNT, spec, None); + + let found_graffiti_bytes = harness.chain.graffiti_calculator.get_graffiti(None).await.0; + + let mock_commit = DEFAULT_CLIENT_VERSION.commit.clone(); + let expected_graffiti_string = format!( + "{}{}{}{}", + DEFAULT_CLIENT_VERSION.code, + mock_commit + .strip_prefix("0x") + .unwrap_or(&mock_commit) + .get(0..4) + .expect("should get first 2 bytes in hex"), + "LH", + lighthouse_version::COMMIT_PREFIX + .get(0..4) + .expect("should get first 2 bytes in hex") + ); + + let expected_graffiti_prefix_bytes = expected_graffiti_string.as_bytes(); + let expected_graffiti_prefix_len = + std::cmp::min(expected_graffiti_prefix_bytes.len(), GRAFFITI_BYTES_LEN); + + let found_graffiti_string = + std::str::from_utf8(&found_graffiti_bytes[..expected_graffiti_prefix_len]) + .expect("bytes should convert nicely to ascii"); + + info!(harness.chain.log, "results"; "expected_graffiti_string" => &expected_graffiti_string, "found_graffiti_string" => &found_graffiti_string); + println!("expected_graffiti_string: '{}'", expected_graffiti_string); + println!("found_graffiti_string: '{}'", found_graffiti_string); + + assert_eq!(expected_graffiti_string, found_graffiti_string); + + let mut expected_graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + expected_graffiti_bytes[..expected_graffiti_prefix_len] + .copy_from_slice(expected_graffiti_string.as_bytes()); + assert_eq!(found_graffiti_bytes, 
expected_graffiti_bytes); + } + + #[tokio::test] + async fn check_graffiti_with_validator_specified_value() { + let spec = test_spec::(); + let harness = get_harness(VALIDATOR_COUNT, spec, None); + + let graffiti_str = "nice graffiti bro"; + let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; + graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes()); + + let found_graffiti = harness + .chain + .graffiti_calculator + .get_graffiti(Some(Graffiti::from(graffiti_bytes))) + .await; + + assert_eq!( + found_graffiti.to_string(), + "0x6e6963652067726166666974692062726f000000000000000000000000000000" + ); + } +} diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 68498493cc5..41da970771c 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,7 +1,7 @@ use crate::engines::ForkchoiceState; use crate::http::{ - ENGINE_CLIENT_VERSION_V1, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, - ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, + ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, }; @@ -585,7 +585,7 @@ pub struct EngineCapabilities { pub get_payload_v1: bool, pub get_payload_v2: bool, pub get_payload_v3: bool, - pub client_version_v1: bool, + pub get_client_version_v1: bool, } impl EngineCapabilities { @@ -624,8 +624,8 @@ impl EngineCapabilities { if self.get_payload_v3 { response.push(ENGINE_GET_PAYLOAD_V3); } - if self.client_version_v1 { - response.push(ENGINE_CLIENT_VERSION_V1); + if self.get_client_version_v1 { + response.push(ENGINE_GET_CLIENT_VERSION_V1); } response @@ -706,21 +706,18 @@ impl TryFrom for CommitPrefix { fn try_from(value: String) -> Result { // Check if the input starts with '0x' and strip it if it does - let hex_str = value - .strip_prefix("0x") - .map(|stripped| stripped.to_string()) - .unwrap_or(value); + let commit_prefix = value.strip_prefix("0x").unwrap_or(&value); // Ensure length is exactly 8 characters after '0x' removal - if hex_str.len() != 8 { + if commit_prefix.len() != 8 { return Err( "Input must be exactly 8 characters long (excluding any '0x' prefix)".to_string(), ); } // Ensure all characters are valid hex digits - if hex_str.chars().all(|c| c.is_ascii_hexdigit()) { - Ok(CommitPrefix(hex_str.to_lowercase())) + if commit_prefix.chars().all(|c| c.is_ascii_hexdigit()) { + Ok(CommitPrefix(commit_prefix.to_lowercase())) } else { Err("Input must contain only hexadecimal characters".to_string()) } @@ -750,12 +747,12 @@ impl ClientVersionV1 { .0 .get(..4) .map_or_else(|| self.commit.0.as_str(), |s| s) - .to_uppercase(), + .to_lowercase(), lighthouse_commit_prefix .0 .get(..4) .unwrap_or("0000") - .to_uppercase(), + .to_lowercase(), ); let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; let bytes_to_copy = std::cmp::min(graffiti_string.len(), GRAFFITI_BYTES_LEN); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 8d97108a7a6..cc038af1167 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -53,8 +53,8 @@ pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = 
Duration::from_secs(10); pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities"; pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1); -pub const ENGINE_CLIENT_VERSION_V1: &str = "engine_clientVersionV1"; -pub const ENGINE_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); +pub const ENGINE_GET_CLIENT_VERSION_V1: &str = "engine_getClientVersionV1"; +pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); /// This error is returned during a `chainId` call by Geth. pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; @@ -74,7 +74,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_CLIENT_VERSION_V1, + ENGINE_GET_CLIENT_VERSION_V1, ]; lazy_static! { @@ -1038,7 +1038,7 @@ impl HttpJsonRpc { get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1), get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), - client_version_v1: capabilities.contains(ENGINE_CLIENT_VERSION_V1), + get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), }) } @@ -1082,9 +1082,9 @@ impl HttpJsonRpc { let response: Vec = self .rpc_request( - ENGINE_CLIENT_VERSION_V1, + ENGINE_GET_CLIENT_VERSION_V1, params, - ENGINE_CLIENT_VERSION_TIMEOUT * self.execution_timeout_multiplier, + ENGINE_GET_CLIENT_VERSION_TIMEOUT * self.execution_timeout_multiplier, ) .await?; @@ -1114,8 +1114,11 @@ impl HttpJsonRpc { ) -> Result, Error> { // check engine capabilities first (avoids holding two locks at once) let engine_capabilities = self.get_engine_capabilities(None).await?; - if !engine_capabilities.client_version_v1 { - return Err(Error::RequiredMethodUnsupported(ENGINE_CLIENT_VERSION_V1)); + if !engine_capabilities.get_client_version_v1 { + // We choose an empty vec to denote that this method is not + // supported instead of an error since this method is optional + // & we don't want to log a warning and concern the user + return Ok(vec![]); } let mut lock = self.engine_version_cache.lock().await; if let Some(lock) = lock diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f6c33cea0d0..2466c63ecd5 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -10,7 +10,7 @@ use auth::{strip_prefix, Auth, JwtKey}; pub use block_hash::calculate_execution_block_hash; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; -pub use engine_api::Error as ApiError; +use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; @@ -169,13 +169,6 @@ impl From for Error { } } -impl Error { - // returns true if this error is caused by the execution engine not supporting a method - pub fn is_method_unsupported(&self, method: &'static str) -> bool { - matches!(self, Error::ApiError(ApiError::RequiredMethodUnsupported(m)) if *m == method) - } -} - pub enum BlockProposalContentsType { Full(BlockProposalContents>), Blinded(BlockProposalContents>), diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 623c1c9548a..2b8dfe530ed 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ 
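
// Sketch of the convention adopted above for this optional Engine API method:
// when the EL does not advertise engine_getClientVersionV1, the query returns
// Ok with an empty Vec instead of an error, so callers can quietly fall back
// to the default graffiti without logging warnings. The `ClientVersion` type
// and the boolean capability flag below are stand-ins for illustration.
#[derive(Clone, Debug)]
struct ClientVersion {
    code: String,
    commit: String,
}

fn get_engine_version(get_client_version_v1_supported: bool) -> Result<Vec<ClientVersion>, String> {
    if !get_client_version_v1_supported {
        // Unsupported is not an error for an optional method: signal it with
        // an empty response rather than a warning.
        return Ok(Vec::new());
    }
    Ok(vec![ClientVersion {
        code: "GE".to_string(),
        commit: "abcdef01".to_string(),
    }])
}

fn main() {
    match get_engine_version(false) {
        Ok(versions) if versions.is_empty() => {
            println!("EL does not support the method; using the default graffiti")
        }
        Ok(versions) => println!("EL reported {:?}", versions[0]),
        Err(e) => eprintln!("failed to query the EL: {e}"),
    }
}
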
b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -1,7 +1,7 @@ use super::Context; use crate::engine_api::{http::*, *}; use crate::json_structures::*; -use crate::test_utils::DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI; +use crate::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI}; use serde::{de::DeserializeOwned, Deserialize}; use serde_json::Value as JsonValue; use std::sync::Arc; @@ -456,6 +456,9 @@ pub async fn handle_rpc( let engine_capabilities = ctx.engine_capabilities.read(); Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap()) } + ENGINE_GET_CLIENT_VERSION_V1 => { + Ok(serde_json::to_value([DEFAULT_CLIENT_VERSION.clone()]).unwrap()) + } ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { #[derive(Deserialize)] #[serde(transparent)] diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index c54687e47b2..1204a4b5095 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -4,11 +4,13 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, }; +use crate::json_structures::JsonClientVersionV1; use bytes::Bytes; use environment::null_logger; use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use kzg::Kzg; +use lazy_static::lazy_static; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; use serde_json::json; @@ -49,9 +51,18 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v1: true, get_payload_v2: true, get_payload_v3: true, - client_version_v1: true, + get_client_version_v1: true, }; +lazy_static! 
{ + pub static ref DEFAULT_CLIENT_VERSION: JsonClientVersionV1 = JsonClientVersionV1 { + code: "MC".to_string(), // "mock client" + name: "Mock Execution Client".to_string(), + version: "0.1.0".to_string(), + commit: "0xabcdef01".to_string(), + }; +} + mod execution_block_generator; mod handle_rpc; mod hook; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b6cd84a69fc..294da571ae3 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -5,9 +5,11 @@ use beacon_node::beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, }; +use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; +use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -319,13 +321,36 @@ fn graffiti_flag() { .flag("graffiti", Some("nice-graffiti")) .run_with_zero_port() .with_config(|config| { + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::UserSpecified(_) + )); assert_eq!( - config.graffiti.to_string(), - "0x6e6963652d677261666669746900000000000000000000000000000000000000" + config.beacon_graffiti.graffiti().to_string(), + "0x6e6963652d677261666669746900000000000000000000000000000000000000", ); }); } +#[test] +fn default_graffiti() { + use types::GRAFFITI_BYTES_LEN; + // test default graffiti when no graffiti flags are provided + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::Calculated(_) + )); + let version_bytes = lighthouse_version::VERSION.as_bytes(); + let trimmed_len = std::cmp::min(version_bytes.len(), GRAFFITI_BYTES_LEN); + let mut bytes = [0u8; GRAFFITI_BYTES_LEN]; + bytes[..trimmed_len].copy_from_slice(&version_bytes[..trimmed_len]); + assert_eq!(config.beacon_graffiti.graffiti().0, bytes); + }); +} + #[test] fn trusted_peers_flag() { let peers = vec![PeerId::random(), PeerId::random()]; @@ -1218,7 +1243,17 @@ fn private_flag() { CommandLineTest::new() .flag("private", None) .run_with_zero_port() - .with_config(|config| assert!(config.network.private)); + .with_config(|config| { + assert!(config.network.private); + assert!(matches!( + config.beacon_graffiti, + GraffitiOrigin::UserSpecified(_) + )); + assert_eq!( + config.beacon_graffiti.graffiti().to_string(), + "0x0000000000000000000000000000000000000000000000000000000000000000".to_string(), + ); + }); } #[test] fn zero_ports_flag() { From f85eeda9bc0d85d7214343f305680b18d65ff743 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Fri, 8 Mar 2024 11:22:01 +1100 Subject: [PATCH 06/10] Address Mac's Comments --- beacon_node/beacon_chain/src/graffiti_calculator.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index a8818750e1c..4e79547ffa5 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -172,6 +172,16 @@ pub fn start_engine_version_cache_refresh_service( ); return; }; + if matches!( + chain.graffiti_calculator.beacon_graffiti, + GraffitiOrigin::UserSpecified(_) + ) { + debug!( + chain.log, + "Graffiti is user-specified, not starting engine version cache refresh service" + ); + return; + } let execution_layer = el_ref.clone(); 
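
// Sketch of the payload shape this mock returns for engine_getClientVersionV1,
// mirroring the four fields used above (code, name, version, commit). The
// serde/serde_json dependencies (with serde's derive feature) are assumed
// here; the struct itself is a stand-in, not the patch's JsonClientVersionV1.
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize)]
struct ClientVersionV1 {
    code: String,
    name: String,
    version: String,
    commit: String,
}

fn main() {
    let mock = ClientVersionV1 {
        code: "MC".to_string(), // "mock client"
        name: "Mock Execution Client".to_string(),
        version: "0.1.0".to_string(),
        commit: "0xabcdef01".to_string(),
    };
    let json = serde_json::to_string(&mock).expect("mock version should serialize");
    println!("{json}");
    let round_trip: ClientVersionV1 = serde_json::from_str(&json).expect("should deserialize");
    assert_eq!(round_trip.commit, "0xabcdef01");
}
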
let log = chain.log.clone(); From f858ecaec1f97aa2779745527544d325fcac5465 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 16 Apr 2024 11:01:46 -0500 Subject: [PATCH 07/10] Remove need to use clap in beacon chain --- Cargo.lock | 1 - beacon_node/beacon_chain/Cargo.toml | 1 - .../beacon_chain/src/graffiti_calculator.rs | 22 ------------------- beacon_node/src/config.rs | 11 +++++++++- consensus/types/src/graffiti.rs | 6 +++++ 5 files changed, 16 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da847f55388..780b54609b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -879,7 +879,6 @@ version = "0.2.0" dependencies = [ "bitvec 1.0.1", "bls", - "clap", "derivative", "environment", "eth1", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 26c117284a5..43bd887db53 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,7 +19,6 @@ environment = { workspace = true } serde_json = { workspace = true } [dependencies] -clap = { workspace = true } serde_json = { workspace = true } eth2_network_config = { workspace = true } merkle_proof = { workspace = true } diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 4e79547ffa5..599c99dc2da 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -1,6 +1,5 @@ use crate::BeaconChain; use crate::BeaconChainTypes; -use clap::ArgMatches; use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, warn, Logger}; @@ -24,27 +23,6 @@ pub enum GraffitiOrigin { } impl GraffitiOrigin { - pub fn new(cli_args: &ArgMatches) -> Result { - let mut bytes = [0u8; GRAFFITI_BYTES_LEN]; - - if let Some(graffiti) = cli_args.value_of("graffiti") { - if graffiti.len() > GRAFFITI_BYTES_LEN { - return Err(format!( - "Your graffiti is too long! {} bytes maximum!", - GRAFFITI_BYTES_LEN - )); - } - bytes[..graffiti.len()].copy_from_slice(graffiti.as_bytes()); - Ok(Self::UserSpecified(Graffiti::from(bytes))) - } else if cli_args.is_present("private") { - // When 'private' flag is present, use a zero-initialized bytes array. - Ok(Self::UserSpecified(Graffiti::from(bytes))) - } else { - // Use the default lighthouse graffiti if no user-specified graffiti flags are present - Ok(Self::default()) - } - } - pub fn graffiti(&self) -> Graffiti { match self { GraffitiOrigin::UserSpecified(graffiti) => *graffiti, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 040f6017f47..98ff40ba2b3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -26,6 +26,7 @@ use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; +use types::graffiti::GraffitiString; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes}; /// Gets the fully-initialized global client. @@ -566,7 +567,15 @@ pub fn get_config( client_config.chain.genesis_backfill = true; } - let beacon_graffiti = GraffitiOrigin::new(cli_args)?; + let beacon_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { + GraffitiOrigin::UserSpecified(GraffitiString::from_str(graffiti)?.into()) + } else if cli_args.is_present("private") { + // When 'private' flag is present, use a zero-initialized bytes array. 
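
// Sketch of the CLI decision introduced by this patch series: --graffiti pins
// a user-specified value, --private pins an empty (all-zero) value, and
// otherwise the calculated default is used so the EL-version graffiti can
// later take over. `parse_graffiti` stands in for GraffitiString validation;
// all names and the 32-byte limit handling here are illustrative.
const GRAFFITI_BYTES_LEN: usize = 32;

#[derive(Debug)]
enum GraffitiOrigin {
    UserSpecified(String),
    Calculated,
}

fn parse_graffiti(s: &str) -> Result<String, String> {
    if s.as_bytes().len() > GRAFFITI_BYTES_LEN {
        return Err(format!("graffiti too long: {} bytes maximum", GRAFFITI_BYTES_LEN));
    }
    Ok(s.to_string())
}

fn beacon_graffiti(graffiti_flag: Option<&str>, private_flag: bool) -> Result<GraffitiOrigin, String> {
    if let Some(graffiti) = graffiti_flag {
        Ok(GraffitiOrigin::UserSpecified(parse_graffiti(graffiti)?))
    } else if private_flag {
        // --private pins an empty graffiti so nothing identifying is published.
        Ok(GraffitiOrigin::UserSpecified(String::new()))
    } else {
        Ok(GraffitiOrigin::Calculated)
    }
}

fn main() {
    assert!(matches!(
        beacon_graffiti(Some("nice-graffiti"), false),
        Ok(GraffitiOrigin::UserSpecified(_))
    ));
    assert!(matches!(beacon_graffiti(None, true), Ok(GraffitiOrigin::UserSpecified(_))));
    assert!(matches!(beacon_graffiti(None, false), Ok(GraffitiOrigin::Calculated)));
}
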
+ GraffitiOrigin::UserSpecified(GraffitiString::empty().into()) + } else { + // Use the default lighthouse graffiti if no user-specified graffiti flags are present + GraffitiOrigin::default() + }; client_config.beacon_graffiti = beacon_graffiti; if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index bd4abe37d8f..c7a0081c9f7 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -47,6 +47,12 @@ impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { #[serde(transparent)] pub struct GraffitiString(String); +impl GraffitiString { + pub fn empty() -> Self { + Self(String::new()) + } +} + impl FromStr for GraffitiString { type Err = String; From e0b862bfcbdaaa9d6be7875cb23c63322e153618 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 23 Apr 2024 01:14:26 +0900 Subject: [PATCH 08/10] Drop lookup type trait for a simple arg --- .../network/src/sync/block_lookups/common.rs | 63 +++---- .../network/src/sync/block_lookups/mod.rs | 40 ++-- .../src/sync/block_lookups/parent_lookup.rs | 7 +- .../sync/block_lookups/single_block_lookup.rs | 172 ++---------------- beacon_node/network/src/sync/manager.rs | 21 +-- 5 files changed, 73 insertions(+), 230 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 3bd39301b21..7193dd6e216 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -29,34 +29,12 @@ pub enum LookupType { Parent, } -/// This trait helps differentiate `SingleBlockLookup`s from `ParentLookup`s .This is useful in -/// ensuring requests and responses are handled separately and enables us to use different failure -/// tolerances for each, while re-using the same basic request and retry logic. -pub trait Lookup { - const MAX_ATTEMPTS: u8; - fn lookup_type() -> LookupType; - fn max_attempts() -> u8 { - Self::MAX_ATTEMPTS - } -} - -/// A `Lookup` that is a part of a `ParentLookup`. -pub struct Parent; - -impl Lookup for Parent { - const MAX_ATTEMPTS: u8 = PARENT_FAIL_TOLERANCE; - fn lookup_type() -> LookupType { - LookupType::Parent - } -} - -/// A `Lookup` that part of a single block lookup. -pub struct Current; - -impl Lookup for Current { - const MAX_ATTEMPTS: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; - fn lookup_type() -> LookupType { - LookupType::Current +impl LookupType { + fn max_attempts(&self) -> u8 { + match self { + LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, + LookupType::Parent => PARENT_FAIL_TOLERANCE, + } } } @@ -68,7 +46,7 @@ impl Lookup for Current { /// The use of the `ResponseType` associated type gives us a degree of type /// safety when handling a block/blob response ensuring we only mutate the correct corresponding /// state. -pub trait RequestState { +pub trait RequestState { /// The type of the request . type RequestType; @@ -81,9 +59,12 @@ pub trait RequestState { /* Request building methods */ /// Construct a new request. - fn build_request(&mut self) -> Result<(PeerId, Self::RequestType), LookupRequestError> { + fn build_request( + &mut self, + lookup_type: LookupType, + ) -> Result<(PeerId, Self::RequestType), LookupRequestError> { // Verify and construct request. 
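
// Sketch of the refactor in this patch: the per-lookup retry budget moves off
// a marker trait (`Lookup` with an associated MAX_ATTEMPTS) and onto a plain
// enum carried as a runtime field, so request code can simply ask the lookup
// type for its limit. The constants below are placeholder values, not the
// real fail tolerances.
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
const PARENT_FAIL_TOLERANCE: u8 = 5;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum LookupType {
    Current,
    Parent,
}

impl LookupType {
    fn max_attempts(&self) -> u8 {
        match self {
            LookupType::Current => SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS,
            LookupType::Parent => PARENT_FAIL_TOLERANCE,
        }
    }
}

/// Returns true once a lookup of the given type has exhausted its attempts.
fn too_many_attempts(failed_attempts: u8, lookup_type: LookupType) -> bool {
    failed_attempts >= lookup_type.max_attempts()
}

fn main() {
    assert!(too_many_attempts(3, LookupType::Current));
    assert!(!too_many_attempts(3, LookupType::Parent));
}
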
- self.too_many_attempts()?; + self.too_many_attempts(lookup_type)?; let peer = self.get_peer()?; let request = self.new_request(); Ok((peer, request)) @@ -93,6 +74,7 @@ pub trait RequestState { fn build_request_and_send( &mut self, id: Id, + lookup_type: LookupType, cx: &mut SyncNetworkContext, ) -> Result<(), LookupRequestError> { // Check if request is necessary. @@ -101,7 +83,7 @@ pub trait RequestState { } // Construct request. - let (peer_id, request) = self.build_request()?; + let (peer_id, request) = self.build_request(lookup_type)?; // Update request state. let req_counter = self.get_state_mut().on_download_start(peer_id); @@ -110,17 +92,16 @@ pub trait RequestState { let id = SingleLookupReqId { id, req_counter, - lookup_type: L::lookup_type(), + lookup_type, }; Self::make_request(id, peer_id, request, cx) } /// Verify the current request has not exceeded the maximum number of attempts. - fn too_many_attempts(&self) -> Result<(), LookupRequestError> { - let max_attempts = L::max_attempts(); + fn too_many_attempts(&self, lookup_type: LookupType) -> Result<(), LookupRequestError> { let request_state = self.get_state(); - if request_state.failed_attempts() >= max_attempts { + if request_state.failed_attempts() >= lookup_type.max_attempts() { let cannot_process = request_state.more_failed_processing_attempts(); Err(LookupRequestError::TooManyAttempts { cannot_process }) } else { @@ -187,7 +168,7 @@ pub trait RequestState { fn response_type() -> ResponseType; /// A getter for the `BlockRequestState` or `BlobRequestState` associated with this trait. - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self; /// A getter for a reference to the `SingleLookupRequestState` associated with this trait. 
fn get_state(&self) -> &SingleLookupRequestState; @@ -196,7 +177,7 @@ pub trait RequestState { fn get_state_mut(&mut self) -> &mut SingleLookupRequestState; } -impl RequestState for BlockRequestState { +impl RequestState for BlockRequestState { type RequestType = BlocksByRootSingleRequest; type VerifiedResponseType = Arc>; type ReconstructedResponseType = RpcBlock; @@ -253,7 +234,7 @@ impl RequestState for BlockRequestState fn response_type() -> ResponseType { ResponseType::Block } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.block_request_state } fn get_state(&self) -> &SingleLookupRequestState { @@ -264,7 +245,7 @@ impl RequestState for BlockRequestState } } -impl RequestState for BlobRequestState { +impl RequestState for BlobRequestState { type RequestType = BlobsByRootSingleBlockRequest; type VerifiedResponseType = FixedBlobSidecarList; type ReconstructedResponseType = FixedBlobSidecarList; @@ -328,7 +309,7 @@ impl RequestState for BlobRequestState ResponseType { ResponseType::Blob } - fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { + fn request_state_mut(request: &mut SingleBlockLookup) -> &mut Self { &mut request.blob_request_state } fn get_state(&self) -> &SingleLookupRequestState { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index fa2683fb0f0..a2909b49dd1 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -16,9 +16,6 @@ use beacon_chain::data_availability_checker::{ }; use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; -pub use common::Current; -pub use common::Lookup; -pub use common::Parent; pub use common::RequestState; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -55,12 +52,12 @@ pub struct BlockLookups { /// Parent chain lookups being downloaded. parent_lookups: SmallVec<[ParentLookup; 3]>, - processing_parent_lookups: HashMap, SingleBlockLookup)>, + processing_parent_lookups: HashMap, SingleBlockLookup)>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, - single_block_lookups: FnvHashMap>, + single_block_lookups: FnvHashMap>, pub(crate) da_checker: Arc>, @@ -131,7 +128,7 @@ impl BlockLookups { /// Attempts to trigger the request matching the given `block_root`. pub fn trigger_single_lookup( &mut self, - mut single_block_lookup: SingleBlockLookup, + mut single_block_lookup: SingleBlockLookup, cx: &mut SyncNetworkContext, ) { let block_root = single_block_lookup.block_root(); @@ -147,7 +144,7 @@ impl BlockLookups { } /// Adds a lookup to the `single_block_lookups` map. - pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { + pub fn add_single_lookup(&mut self, single_block_lookup: SingleBlockLookup) { self.single_block_lookups .insert(single_block_lookup.id, single_block_lookup); @@ -212,6 +209,7 @@ impl BlockLookups { peers, self.da_checker.clone(), cx.next_id(), + LookupType::Current, ); debug!( @@ -284,10 +282,10 @@ impl BlockLookups { /// Get a single block lookup by its ID. This method additionally ensures the `req_counter` /// matches the current `req_counter` for the lookup. This ensures any stale responses from requests /// that have been retried are ignored. 
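
// Sketch of the `req_counter` staleness check referred to in the comments
// here: every (re)try stamps the outgoing request with a fresh counter, and a
// response is only accepted if it carries the counter of the attempt that is
// currently in flight. `RequestAttemptState` is an illustrative stand-in for
// the lookup's request state, not the patch's exact type.
#[derive(Debug, Default)]
struct RequestAttemptState {
    req_counter: u32,
}

impl RequestAttemptState {
    /// Called when a request (or retry) is sent; returns the counter stamped on it.
    fn on_download_start(&mut self) -> u32 {
        self.req_counter += 1;
        self.req_counter
    }

    /// A response is only current if it matches the latest attempt's counter.
    fn is_current(&self, response_counter: u32) -> bool {
        response_counter == self.req_counter
    }
}

fn main() {
    let mut state = RequestAttemptState::default();
    let first_attempt = state.on_download_start();
    // The request is retried before the first response arrives.
    let second_attempt = state.on_download_start();
    assert!(!state.is_current(first_attempt)); // the stale response is dropped
    assert!(state.is_current(second_attempt));
}
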
- fn get_single_lookup>( + fn get_single_lookup>( &mut self, id: SingleLookupReqId, - ) -> Option> { + ) -> Option> { let mut lookup = self.single_block_lookups.remove(&id.id)?; let request_state = R::request_state_mut(&mut lookup); @@ -314,7 +312,7 @@ impl BlockLookups { } /// Process a block or blob response received from a single lookup request. - pub fn single_lookup_response>( + pub fn single_lookup_response>( &mut self, lookup_id: SingleLookupReqId, peer_id: PeerId, @@ -345,7 +343,7 @@ impl BlockLookups { "response_type" => ?response_type, ); - match self.handle_verified_response::( + match self.handle_verified_response::( seen_timestamp, cx, BlockProcessType::SingleBlock { id: lookup.id }, @@ -372,13 +370,13 @@ impl BlockLookups { /// Consolidates error handling for `single_lookup_response`. An `Err` here should always mean /// the lookup is dropped. - fn handle_verified_response>( + fn handle_verified_response>( &self, seen_timestamp: Duration, cx: &mut SyncNetworkContext, process_type: BlockProcessType, verified_response: R::VerifiedResponseType, - lookup: &mut SingleBlockLookup, + lookup: &mut SingleBlockLookup, ) -> Result<(), LookupRequestError> { let id = lookup.id; let block_root = lookup.block_root(); @@ -389,7 +387,7 @@ impl BlockLookups { // If we have an outstanding parent request for this block, delay sending the response until // all parent blocks have been processed, otherwise we will fail validation with an // `UnknownParent`. - let delay_send = match L::lookup_type() { + let delay_send = match lookup.lookup_type { LookupType::Parent => false, LookupType::Current => self.has_pending_parent_request(lookup.block_root()), }; @@ -453,7 +451,7 @@ impl BlockLookups { /// Get a parent block lookup by its ID. This method additionally ensures the `req_counter` /// matches the current `req_counter` for the lookup. This any stale responses from requests /// that have been retried are ignored. - fn get_parent_lookup>( + fn get_parent_lookup>( &mut self, id: SingleLookupReqId, ) -> Option> { @@ -479,7 +477,7 @@ impl BlockLookups { } /// Process a response received from a parent lookup request. - pub fn parent_lookup_response>( + pub fn parent_lookup_response>( &mut self, id: SingleLookupReqId, peer_id: PeerId, @@ -523,7 +521,7 @@ impl BlockLookups { /// Consolidates error handling for `parent_lookup_response`. An `Err` here should always mean /// the lookup is dropped. - fn parent_lookup_response_inner>( + fn parent_lookup_response_inner>( &mut self, peer_id: PeerId, response: R::VerifiedResponseType, @@ -554,7 +552,7 @@ impl BlockLookups { } } - self.handle_verified_response::( + self.handle_verified_response::( seen_timestamp, cx, BlockProcessType::ParentLookup { @@ -633,7 +631,7 @@ impl BlockLookups { } /// An RPC error has occurred during a parent lookup. This function handles this case. - pub fn parent_lookup_failed>( + pub fn parent_lookup_failed>( &mut self, id: SingleLookupReqId, peer_id: &PeerId, @@ -669,7 +667,7 @@ impl BlockLookups { } /// An RPC error has occurred during a single lookup. 
This function handles this case.\ - pub fn single_block_lookup_failed>( + pub fn single_block_lookup_failed>( &mut self, id: SingleLookupReqId, peer_id: &PeerId, @@ -717,7 +715,7 @@ impl BlockLookups { /* Processing responses */ - pub fn single_block_component_processed>( + pub fn single_block_component_processed>( &mut self, target_id: Id, result: BlockProcessingResult, diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index b7a71860bff..11eb908953f 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,6 +1,6 @@ +use super::common::LookupType; use super::single_block_lookup::{LookupRequestError, SingleBlockLookup}; use super::{DownloadedBlock, PeerId}; -use crate::sync::block_lookups::common::Parent; use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::RpcBlock; @@ -24,7 +24,7 @@ pub(crate) struct ParentLookup { /// The blocks that have currently been downloaded. downloaded_blocks: Vec>, /// Request of the last parent. - pub current_parent_request: SingleBlockLookup, + pub current_parent_request: SingleBlockLookup, } #[derive(Debug, PartialEq, Eq)] @@ -55,6 +55,7 @@ impl ParentLookup { &[peer_id], da_checker, cx.next_id(), + LookupType::Parent, ); Self { @@ -132,7 +133,7 @@ impl ParentLookup { Hash256, VecDeque>, Vec, - SingleBlockLookup, + SingleBlockLookup, ) { let ParentLookup { chain_hash, diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 5bb663967d7..077af7c3d19 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,5 +1,6 @@ +use super::common::LookupType; use super::PeerId; -use crate::sync::block_lookups::common::{Lookup, RequestState}; +use crate::sync::block_lookups::common::RequestState; use crate::sync::block_lookups::Id; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::block_verification_types::RpcBlock; @@ -14,7 +15,6 @@ use rand::seq::IteratorRandom; use slog::{debug, Logger}; use std::collections::HashSet; use std::fmt::Debug; -use std::marker::PhantomData; use std::sync::Arc; use store::Hash256; use strum::IntoStaticStr; @@ -33,27 +33,30 @@ pub enum LookupRequestError { BadState(String), } -pub struct SingleBlockLookup { +pub struct SingleBlockLookup { pub id: Id, - pub block_request_state: BlockRequestState, - pub blob_request_state: BlobRequestState, + pub lookup_type: LookupType, + pub block_request_state: BlockRequestState, + pub blob_request_state: BlobRequestState, pub da_checker: Arc>, /// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent` /// because any blocks or blobs without parents won't hit the data availability cache. 
pub child_components: Option>, } -impl SingleBlockLookup { +impl SingleBlockLookup { pub fn new( requested_block_root: Hash256, child_components: Option>, peers: &[PeerId], da_checker: Arc>, id: Id, + lookup_type: LookupType, ) -> Self { let is_deneb = da_checker.is_deneb(); Self { id, + lookup_type, block_request_state: BlockRequestState::new(requested_block_root, peers), blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb), da_checker, @@ -103,11 +106,11 @@ impl SingleBlockLookup { if !block_already_downloaded { self.block_request_state - .build_request_and_send(self.id, cx)?; + .build_request_and_send(self.id, self.lookup_type, cx)?; } if !blobs_already_downloaded { self.blob_request_state - .build_request_and_send(self.id, cx)?; + .build_request_and_send(self.id, self.lookup_type, cx)?; } Ok(()) } @@ -144,7 +147,7 @@ impl SingleBlockLookup { /// Accepts a verified response, and adds it to the child components if required. This method /// returns a `CachedChild` which provides a completed block + blob response if all components have been /// received, or information about whether the child is required and if it has been downloaded. - pub fn add_response>( + pub fn add_response>( &mut self, verified_response: R::VerifiedResponseType, ) -> CachedChild { @@ -301,7 +304,7 @@ impl SingleBlockLookup { } /// The state of the blob request component of a `SingleBlockLookup`. -pub struct BlobRequestState { +pub struct BlobRequestState { /// The latest picture of which blobs still need to be requested. This includes information /// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in /// the data availability checker. @@ -310,10 +313,9 @@ pub struct BlobRequestState { /// Where we store blobs until we receive the stream terminator. pub blob_download_queue: FixedBlobSidecarList, pub state: SingleLookupRequestState, - _phantom: PhantomData, } -impl BlobRequestState { +impl BlobRequestState { pub fn new(block_root: Hash256, peer_source: &[PeerId], is_deneb: bool) -> Self { let default_ids = MissingBlobs::new_without_block(block_root, is_deneb); Self { @@ -321,24 +323,21 @@ impl BlobRequestState { requested_ids: default_ids, blob_download_queue: <_>::default(), state: SingleLookupRequestState::new(peer_source), - _phantom: PhantomData, } } } /// The state of the block request component of a `SingleBlockLookup`. 
-pub struct BlockRequestState { +pub struct BlockRequestState { pub requested_block_root: Hash256, pub state: SingleLookupRequestState, - _phantom: PhantomData, } -impl BlockRequestState { +impl BlockRequestState { pub fn new(block_root: Hash256, peers: &[PeerId]) -> Self { Self { requested_block_root: block_root, state: SingleLookupRequestState::new(peers), - _phantom: PhantomData, } } } @@ -525,7 +524,7 @@ impl SingleLookupRequestState { } } -impl slog::Value for SingleBlockLookup { +impl slog::Value for SingleBlockLookup { fn serialize( &self, _record: &slog::Record, @@ -533,7 +532,7 @@ impl slog::Value for SingleBlockLookup { serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_str("request", key)?; - serializer.emit_arguments("lookup_type", &format_args!("{:?}", L::lookup_type()))?; + serializer.emit_arguments("lookup_type", &format_args!("{:?}", self.lookup_type))?; serializer.emit_arguments("hash", &format_args!("{}", self.block_root()))?; serializer.emit_arguments( "blob_ids", @@ -587,138 +586,3 @@ impl std::fmt::Display for State { } } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::sync::block_lookups::common::LookupType; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use sloggers::null::NullLoggerBuilder; - use sloggers::Build; - use slot_clock::{SlotClock, TestingSlotClock}; - use std::time::Duration; - use store::{HotColdDB, MemoryStore, StoreConfig}; - use types::{ - test_utils::{SeedableRng, TestRandom, XorShiftRng}, - ChainSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot, - }; - - fn rand_block() -> SignedBeaconBlock { - let mut rng = XorShiftRng::from_seed([42; 16]); - SignedBeaconBlock::from_block( - types::BeaconBlock::Base(types::BeaconBlockBase { - ..<_>::random_for_test(&mut rng) - }), - types::Signature::random_for_test(&mut rng), - ) - } - type T = Witness, E, MemoryStore, MemoryStore>; - - struct TestLookup1; - - impl Lookup for TestLookup1 { - const MAX_ATTEMPTS: u8 = 3; - - fn lookup_type() -> LookupType { - panic!() - } - } - - struct TestLookup2; - - impl Lookup for TestLookup2 { - const MAX_ATTEMPTS: u8 = 4; - - fn lookup_type() -> LookupType { - panic!() - } - } - - #[test] - fn test_happy_path() { - let peer_id = PeerId::random(); - let block = rand_block(); - let spec = E::default_spec(); - let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), - ); - let log = NullLoggerBuilder.build().expect("logger should build"); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - .expect("store"); - let da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) - .expect("data availability checker"), - ); - let mut sl = SingleBlockLookup::::new( - block.canonical_root(), - None, - &[peer_id], - da_checker, - 1, - ); - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.state = State::Downloading { peer_id }; - } - - #[test] - fn test_block_lookup_failures() { - let peer_id = PeerId::random(); - let block = rand_block(); - let spec = E::default_spec(); - let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), - ); - let log = NullLoggerBuilder.build().expect("logger should build"); - let store = - HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone()) - 
.expect("store"); - - let da_checker = Arc::new( - DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone()) - .expect("data availability checker"), - ); - - let mut sl = SingleBlockLookup::::new( - block.canonical_root(), - None, - &[peer_id], - da_checker, - 1, - ); - for _ in 1..TestLookup2::MAX_ATTEMPTS { - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.on_download_failure(); - } - - // Now we receive the block and send it for processing - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap(); - sl.block_request_state.state.state = State::Downloading { peer_id }; - - // One processing failure maxes the available attempts - sl.block_request_state.state.on_processing_failure(); - assert_eq!( - as RequestState>::build_request( - &mut sl.block_request_state, - ) - .unwrap_err(), - LookupRequestError::TooManyAttempts { - cannot_process: false - } - ) - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 23bd1010bfe..9c17c6a1512 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -42,7 +42,6 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use crate::sync::block_lookups::common::{Current, Parent}; use crate::sync::block_lookups::{BlobRequestState, BlockRequestState}; use crate::sync::block_sidecar_coupling::BlocksAndBlobsRequestInfo; use beacon_chain::block_verification_types::AsBlock; @@ -621,14 +620,14 @@ impl SyncManager { } => match process_type { BlockProcessType::SingleBlock { id } => self .block_lookups - .single_block_component_processed::>( + .single_block_component_processed::( id, result, &mut self.network, ), BlockProcessType::SingleBlob { id } => self .block_lookups - .single_block_component_processed::>( + .single_block_component_processed::>( id, result, &mut self.network, @@ -834,7 +833,7 @@ impl SyncManager { Ok((block, seen_timestamp)) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_lookup_response::>( + .single_lookup_response::( id, peer_id, block, @@ -843,7 +842,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_response::>( + .parent_lookup_response::( id, peer_id, block, @@ -854,7 +853,7 @@ impl SyncManager { Err(error) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_block_lookup_failed::>( + .single_block_lookup_failed::( id, &peer_id, &mut self.network, @@ -862,7 +861,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_failed::>( + .parent_lookup_failed::( id, &peer_id, &mut self.network, @@ -909,7 +908,7 @@ impl SyncManager { Ok((blobs, seen_timestamp)) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_lookup_response::>( + .single_lookup_response::>( id, peer_id, blobs, @@ -918,7 +917,7 @@ impl SyncManager { ), LookupType::Parent => self .block_lookups - .parent_lookup_response::>( + .parent_lookup_response::>( id, peer_id, blobs, @@ -930,7 +929,7 @@ impl SyncManager { Err(error) => match id.lookup_type { LookupType::Current => self .block_lookups - .single_block_lookup_failed::>( + .single_block_lookup_failed::>( id, &peer_id, &mut self.network, @@ -938,7 +937,7 @@ impl SyncManager { ), LookupType::Parent => self 
.block_lookups - .parent_lookup_failed::>( + .parent_lookup_failed::>( id, &peer_id, &mut self.network, From c67f67d4331ece95f69ed6e46b20d1e4fe2d6952 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 24 Apr 2024 12:21:55 +1000 Subject: [PATCH 09/10] Bump jobserver and fix non-portable builds --- Cargo.lock | 5 ++--- Cargo.toml | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d63773d9a94..a2a7f52eb94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1175,7 +1175,6 @@ dependencies = [ "glob", "hex", "libc", - "serde", ] [[package]] @@ -4386,9 +4385,9 @@ dependencies = [ [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 296dadc1f66..be2011ba286 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,9 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" -c-kzg = "1" +# Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable +# feature ourselves when desired. +c-kzg = { version = "1", default-features = false } clap = "2" compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.3" From 4e0bd8ad20a7276d318dba0925f3692d05ff70cc Mon Sep 17 00:00:00 2001 From: sean Date: Tue, 23 Apr 2024 21:43:39 +0000 Subject: [PATCH 10/10] use yaml-rust2 --- Cargo.lock | 424 +++--------------------------- consensus/int_to_bytes/Cargo.toml | 2 +- consensus/int_to_bytes/src/lib.rs | 10 +- 3 files changed, 43 insertions(+), 393 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e3557c7efb..ebfcad650c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "directory", "environment", @@ -292,54 +292,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "anstream" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" - -[[package]] -name = "anstyle-parse" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" -dependencies = [ - "anstyle", - "windows-sys 0.52.0", -] - [[package]] name = "anyhow" version = "1.0.82" @@ -485,6 +437,12 @@ dependencies = [ "rand", ] +[[package]] +name = "arraydeque" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + [[package]] name = "arrayref" version = "0.3.7" @@ -847,7 +805,7 @@ name = "beacon_node" version = "5.1.3" dependencies = [ "beacon_chain", - "clap 2.34.0", + "clap", "clap_utils", "client", "directory", @@ -1060,7 +1018,7 @@ name = "boot_node" version = "5.1.3" dependencies = [ "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1336,38 +1294,11 @@ dependencies = [ "vec_map", ] -[[package]] -name = "clap" -version = "4.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" -dependencies = [ - "clap_builder", -] - -[[package]] -name = "clap_builder" -version = "4.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim 0.11.1", -] - -[[package]] -name = "clap_lex" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" - [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "dirs", "eth2_network_config", "ethereum-types 0.14.1", @@ -1433,12 +1364,6 @@ dependencies = [ "cc", ] -[[package]] -name = "colorchoice" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" - [[package]] name = "compare_fields" version = "0.2.0" @@ -1464,19 +1389,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "console" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" -dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "unicode-width", - "windows-sys 0.52.0", -] - [[package]] name = "const-hex" version = "1.11.3" @@ -1559,7 +1471,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap 2.34.0", + "clap", "criterion-plot", "csv", "itertools", @@ -1774,18 +1686,8 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "darling_core", + "darling_macro", ] [[package]] @@ -1802,38 +1704,13 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 
1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", + "darling_core", "quote", "syn 1.0.109", ] @@ -1896,7 +1773,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap 2.34.0", + "clap", "clap_utils", "environment", "hex", @@ -2005,37 +1882,6 @@ dependencies = [ "syn 2.0.60", ] -[[package]] -name = "derive_builder" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" -dependencies = [ - "darling 0.14.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "derive_more" version = "0.99.17" @@ -2049,19 +1895,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "dialoguer" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" -dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror", - "zeroize", -] - [[package]] name = "diesel" version = "2.1.6" @@ -2133,7 +1966,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap 2.34.0", + "clap", "clap_utils", "eth2_network_config", ] @@ -2227,29 +2060,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" -[[package]] -name = "dtt" -version = "0.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b2dd9ee2d76888dc4c17d6da74629fa11b3cb1e8094fdc159b7f8ff259fc88" -dependencies = [ - "regex", - "serde", - "time", -] - -[[package]] -name = "duct" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" -dependencies = [ - "libc", - "once_cell", - "os_pipe", - "shared_child", -] - [[package]] name = "dunce" version = "1.0.4" @@ -2387,12 +2197,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encode_unicode" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" - [[package]] name = "encoding_rs" version = "0.8.34" @@ -2433,16 +2237,6 @@ dependencies = [ "syn 2.0.60", ] -[[package]] -name = "env_filter" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" -dependencies = [ - "log", - "regex", -] - [[package]] name = "env_logger" version = "0.8.4" @@ -2466,19 +2260,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "env_logger" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "humantime", - "log", -] - [[package]] name = "environment" version = "0.1.2" @@ -2666,7 +2447,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid 0.8.2", + "uuid", "zeroize", ] @@ -2706,7 +2487,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid 0.8.2", + "uuid", ] [[package]] @@ -2850,7 +2631,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6085d7fd3cf84bd2b8fec150d54c8467fb491d8db9c460607c5534f653a0ee38" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -3163,12 +2944,6 @@ dependencies = [ "rustc_version 0.4.0", ] -[[package]] -name = "figlet-rs" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4742a071cd9694fc86f9fa1a08fa3e53d40cc899d7ee532295da2d085639fbc5" - [[package]] name = "filesystem" version = "0.1.0" @@ -3274,12 +3049,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "funty" version = "1.1.0" @@ -4277,7 +4046,7 @@ version = "0.2.0" dependencies = [ "bytes", "hex", - "serde_yml", + "yaml-rust2", ] [[package]] @@ -4501,7 +4270,7 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -5079,7 +4848,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap 2.34.0", + "clap", "clap_utils", "database_manager", "directory", @@ -5427,7 +5196,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37cb4045d5677b7da537f8cb5d0730d5b6414e3cc81c61e4b50e1f0cbdc73909" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", @@ -6012,16 +5781,6 @@ dependencies = [ "types", ] -[[package]] -name = "os_pipe" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57119c3b893986491ec9aa85056780d3a0f3cf4da7cc09dd3650dbd6c6738fb9" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - [[package]] name = "overload" version = "0.1.1" @@ -7054,30 +6813,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" -[[package]] -name = "rlg" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccf670238310d5c31a52fed1a3314620d037a64f1e5fbdc71b2c50909134dc" -dependencies = [ - "dtt", - "tokio", - "vrd 0.0.4", -] - -[[package]] -name = "rlg" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e02c717e23f67b23032a4acb01cf63534d6259938d592e6d2451c02f09fc368" -dependencies = [ - "dtt", - "hostname", - "serde_json", - "tokio", - "vrd 0.0.5", -] - [[package]] name = "rlp" version = "0.5.2" @@ -7646,7 +7381,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", "syn 1.0.109", @@ -7665,27 +7400,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "serde_yml" -version = "0.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "196c3750ff0411738366b0d9534ca55fc74d04a3d4284a450039de950bd11938" -dependencies = [ - "dtt", - "env_logger 0.11.3", - "figlet-rs", - "indexmap 2.2.6", - "itoa", - "log", - "openssl", - "rlg 0.0.3", - "ryu", - "serde", - "unsafe-libyaml", - "uuid 1.8.0", - "xtasks", -] - [[package]] name = "sha1" version = "0.10.6" @@ -7762,22 +7476,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shared_child" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d94659ad3c2137fef23ae75b03d5241d633f8acded53d672decfa0e6e0caef" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - [[package]] name = "shlex" version = "1.3.0" @@ -7829,7 +7527,7 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap 2.34.0", + "clap", "env_logger 0.9.3", "eth1", "eth2_network_config", @@ -8231,12 +7929,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - [[package]] name = "strum" version = "0.24.1" @@ -8271,7 +7963,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" dependencies = [ - "darling 0.13.4", + "darling", "itertools", "proc-macro2", "quote", @@ -8639,7 +8331,6 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 0.5.6", @@ -8970,7 +8661,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84303a9c7cda5f085a3ed9cd241d1e95e04d88aab1d679b02f212e653537ba86" dependencies = [ - "darling 0.13.4", + "darling", "quote", "syn 1.0.109", ] @@ -9206,12 +8897,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf8parse" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" - [[package]] name = "uuid" version = "0.8.2" @@ -9222,15 +8907,6 @@ dependencies = [ "serde", ] -[[package]] -name = "uuid" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" -dependencies = [ - "getrandom", -] - [[package]] name = "validator_client" version = "0.3.5" @@ -9238,7 +8914,7 @@ dependencies = [ "account_utils", "bincode", "bls", - "clap 2.34.0", + "clap", "clap_utils", "deposit_contract", "directory", @@ -9310,7 +8986,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bls", - "clap 2.34.0", + "clap", "clap_utils", "environment", "eth2", @@ -9359,25 +9035,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "vrd" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81b8b5b404f3d7afa1b8142a6bc980c20cd68556c634c3db517871aa0402521" -dependencies = [ - "rand", -] - 
-[[package]] -name = "vrd" -version = "0.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee1067b8d17481f5be71b59d11c329e955ffe36348907e0a4a41b619682bb4af" -dependencies = [ - "rand", - "serde", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -9570,7 +9227,7 @@ dependencies = [ "beacon_node", "bls", "byteorder", - "clap 2.34.0", + "clap", "diesel", "diesel_migrations", "env_logger 0.9.3", @@ -10035,23 +9692,14 @@ dependencies = [ ] [[package]] -name = "xtasks" -version = "0.0.2" +name = "yaml-rust2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "940db5674e301470e6cd91098b2c68a1fad751a1623575d1133f7456146e6d2f" +checksum = "498f4d102a79ea1c9d4dd27573c0fc96ad74c023e8da38484e47883076da25fb" dependencies = [ - "anyhow", - "clap 4.5.4", - "derive_builder", - "dialoguer", - "dtt", - "duct", - "fs_extra", - "glob", - "rlg 0.0.2", - "serde", - "serde_json", - "vrd 0.0.5", + "arraydeque", + "encoding_rs", + "hashlink", ] [[package]] diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 1958fac6e56..e99d1af8e56 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] +yaml-rust2 = "0.8" hex = { workspace = true } -serde_yml = "0.0.4" diff --git a/consensus/int_to_bytes/src/lib.rs b/consensus/int_to_bytes/src/lib.rs index 076e7154647..7a1fff7ccd3 100644 --- a/consensus/int_to_bytes/src/lib.rs +++ b/consensus/int_to_bytes/src/lib.rs @@ -78,7 +78,8 @@ pub fn int_to_bytes96(int: u64) -> Vec { #[cfg(test)] mod tests { use super::*; - use std::{collections::HashMap, fs::File, io::prelude::*, path::PathBuf}; + use std::{fs::File, io::prelude::*, path::PathBuf}; + use yaml_rust2::yaml; #[test] fn fixed_bytes32() { @@ -110,13 +111,14 @@ mod tests { file.read_to_string(&mut yaml_str).unwrap(); - let docs: HashMap = serde_yml::from_str(&yaml_str).unwrap(); - let test_cases = docs["test_cases"].as_sequence().unwrap(); + let docs = yaml::YamlLoader::load_from_str(&yaml_str).unwrap(); + let doc = &docs[0]; + let test_cases = doc["test_cases"].as_vec().unwrap(); for test_case in test_cases { let byte_length = test_case["byte_length"].as_i64().unwrap() as u64; let int = test_case["int"].as_i64().unwrap() as u64; - let bytes_string = test_case["bytes"].as_str().unwrap(); + let bytes_string = test_case["bytes"].clone().into_string().unwrap(); let bytes = hex::decode(bytes_string.replace("0x", "")).unwrap(); match byte_length {