Fix compilation #968

Merged · 3 commits · Nov 12, 2024
node/src/service.rs: 184 changes (85 additions, 99 deletions)
@@ -1,7 +1,7 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

use crate::cli::Sealing;
-use crate::client::{FullBackend, FullClient, RuntimeApiCollection};
+use crate::client::{FullBackend, FullClient};
use crate::ethereum::{
db_config_dir, new_frontier_partial, spawn_frontier_tasks, BackendType, EthConfiguration,
FrontierBackend, FrontierBlockImport, FrontierPartialComponents, StorageOverride,
@@ -12,24 +12,20 @@ use sc_client_api::{Backend as BackendT, BlockBackend};
use sc_consensus::{BasicQueue, BoxBlockImport};
use sc_consensus_grandpa::BlockNumberOps;
use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
-use sc_executor::HostFunctions as HostFunctionsT;
use sc_network_sync::strategy::warp::{WarpSyncConfig, WarpSyncProvider};
use sc_service::{error::Error as ServiceError, Configuration, PartialComponents, TaskManager};
use sc_telemetry::{log, Telemetry, TelemetryHandle, TelemetryWorker};
use sc_transaction_pool::FullPool;
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
-use sp_api::ConstructRuntimeApi;
-use sp_consensus_aura::sr25519::{AuthorityId as AuraId, AuthorityPair as AuraPair};
-use sp_core::{H256, U256};
+use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
+use sp_core::U256;
use sp_runtime::traits::{Block as BlockT, NumberFor};
use std::{cell::RefCell, path::Path};
use std::{sync::Arc, time::Duration};
use substrate_prometheus_endpoint::Registry;

// Runtime
-use node_subtensor_runtime::{
-opaque::Block, AccountId, Balance, Nonce, RuntimeApi, TransactionConverter,
-};
+use node_subtensor_runtime::{opaque::Block, RuntimeApi, TransactionConverter};

/// The minimum period of blocks on which justifications will be
/// imported and generated.
@@ -53,41 +49,41 @@ type GrandpaBlockImport<B, C> =
sc_consensus_grandpa::GrandpaBlockImport<FullBackend<B>, B, C, FullSelectChain<B>>;
type GrandpaLinkHalf<B, C> = sc_consensus_grandpa::LinkHalf<B, C, FullSelectChain<B>>;

-pub fn new_partial<B, RA, HF, BIQ>(
+pub fn new_partial<BIQ>(
config: &Configuration,
eth_config: &EthConfiguration,
build_import_queue: BIQ,
) -> Result<
PartialComponents<
-FullClient<B, RA, HF>,
-FullBackend<B>,
-FullSelectChain<B>,
-BasicQueue<B>,
-FullPool<B, FullClient<B, RA, HF>>,
+Client,
+FullBackend<Block>,
+FullSelectChain<Block>,
+BasicQueue<Block>,
+FullPool<Block, Client>,
(
Option<Telemetry>,
-BoxBlockImport<B>,
-GrandpaLinkHalf<B, FullClient<B, RA, HF>>,
-FrontierBackend<B, FullClient<B, RA, HF>>,
-Arc<dyn StorageOverride<B>>,
+BoxBlockImport<Block>,
+GrandpaLinkHalf<Block, Client>,
+FrontierBackend<Block, Client>,
+Arc<dyn StorageOverride<Block>>,
),
>,
ServiceError,
>
where
-B: BlockT<Hash = H256>,
-RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
-RA: Send + Sync + 'static,
-RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
-HF: HostFunctionsT + 'static,
+// B: BlockT<Hash = H256>,
+// RA: ConstructRuntimeApi<Block, Client>,
+// RA: Send + Sync + 'static,
+// RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
+// HF: HostFunctionsT + 'static,
BIQ: FnOnce(
-Arc<FullClient<B, RA, HF>>,
+Arc<Client>,
&Configuration,
&EthConfiguration,
&TaskManager,
Option<TelemetryHandle>,
-GrandpaBlockImport<B, FullClient<B, RA, HF>>,
-) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>,
+GrandpaBlockImport<Block, Client>,
+) -> Result<(BasicQueue<Block>, BoxBlockImport<Block>), ServiceError>,
{
let telemetry = config
.telemetry_endpoints
@@ -100,12 +96,13 @@ where
})
.transpose()?;

-let executor = sc_service::new_wasm_executor::<HostFunctions>(&config.executor);
-let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<B, RA, _>(
-config,
-telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-executor,
-)?;
+let executor = sc_service::new_wasm_executor(&config.executor);
+let (client, backend, keystore_container, task_manager) =
+sc_service::new_full_parts::<Block, RuntimeApi, _>(
+config,
+telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+executor,
+)?;
let client = Arc::new(client);

let telemetry = telemetry.map(|(worker, telemetry)| {
@@ -124,7 +121,7 @@ where
telemetry.as_ref().map(|x| x.handle()),
)?;

-let storage_override = Arc::new(StorageOverrideHandler::<B, _, _>::new(client.clone()));
+let storage_override = Arc::new(StorageOverrideHandler::<_, _, _>::new(client.clone()));
let frontier_backend = match eth_config.frontier_backend_type {
BackendType::KeyValue => FrontierBackend::KeyValue(Arc::new(fc_db::kv::Backend::open(
Arc::clone(&client),
@@ -190,21 +187,21 @@
}

/// Build the import queue for the template runtime (aura + grandpa).
-pub fn build_aura_grandpa_import_queue<B, RA, HF>(
-client: Arc<FullClient<B, RA, HF>>,
+pub fn build_aura_grandpa_import_queue(
+client: Arc<Client>,
config: &Configuration,
eth_config: &EthConfiguration,
task_manager: &TaskManager,
telemetry: Option<TelemetryHandle>,
-grandpa_block_import: GrandpaBlockImport<B, FullClient<B, RA, HF>>,
-) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>
+grandpa_block_import: GrandpaBlockImport<Block, Client>,
+) -> Result<(BasicQueue<Block>, BoxBlockImport<Block>), ServiceError>
where
-B: BlockT,
-NumberFor<B>: BlockNumberOps,
-RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
-RA: Send + Sync + 'static,
-RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
-HF: HostFunctionsT + 'static,
+// B: BlockT,
+NumberFor<Block>: BlockNumberOps,
+// RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
+// RA: Send + Sync + 'static,
+// RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
+// HF: HostFunctionsT + 'static,
{
let frontier_block_import =
FrontierBlockImport::new(grandpa_block_import.clone(), client.clone());
@@ -241,20 +238,20 @@ where
}

/// Build the import queue for the template runtime (manual seal).
-pub fn build_manual_seal_import_queue<B, RA, HF>(
-client: Arc<FullClient<B, RA, HF>>,
+pub fn build_manual_seal_import_queue(
+client: Arc<Client>,
config: &Configuration,
_eth_config: &EthConfiguration,
task_manager: &TaskManager,
_telemetry: Option<TelemetryHandle>,
-_grandpa_block_import: GrandpaBlockImport<B, FullClient<B, RA, HF>>,
-) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>
-where
-B: BlockT,
-RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
-RA: Send + Sync + 'static,
-RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
-HF: HostFunctionsT + 'static,
+_grandpa_block_import: GrandpaBlockImport<Block, Client>,
+) -> Result<(BasicQueue<Block>, BoxBlockImport<Block>), ServiceError>
+// where
+// B: BlockT,
+// RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
+// RA: Send + Sync + 'static,
+// RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
+// HF: HostFunctionsT + 'static,
{
let frontier_block_import = FrontierBlockImport::new(client.clone(), client);
Ok((
@@ -268,25 +265,25 @@ where
}

/// Builds a new service for a full client.
-pub async fn new_full<B, RA, HF, NB>(
+pub async fn new_full<NB>(
mut config: Configuration,
eth_config: EthConfiguration,
sealing: Option<Sealing>,
) -> Result<TaskManager, ServiceError>
where
-B: BlockT<Hash = H256>,
-NumberFor<B>: BlockNumberOps,
-<B as BlockT>::Header: Unpin,
-RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
-RA: Send + Sync + 'static,
-RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
-HF: HostFunctionsT + 'static,
-NB: sc_network::NetworkBackend<B, <B as sp_runtime::traits::Block>::Hash>,
+// B: BlockT<Hash = H256>,
+NumberFor<Block>: BlockNumberOps,
+// <B as BlockT>::Header: Unpin,
+// RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
+// RA: Send + Sync + 'static,
+// RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
+// HF: HostFunctionsT + 'static,
+NB: sc_network::NetworkBackend<Block, <Block as BlockT>::Hash>,
{
let build_import_queue = if sealing.is_some() {
-build_manual_seal_import_queue::<B, RA, HF>
+build_manual_seal_import_queue
} else {
-build_aura_grandpa_import_queue::<B, RA, HF>
+build_aura_grandpa_import_queue
};

let PartialComponents {
@@ -315,9 +312,7 @@ where
let metrics = NB::register_notification_metrics(maybe_registry);

let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
-&client
-.block_hash(0u32.into())?
-.expect("Genesis block exists; qed"),
+&client.block_hash(0u32)?.expect("Genesis block exists; qed"),
&config.chain_spec,
);

@@ -332,7 +327,7 @@ where
None
} else {
net_config.add_notification_protocol(grandpa_protocol_config);
-let warp_sync: Arc<dyn WarpSyncProvider<B>> =
+let warp_sync: Arc<dyn WarpSyncProvider<Block>> =
Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
grandpa_link.shared_authority_set().clone(),
@@ -378,10 +373,11 @@ where

let role = config.role;
let force_authoring = config.force_authoring;
-let backoff_authoring_blocks = Some(BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<B>> {
-unfinalized_slack: 6u32.into(),
-..Default::default()
-});
+let backoff_authoring_blocks =
+Some(BackoffAuthoringOnFinalizedHeadLagging::<NumberFor<Block>> {
+unfinalized_slack: 6u32,
+..Default::default()
+});
let name = config.network.node_name.clone();
let frontier_backend = Arc::new(frontier_backend);
let enable_grandpa = !config.disable_grandpa && sealing.is_none();
@@ -395,7 +391,7 @@ where
// The MappingSyncWorker sends through the channel on block import and the subscription emits a notification to the subscriber on receiving a message through this channel.
// This way we avoid race conditions when using native substrate block import notification stream.
let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
-fc_mapping_sync::EthereumBlockNotification<B>,
+fc_mapping_sync::EthereumBlockNotification<Block>,
> = Default::default();
let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

@@ -447,7 +443,7 @@ where
client: client.clone(),
pool: pool.clone(),
graph: pool.pool().clone(),
-converter: Some(TransactionConverter::<B>::default()),
+converter: Some(TransactionConverter::<Block>::default()),
is_authority,
enable_dev_signer,
network: network.clone(),
@@ -640,16 +636,10 @@ pub async fn build_full(
) -> Result<TaskManager, ServiceError> {
match config.network.network_backend {
sc_network::config::NetworkBackendType::Libp2p => {
-new_full::<Block, RuntimeApi, HostFunctions, sc_network::NetworkWorker<_, _>>(
-config, eth_config, sealing,
-)
-.await
+new_full::<sc_network::NetworkWorker<_, _>>(config, eth_config, sealing).await
}
sc_network::config::NetworkBackendType::Litep2p => {
-new_full::<Block, RuntimeApi, HostFunctions, sc_network::Litep2pNetworkBackend>(
-config, eth_config, sealing,
-)
-.await
+new_full::<sc_network::NetworkWorker<_, _>>(config, eth_config, sealing).await
}
}
}
@@ -675,35 +665,31 @@ pub fn new_chain_ops(
task_manager,
other,
..
-} = new_partial::<Block, RuntimeApi, HostFunctions, _>(
-config,
-eth_config,
-build_aura_grandpa_import_queue,
-)?;
+} = new_partial(config, eth_config, build_aura_grandpa_import_queue)?;
Ok((client, backend, import_queue, task_manager, other.3))
}

#[allow(clippy::too_many_arguments)]
-fn run_manual_seal_authorship<B, RA, HF>(
+fn run_manual_seal_authorship(
eth_config: &EthConfiguration,
sealing: Sealing,
-client: Arc<FullClient<B, RA, HF>>,
-transaction_pool: Arc<FullPool<B, FullClient<B, RA, HF>>>,
-select_chain: FullSelectChain<B>,
-block_import: BoxBlockImport<B>,
+client: Arc<Client>,
+transaction_pool: Arc<FullPool<Block, Client>>,
+select_chain: FullSelectChain<Block>,
+block_import: BoxBlockImport<Block>,
task_manager: &TaskManager,
prometheus_registry: Option<&Registry>,
telemetry: Option<&Telemetry>,
commands_stream: mpsc::Receiver<
-sc_consensus_manual_seal::rpc::EngineCommand<<B as BlockT>::Hash>,
+sc_consensus_manual_seal::rpc::EngineCommand<<Block as BlockT>::Hash>,
>,
) -> Result<(), ServiceError>
-where
-B: BlockT,
-RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
-RA: Send + Sync + 'static,
-RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
-HF: HostFunctionsT + 'static,
+// where
+// B: BlockT,
+// RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
+// RA: Send + Sync + 'static,
+// RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
+// HF: HostFunctionsT + 'static,
{
let proposer_factory = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
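The pattern running through this diff is monomorphization of the service stack: `new_partial`, the two import-queue builders, `new_full`, and `run_manual_seal_authorship` drop their `<B, RA, HF>` parameters (block, runtime API, host functions) and are written directly against the concrete `Block`, `RuntimeApi`, and `Client` types, since this node only ever instantiates them with the subtensor runtime; the old trait bounds are kept as commented-out `//` lines rather than deleted. The sketch below is illustrative only and is not code from the PR: `Client` stands in for the node's `FullClient` alias, and the function body is invented to keep the example self-contained.

```rust
use std::sync::Arc;

// Stand-in for the node's concrete `FullClient<Block, RuntimeApi, ...>` alias.
struct Client;

// Before the PR (roughly): generic over block, runtime API, and host
// functions, so every call site needed a turbofish naming all of them:
//
//     pub fn new_partial<B, RA, HF, BIQ>(..., build_import_queue: BIQ) -> ...
//     where
//         B: BlockT<Hash = H256>,
//         RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>> + Send + Sync + 'static,
//         HF: HostFunctionsT + 'static, ...
//
//     new_partial::<Block, RuntimeApi, HostFunctions, _>(config, eth_config, biq)?;

// After the PR: only the import-queue closure stays generic; the block,
// runtime, and client types are fixed, so the bounds and turbofish disappear.
fn new_partial<BIQ>(build_import_queue: BIQ) -> Arc<Client>
where
    BIQ: FnOnce(Arc<Client>),
{
    let client = Arc::new(Client);
    build_import_queue(Arc::clone(&client));
    client
}

fn main() {
    // Mirrors the updated call in `new_chain_ops`:
    //     new_partial(config, eth_config, build_aura_grandpa_import_queue)?;
    let _client = new_partial(|_client| {});
}
```

The trade-off is generality for simpler signatures: call sites shrink accordingly, as in `build_full`, where `new_full::<Block, RuntimeApi, HostFunctions, sc_network::NetworkWorker<_, _>>(...)` becomes `new_full::<sc_network::NetworkWorker<_, _>>(...)`.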