Commit ab79f68

feat: map l1 batch numbers to their l1 block number equivalent (#113)
* feat: map l1 batch numbers to their l1 block number equivalent
* chore: ceil instead of add one
* chore: make mapping task cancellable
* chore: remove `println!`
* chore: clone tokens directly instead of passing around
1 parent a347f4b commit ab79f68
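The "make mapping task cancellable" and "clone tokens directly" bullets describe cooperative shutdown of the new batch-to-block mapping task. A minimal sketch of that pattern, assuming the task uses `tokio_util::sync::CancellationToken`; the function names here are illustrative stand-ins, not this repo's actual identifiers:

```rust
use tokio_util::sync::CancellationToken;

// Hypothetical stand-in for the mapping loop added in this commit.
async fn map_batches_to_blocks() { /* ... */ }

fn spawn_mapping_task(token: &CancellationToken) -> tokio::task::JoinHandle<()> {
    // Clone the token directly for the spawned task rather than passing
    // references around (cf. the last bullet above).
    let token = token.clone();
    tokio::spawn(async move {
        tokio::select! {
            // Resolves once `token.cancel()` is called elsewhere,
            // letting the task exit cleanly mid-run.
            _ = token.cancelled() => {}
            _ = map_batches_to_blocks() => {}
        }
    })
}
```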

File tree: 10 files changed (+228 -121 lines)

src/cli.rs (-3)

```diff
@@ -109,9 +109,6 @@ pub enum Command {
         /// The path to the storage solution.
         #[arg(short, long, default_value = snapshot::DEFAULT_DB_PATH)]
         db_path: Option<String>,
-        /// Number of chunks to split storage chunks into.
-        #[arg(short, long, default_value_t = snapshot::DEFAULT_NUM_CHUNKS)]
-        num_chunks: usize,
         /// The directory to export the snapshot files to.
         directory: String,
     },
```
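With `num_chunks` gone from the `ExportSnapshot` arguments, chunk sizing is no longer a user-facing concern. Assuming clap's default kebab-case subcommand naming and a hypothetical binary name (`state-reconstruct` is an illustration, not confirmed by this page), an export invocation now takes only the database path and output directory:

```console
$ state-reconstruct export-snapshot --db-path ./snapshot_db ./snapshot-export
```

A previously valid `--num-chunks 10` would now be rejected as an unknown argument.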

src/main.rs (+17 -16)

```diff
@@ -12,9 +12,9 @@ use std::{
     path::{Path, PathBuf},
 };
 
+use ::eyre::Result;
 use clap::Parser;
 use cli::{Cli, Command, ReconstructSource};
-use eyre::Result;
 use processor::snapshot::{
     exporter::SnapshotExporter, importer::SnapshotImporter, SnapshotBuilder,
 };
@@ -75,24 +75,29 @@ async fn main() -> Result<()> {
         None => env::current_dir()?.join(storage::DEFAULT_DB_NAME),
     };
 
-    if let Some(directory) = snapshot {
-        tracing::info!("Trying to restore state from snapshot...");
-        let importer = SnapshotImporter::new(PathBuf::from(directory));
-        importer.run(&db_path.clone()).await?;
-    }
+    let snapshot_end_batch = match snapshot {
+        Some(directory) => {
+            tracing::info!("Trying to restore state from snapshot...");
+            let importer = SnapshotImporter::new(PathBuf::from(directory));
+            let end_batch = importer.run(&db_path.clone()).await?;
+            Some(end_batch)
+        }
+        None => None,
+    };
 
     match source {
         ReconstructSource::L1 { l1_fetcher_options } => {
             let fetcher_options = l1_fetcher_options.into();
             let processor = TreeProcessor::new(db_path.clone()).await?;
             let fetcher = L1Fetcher::new(fetcher_options, Some(processor.get_inner_db()))?;
+
             let (tx, rx) = mpsc::channel::<CommitBlock>(5);
 
             let processor_handle = tokio::spawn(async move {
                 processor.run(rx).await;
             });
 
-            fetcher.run(tx).await?;
+            fetcher.run(tx, snapshot_end_batch).await?;
             processor_handle.await?;
         }
         ReconstructSource::File { file } => {
@@ -128,7 +133,7 @@ async fn main() -> Result<()> {
                 processor.run(rx).await;
             });
 
-            fetcher.run(tx).await?;
+            fetcher.run(tx, None).await?;
             processor_handle.await?;
 
             tracing::info!("Successfully downloaded CommitBlocks to {}", file);
@@ -159,7 +164,7 @@ async fn main() -> Result<()> {
             let processor = SnapshotBuilder::new(db_path);
 
             let mut fetcher_options: L1FetcherOptions = l1_fetcher_options.into();
-            if let Ok(batch_number) = processor.get_latest_l1_batch_number() {
+            if let Ok(batch_number) = processor.get_latest_l1_block_number() {
                 let batch_number = batch_number.as_u64();
                 if batch_number > ethereum::GENESIS_BLOCK {
                     tracing::info!(
@@ -176,18 +181,14 @@ async fn main() -> Result<()> {
                 processor.run(rx).await;
             });
 
-            fetcher.run(tx).await?;
+            fetcher.run(tx, None).await?;
             processor_handle.await?;
         }
-        Command::ExportSnapshot {
-            db_path,
-            num_chunks,
-            directory,
-        } => {
+        Command::ExportSnapshot { db_path, directory } => {
             let export_path = Path::new(&directory);
             std::fs::create_dir_all(export_path)?;
             let exporter = SnapshotExporter::new(export_path, db_path)?;
-            exporter.export_snapshot(num_chunks)?;
+            exporter.export_snapshot()?;
 
             tracing::info!("Succesfully exported snapshot files to \"{directory}\"!");
         }
```
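Taken together, the `main.rs` changes thread the snapshot's final batch number from the importer into the L1 fetcher, so fetching can resume where the imported snapshot left off instead of starting from scratch. A condensed sketch of the new control flow, simplified from the diff above rather than verbatim source:

```rust
// `snapshot` is the optional snapshot directory from the CLI.
let snapshot_end_batch: Option<U64> = match snapshot {
    Some(directory) => {
        // `run` now returns the batch number recorded in the snapshot
        // header instead of `()`.
        let importer = SnapshotImporter::new(PathBuf::from(directory));
        Some(importer.run(&db_path.clone()).await?)
    }
    None => None,
};

// The fetcher receives the optional end batch; per the commit title, it
// maps that batch number to its L1 block number equivalent before resuming.
fetcher.run(tx, snapshot_end_batch).await?;
```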

src/processor/snapshot/exporter.rs (+10 -7)

```diff
@@ -16,6 +16,9 @@ use crate::processor::snapshot::{
     DEFAULT_DB_PATH, SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX, SNAPSHOT_HEADER_FILE_NAME,
 };
 
+/// Number of storage logs included in each chunk.
+const SNAPSHOT_CHUNK_SIZE: usize = 1_000_000;
+
 pub struct SnapshotExporter {
     basedir: PathBuf,
     database: SnapshotDatabase,
@@ -35,7 +38,7 @@ impl SnapshotExporter {
         })
     }
 
-    pub fn export_snapshot(&self, num_chunks: usize) -> Result<()> {
+    pub fn export_snapshot(&self) -> Result<()> {
         let l1_batch_number = self
             .database
             .get_latest_l1_batch_number()?
@@ -50,7 +53,7 @@ impl SnapshotExporter {
             ..Default::default()
         };
 
-        self.export_storage_logs(num_chunks, &mut header)?;
+        self.export_storage_logs(&mut header)?;
         self.export_factory_deps(&mut header)?;
 
         let path = self.basedir.join(SNAPSHOT_HEADER_FILE_NAME);
@@ -97,7 +100,7 @@ impl SnapshotExporter {
         Ok(())
     }
 
-    fn export_storage_logs(&self, num_chunks: usize, header: &mut SnapshotHeader) -> Result<()> {
+    fn export_storage_logs(&self, header: &mut SnapshotHeader) -> Result<()> {
         tracing::info!("Exporting storage logs...");
 
         let num_logs = self.database.get_last_repeated_key_index()?;
@@ -108,17 +111,17 @@ impl SnapshotExporter {
             .database
             .iterator_cf(index_to_key_map, rocksdb::IteratorMode::Start);
 
-        let chunk_size = num_logs / num_chunks as u64;
+        let num_chunks = num_logs.div_ceil(SNAPSHOT_CHUNK_SIZE as u64);
         for chunk_id in 0..num_chunks {
             tracing::info!("Serializing chunk {}/{}...", chunk_id + 1, num_chunks);
 
             let mut chunk = SnapshotStorageLogsChunk::default();
-            for _ in 0..chunk_size {
+            for _ in 0..SNAPSHOT_CHUNK_SIZE {
                 if let Some(Ok((_, key))) = iterator.next() {
                     let key = U256::from_big_endian(&key);
                     if let Ok(Some(entry)) = self.database.get_storage_log(&key) {
                         chunk.storage_logs.push(entry);
-                    }
+                    };
                 } else {
                     break;
                 }
@@ -131,7 +134,7 @@ impl SnapshotExporter {
             header
                 .storage_logs_chunks
                 .push(SnapshotStorageLogsChunkMetadata {
-                    chunk_id: chunk_id as u64,
+                    chunk_id,
                     filepath: path
                         .clone()
                         .into_os_string()
```
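The exporter now fixes the chunk *size* and derives the chunk *count*, using ceiling division so a trailing partial chunk is still emitted; the old truncating division (`num_logs / num_chunks`) could leave the remainder of the logs out of any chunk. A tiny worked example of the arithmetic (`u64::div_ceil` is stable since Rust 1.73):

```rust
fn main() {
    const SNAPSHOT_CHUNK_SIZE: u64 = 1_000_000;
    let num_logs: u64 = 2_500_000;

    // Truncating division plans only 2 chunks, stranding 500_000 logs.
    assert_eq!(num_logs / SNAPSHOT_CHUNK_SIZE, 2);

    // Ceiling division plans 3 chunks: two full plus one partial.
    assert_eq!(num_logs.div_ceil(SNAPSHOT_CHUNK_SIZE), 3);
}
```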

src/processor/snapshot/importer.rs (+5 -6)

```diff
@@ -6,7 +6,6 @@ use std::{
 use ethers::types::U64;
 use eyre::Result;
 use regex::{Captures, Regex};
-use state_reconstruct_fetcher::constants::ethereum::GENESIS_BLOCK;
 use state_reconstruct_storage::types::{
     Proto, SnapshotFactoryDependencies, SnapshotHeader, SnapshotStorageLogsChunk,
     SnapshotStorageLogsChunkMetadata,
@@ -29,7 +28,8 @@ impl SnapshotImporter {
         Self { directory }
     }
 
-    pub async fn run(self, db_path: &Path) -> Result<()> {
+    /// Run the snapshot importer task. Returns the batch number contained in the header.
+    pub async fn run(self, db_path: &Path) -> Result<U64> {
         let (tx, rx) = mpsc::channel(1);
 
         let header = self.read_header().expect("failed to read header filepath");
@@ -46,14 +46,13 @@ impl SnapshotImporter {
             }
         });
 
-        let l1_batch_number = header.l1_batch_number + GENESIS_BLOCK;
+        let l1_batch_number = U64::from(header.l1_batch_number);
         let mut tree = TreeWrapper::new_snapshot_wrapper(db_path)
             .await
             .expect("can't create tree");
-        tree.restore_from_snapshot(rx, U64::from(l1_batch_number))
-            .await?;
+        tree.restore_from_snapshot(rx, l1_batch_number).await?;
 
-        Ok(())
+        Ok(l1_batch_number)
     }
 
     fn read_header(&self) -> Result<SnapshotHeader> {
```

src/processor/snapshot/mod.rs (+3 -4)

```diff
@@ -25,7 +25,6 @@ use crate::util::{h256_to_u256, unpack_block_info};
 pub const DEFAULT_DB_PATH: &str = "snapshot_db";
 pub const SNAPSHOT_HEADER_FILE_NAME: &str = "snapshot-header.json";
 pub const SNAPSHOT_FACTORY_DEPS_FILE_NAME_SUFFIX: &str = "factory_deps.proto.gzip";
-pub const DEFAULT_NUM_CHUNKS: usize = 10;
 
 pub struct SnapshotBuilder {
     database: SnapshotDatabase,
@@ -53,8 +52,8 @@ impl SnapshotBuilder {
         Self { database }
     }
 
-    // Gets the next L1 batch number to be processed for ues in state recovery.
-    pub fn get_latest_l1_batch_number(&self) -> Result<U64> {
+    // Gets the next L1 block number to be processed for ues in state recovery.
+    pub fn get_latest_l1_block_number(&self) -> Result<U64> {
         self.database
             .get_latest_l1_block_number()
             .map(|o| o.unwrap_or(U64::from(0)))
@@ -107,7 +106,7 @@ impl Processor for SnapshotBuilder {
 
         if self
             .database
-            .update_storage_log_value(index as u64, &value.to_fixed_bytes())
+            .update_storage_log_value(index as u64, value)
             .is_err()
         {
            let max_idx = self
```

src/processor/tree/tree_wrapper.rs (+6 -3)

```diff
@@ -168,18 +168,21 @@ impl TreeWrapper {
             total_tree_entries += tree_entries.len();
             self.tree.extend(tree_entries);
 
-            tracing::info!("Chunk {} was succesfully imported!", i + 1);
+            tracing::info!("Chunk {} was successfully imported!", i + 1);
             i += 1;
         }
 
         tracing::info!(
-            "Succesfully imported snapshot containing {total_tree_entries} storage logs!",
+            "Successfully imported snapshot containing {total_tree_entries} storage logs!",
         );
 
+        let root_hash = hex::encode(self.tree.latest_root_hash());
+        tracing::debug!("Current root hash is: {}", root_hash);
+
         self.inner_db
             .lock()
             .await
-            .set_latest_l1_batch_number(l1_batch_number.as_u64() + 1)?;
+            .set_latest_l1_batch_number(l1_batch_number.as_u64())?;
 
         Ok(())
     }
```
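Two behavioral notes fall out of this hunk: the stored batch number no longer has `+ 1` added, so it now matches exactly what the snapshot header names (which is what the new resume logic expects), and the restored Merkle root is logged at debug level. The latter makes it easy to sanity-check an import; a hedged sketch, where `Tree` is a hypothetical type and the expected hex string is a placeholder:

```rust
/// Sketch: sanity-check a restored tree root against a known-good value.
/// `latest_root_hash()` mirrors the call in the diff above.
fn check_root(tree: &Tree, expected_root_hex: &str) {
    let root_hash = hex::encode(tree.latest_root_hash());
    if root_hash != expected_root_hex {
        tracing::warn!("Restored root {root_hash} does not match expected root");
    }
}
```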
