From a7e2ee73b81f12fdefbc2d03b1e69018f006b0f8 Mon Sep 17 00:00:00 2001
From: Stephen Akinyemi
Date: Sun, 16 Feb 2025 15:26:26 +0100
Subject: [PATCH] refactor: simplify image pulling to focus on layer extraction
 in preparation for overlayfs (#140)

- Remove monofs layer creation and merging during image pulls
- Store extracted layers in a dedicated layers directory instead of download dir
- Remove head_cid columns from images and layers tables
- Rename layer/image "complete" checks to "exists" checks
- Remove monoimage module and registry implementation
- Update logging messages to use consistent lowercase style
---
 monocore/bin/monocore.rs                      |  17 +-
 monocore/lib/lib.rs                           |   1 -
 monocore/lib/management/db.rs                 | 108 +----
 monocore/lib/management/image.rs              | 371 ++++++------------
 monocore/lib/management/menv.rs               |   4 +-
 .../oci/20250128014900_create_images.up.sql   |   1 -
 .../oci/20250128014904_create_layers.up.sql   |   1 -
 monocore/lib/monoimage/mod.rs                 |   9 -
 monocore/lib/monoimage/registry.rs            |  54 ---
 monocore/lib/oci/implementations/docker.rs    |   6 +-
 monofs/bin/monofs.rs                          |   8 +-
 monofs/lib/management/mfs.rs                  |  58 +--
 12 files changed, 179 insertions(+), 459 deletions(-)
 delete mode 100644 monocore/lib/monoimage/mod.rs
 delete mode 100644 monocore/lib/monoimage/registry.rs

diff --git a/monocore/bin/monocore.rs b/monocore/bin/monocore.rs
index 29de5da..5718afb 100644
--- a/monocore/bin/monocore.rs
+++ b/monocore/bin/monocore.rs
@@ -10,24 +10,31 @@ use monocore::{
 
 #[tokio::main]
 async fn main() -> MonocoreResult<()> {
-    tracing_subscriber::fmt::init();
+    tracing_subscriber::fmt()
+        .with_target(false)
+        .with_file(false)
+        .with_line_number(false)
+        .with_thread_ids(false)
+        .with_thread_names(false)
+        .with_level(true)
+        .init();
 
     // Parse command line arguments
     let args = MonocoreArgs::parse();
 
     match args.subcommand {
         Some(MonocoreSubcommand::Init { path }) => {
-            tracing::info!("Initializing monocore project: path={path:?}");
+            tracing::info!("initializing monocore project: path={path:?}");
             management::init_menv(path).await?;
-            tracing::info!("Successfully initialized monocore project");
+            tracing::info!("successfully initialized monocore project");
         }
         Some(MonocoreSubcommand::Pull {
             image,
             image_group,
             name,
         }) => {
-            tracing::info!("Pulling image: name={name}, image={image}, image_group={image_group}");
+            tracing::info!("pulling image: name={name}, image={image}, image_group={image_group}");
             management::pull_image(name, image, image_group).await?;
-            tracing::info!("Successfully pulled image");
+            tracing::info!("successfully pulled image");
         }
         Some(_) => (), // TODO: implement other subcommands
         None => {
diff --git a/monocore/lib/lib.rs b/monocore/lib/lib.rs
index 2efd831..d5c70ae 100644
--- a/monocore/lib/lib.rs
+++ b/monocore/lib/lib.rs
@@ -70,7 +70,6 @@ mod error;
 pub mod cli;
 pub mod config;
 pub mod management;
-pub mod monoimage;
 pub mod oci;
 pub mod runtime;
 pub mod utils;
diff --git a/monocore/lib/management/db.rs b/monocore/lib/management/db.rs
index 2f64c32..b33009a 100644
--- a/monocore/lib/management/db.rs
+++ b/monocore/lib/management/db.rs
@@ -388,114 +388,18 @@ pub(crate) async fn save_or_update_layer(
     }
 }
 
-/// Updates the head CID of a layer in the database.
-///
-/// ## Arguments
-///
-/// * `pool` - The database connection pool
-/// * `digest` - The digest of the layer to update
-/// * `head_cid` - The root CID of the monofs layer
-pub(crate) async fn update_layer_head_cid(
-    pool: &Pool<Sqlite>,
-    digest: &str,
-    head_cid: &str,
-) -> MonocoreResult<()> {
-    sqlx::query(
-        r#"
-        UPDATE layers
-        SET head_cid = ?, modified_at = CURRENT_TIMESTAMP
-        WHERE digest = ?
-        "#,
-    )
-    .bind(head_cid)
-    .bind(digest)
-    .execute(pool)
-    .await?;
-
-    Ok(())
-}
-
-/// Updates the head CID of an image in the database.
-///
-/// ## Arguments
-///
-/// * `pool` - The database connection pool
-/// * `reference` - The reference of the image to update
-/// * `head_cid` - The root CID of the merged monofs layers
-pub(crate) async fn update_image_head_cid(
-    pool: &Pool<Sqlite>,
-    reference: &str,
-    head_cid: &str,
-) -> MonocoreResult<()> {
-    sqlx::query(
-        r#"
-        UPDATE images
-        SET head_cid = ?, modified_at = CURRENT_TIMESTAMP
-        WHERE reference = ?
-        "#,
-    )
-    .bind(head_cid)
-    .bind(reference)
-    .execute(pool)
-    .await?;
-
-    Ok(())
-}
-
-/// Gets all layer CIDs for an image in base-to-top order.
-///
-/// This function returns a vector of tuples containing:
-/// - The layer's digest
-/// - The layer's head CID (if it exists)
-///
-/// The layers are returned in the order they should be applied,
-/// from base layer to top layer.
-///
-/// ## Arguments
-///
-/// * `pool` - The database connection pool
-/// * `reference` - The reference of the image to get layers for
-pub(crate) async fn get_image_layer_cids(
-    pool: &Pool<Sqlite>,
-    reference: &str,
-) -> MonocoreResult<Vec<(String, Option<String>)>> {
-    let records = sqlx::query(
-        r#"
-        SELECT l.digest, l.head_cid
-        FROM layers l
-        JOIN manifests m ON l.manifest_id = m.id
-        JOIN images i ON m.image_id = i.id
-        WHERE i.reference = ?
-        ORDER BY l.id ASC
-        "#,
-    )
-    .bind(reference)
-    .fetch_all(pool)
-    .await?;
-
-    Ok(records
-        .into_iter()
-        .map(|record| {
-            (
-                record.get::<String, _>("digest"),
-                record.get::<Option<String>, _>("head_cid"),
-            )
-        })
-        .collect())
-}
-
-/// Checks if a layer exists and is complete (has head_cid) in the database.
+/// Checks if a layer exists in the database.
 ///
 /// ## Arguments
 ///
 /// * `pool` - The database connection pool
 /// * `digest` - The digest string of the layer to check
-pub(crate) async fn layer_complete(pool: &Pool<Sqlite>, digest: &str) -> MonocoreResult<bool> {
+pub(crate) async fn layer_exists(pool: &Pool<Sqlite>, digest: &str) -> MonocoreResult<bool> {
     let record = sqlx::query(
         r#"
         SELECT COUNT(*) as count FROM layers
-        WHERE digest = ? AND head_cid IS NOT NULL
+        WHERE digest = ?
         "#,
     )
     .bind(digest)
@@ -505,18 +409,18 @@ pub(crate) async fn layer_complete(pool: &Pool<Sqlite>, digest: &str) -> Monocor
     Ok(record.get::<i64, _>("count") > 0)
 }
 
-/// Checks if an image exists and is complete (has head_cid) in the database.
+/// Checks if an image exists in the database.
 ///
 /// ## Arguments
 ///
 /// * `pool` - The database connection pool
 /// * `reference` - The reference string of the image to check
-pub(crate) async fn image_complete(pool: &Pool<Sqlite>, reference: &str) -> MonocoreResult<bool> {
+pub(crate) async fn image_exists(pool: &Pool<Sqlite>, reference: &str) -> MonocoreResult<bool> {
     let record = sqlx::query(
         r#"
         SELECT COUNT(*) as count FROM images
-        WHERE reference = ? AND head_cid IS NOT NULL
+        WHERE reference = ?
"#, ) .bind(reference) diff --git a/monocore/lib/management/image.rs b/monocore/lib/management/image.rs index d3dada3..a0d09d5 100644 --- a/monocore/lib/management/image.rs +++ b/monocore/lib/management/image.rs @@ -1,16 +1,13 @@ use crate::{ - management::{ - db::{self, OCI_DB_MIGRATOR}, - rootfs, - }, + management::db::{self, OCI_DB_MIGRATOR}, oci::{DockerRegistry, OciRegistryPull, Reference}, - utils::{env::get_monocore_home_path, path::OCI_DB_FILENAME, BLOCKS_SUBDIR}, + utils::{ + env::get_monocore_home_path, + path::{LAYERS_SUBDIR, OCI_DB_FILENAME}, + }, MonocoreError, MonocoreResult, }; use futures::future; -use ipldstore::{ipld::cid::Cid, IpldStore, Storable}; -use monofs::{filesystem::Dir, store::FlatFsStore}; -use sqlx::SqlitePool; use std::path::{Path, PathBuf}; use tempfile::tempdir; use tokio::{fs, process::Command}; @@ -103,7 +100,7 @@ pub async fn pull_image(name: Reference, image: bool, image_group: bool) -> Mono Ok(_) => Ok(()), Err(e) => { tracing::warn!( - "Sandboxes registry image pull failed: {}. Falling back to DockerRegistry pull.", + "sandboxes registry image pull failed: {}. falling back to DockerRegistry pull.", e ); // Create a new reference with docker.io registry for fallback @@ -139,19 +136,20 @@ pub async fn pull_docker_registry_image( let download_dir = download_dir.as_ref(); let monocore_home_path = get_monocore_home_path(); let db_path = monocore_home_path.join(OCI_DB_FILENAME); + let layers_dir = monocore_home_path.join(LAYERS_SUBDIR); + + // Create layers directory if it doesn't exist + fs::create_dir_all(&layers_dir).await?; let docker_registry = DockerRegistry::new(download_dir, &db_path).await?; // Get or create a connection pool to the database let pool = db::get_or_create_db_pool(&db_path, &OCI_DB_MIGRATOR).await?; - // Check if the image already exists and is complete in the database - tracing::info!("Checking if image {} already exists in database", image); - if db::image_complete(&pool, &image.to_string()).await? { - tracing::info!( - "Image {} already exists and is complete in database, skipping pull", - image - ); + // Check if the image already exists in the database + tracing::info!("checking if image {} already exists in database", image); + if db::image_exists(&pool, &image.to_string()).await? 
{ + tracing::info!("image {} already exists in database, skipping pull", image); return Ok(()); } @@ -164,7 +162,10 @@ pub async fn pull_docker_registry_image( let extraction_futures: Vec<_> = layer_paths .into_iter() - .map(|path| async move { extract_layer(path).await }) + .map(|path| { + let layers_dir = layers_dir.clone(); + async move { extract_layer(path, &layers_dir).await } + }) .collect(); // Wait for all extractions to complete @@ -172,19 +173,6 @@ pub async fn pull_docker_registry_image( result?; } - // Create monofs layers from extracted OCI layers and store their CIDs - let store_path = monocore_home_path.join(BLOCKS_SUBDIR); - fs::create_dir_all(&store_path).await?; - let store = FlatFsStore::new(store_path); - let _ = create_monofs_layers_from_extracted(&pool, download_dir, store.clone()).await?; - - // Get and merge the layers into a single monofs layer - let layer_dirs = get_ordered_layer_dirs(&pool, &image.to_string(), store.clone()).await?; - let (root_cid, _) = rootfs::merge_oci_based_monofs_layers(layer_dirs, store.clone()).await?; - - // Update the image's head_cid with the merged layer's root CID - db::update_image_head_cid(&pool, &image.to_string(), &root_cid.to_string()).await?; - Ok(()) } @@ -199,7 +187,7 @@ pub async fn pull_docker_registry_image( /// * Sandboxes registry image pull is not implemented pub async fn pull_sandboxes_registry_image(_image: &Reference) -> MonocoreResult<()> { return Err(MonocoreError::NotImplemented( - "Sandboxes registry image pull is not implemented".to_string(), + "sandboxes registry image pull is not implemented".to_string(), )); } @@ -224,25 +212,23 @@ pub async fn pull_sandboxes_registry_image_group(_group: &Reference) -> Monocore /// Extracts a layer from the downloaded tar.gz file into an extracted directory. 
 /// The extracted directory will be named as .extracted
-async fn extract_layer(layer_path: impl AsRef<Path>) -> MonocoreResult<()> {
+async fn extract_layer(
+    layer_path: impl AsRef<Path>,
+    extract_base_dir: impl AsRef<Path>,
+) -> MonocoreResult<()> {
     let layer_path = layer_path.as_ref();
     let file_name = layer_path
         .file_name()
         .and_then(|n| n.to_str())
         .ok_or_else(|| MonocoreError::LayerHandling {
-            source: std::io::Error::new(std::io::ErrorKind::NotFound, "Invalid layer file name"),
+            source: std::io::Error::new(std::io::ErrorKind::NotFound, "invalid layer file name"),
             layer: layer_path.display().to_string(),
         })?;
 
-    let parent_dir = layer_path
-        .parent()
-        .ok_or_else(|| MonocoreError::LayerHandling {
-            source: std::io::Error::new(std::io::ErrorKind::NotFound, "Parent directory not found"),
-            layer: file_name.to_string(),
-        })?;
-
     // Create the extraction directory with name .extracted
-    let extract_dir = parent_dir.join(format!("{}.{}", file_name, EXTRACTED_LAYER_SUFFIX));
+    let extract_dir = extract_base_dir
+        .as_ref()
+        .join(format!("{}.{}", file_name, EXTRACTED_LAYER_SUFFIX));
     fs::create_dir_all(&extract_dir)
         .await
         .map_err(|e| MonocoreError::LayerHandling {
@@ -251,7 +237,7 @@ async fn extract_layer(layer_path: impl AsRef<Path>) -> MonocoreResul
         })?;
 
     tracing::info!(
-        "Extracting layer {} to {}",
+        "extracting layer {} to {}",
         file_name,
         extract_dir.display()
     );
@@ -278,7 +264,7 @@ async fn extract_layer(layer_path: impl AsRef<Path>) -> MonocoreResul
     }
 
     tracing::info!(
-        "Successfully extracted layer {} to {}",
+        "successfully extracted layer {} to {}",
         file_name,
         extract_dir.display()
     );
@@ -301,125 +287,10 @@ async fn collect_layer_files(dir: impl AsRef<Path>) -> MonocoreResult
-async fn create_monofs_layers_from_extracted<S>(
-    pool: &SqlitePool,
-    download_dir: impl AsRef<Path>,
-    store: S,
-) -> MonocoreResult<Vec<(String, String, Dir<S>)>>
-where
-    S: IpldStore + Clone + Send + Sync + 'static,
-{
-    let mut monofs_layers = Vec::new();
-    let mut read_dir = fs::read_dir(download_dir).await?;
-
-    while let Ok(Some(entry)) = read_dir.next_entry().await {
-        let path = entry.path();
-        if path.is_dir() {
-            let file_name = path.file_name().and_then(|n| n.to_str()).ok_or_else(|| {
-                MonocoreError::LayerHandling {
-                    source: std::io::Error::new(
-                        std::io::ErrorKind::NotFound,
-                        "Invalid layer directory name",
-                    ),
-                    layer: path.display().to_string(),
-                }
-            })?;
-
-            // Skip if not an extracted layer directory
-            if !file_name.ends_with(EXTRACTED_LAYER_SUFFIX) {
-                continue;
-            }
-
-            // Get the original layer name (without .extracted suffix)
-            let layer_name = file_name
-                .strip_suffix(&format!(".{}", EXTRACTED_LAYER_SUFFIX))
-                .ok_or_else(|| MonocoreError::LayerHandling {
-                    source: std::io::Error::new(
-                        std::io::ErrorKind::NotFound,
-                        "Invalid layer name format",
-                    ),
-                    layer: file_name.to_string(),
-                })?;
-
-            // Create monofs layer from the extracted directory
-            tracing::info!("Creating monofs layer from {}", path.display());
-            let (root_cid, root_dir) =
-                rootfs::create_monofs_from_oci_layer(&path, store.clone()).await?;
-
-            // Update the layer's head_cid in the database
-            db::update_layer_head_cid(pool, layer_name, &root_cid.to_string()).await?;
-
-            monofs_layers.push((layer_name.to_string(), root_cid.to_string(), root_dir));
-        }
-    }
-
-    Ok(monofs_layers)
-}
-
-/// Gathers all layer directories for an image in base-to-top order.
-///
-/// This function:
-/// 1. Gets all layer CIDs from the database in the correct order
-/// 2. Verifies that all required layers have been processed
-/// 3. Loads each layer's directory from its CID
-/// 4. Returns a vector of layer directories in base-to-top order
-///
-/// ## Arguments
-///
-/// * `pool` - The database connection pool
-/// * `reference` - The reference of the image to get layers for
-/// * `store` - The IPLD store to load directories from
-async fn get_ordered_layer_dirs<S>(
-    pool: &SqlitePool,
-    reference: &str,
-    store: S,
-) -> MonocoreResult<Vec<Dir<S>>>
-where
-    S: IpldStore + Clone + Send + Sync + 'static,
-{
-    let layer_cids = db::get_image_layer_cids(pool, reference).await?;
-
-    // Verify all layers have been processed (have a head_cid)
-    let missing_layers: Vec<_> = layer_cids
-        .iter()
-        .filter(|(_, head_cid)| head_cid.is_none())
-        .map(|(digest, _)| digest.clone())
-        .collect();
-
-    if !missing_layers.is_empty() {
-        return Err(MonocoreError::LayerHandling {
-            source: std::io::Error::new(
-                std::io::ErrorKind::NotFound,
-                format!(
-                    "Some layers have not been processed: {}",
-                    missing_layers.join(", ")
-                ),
-            ),
-            layer: missing_layers[0].clone(),
-        });
-    }
-
-    // Load each layer's directory from its CID
-    let mut layer_dirs = Vec::new();
-    for (_, head_cid) in layer_cids {
-        let cid = head_cid.unwrap().parse::<Cid>()?;
-        let dir = Dir::load(&cid, store.clone()).await?;
-        layer_dirs.push(dir);
-    }
-
-    Ok(layer_dirs)
-}
-
 //--------------------------------------------------------------------------------------------------
 // Tests
 //--------------------------------------------------------------------------------------------------
@@ -427,7 +298,6 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
-    use sqlx::Row;
     use tempfile::TempDir;
 
     #[test_log::test(tokio::test)]
@@ -453,50 +323,37 @@ mod tests {
         let db_path = monocore_home.join(OCI_DB_FILENAME);
         let pool = db::get_or_create_db_pool(&db_path, &OCI_DB_MIGRATOR).await?;
 
-        // Verify database updates
-        let layer_cids = db::get_image_layer_cids(&pool, &image_ref.to_string()).await?;
-        assert!(!layer_cids.is_empty(), "Expected layers in database");
-
-        // Verify all layers have head_cids
-        for (digest, head_cid) in layer_cids {
-            assert!(head_cid.is_some(), "Layer {} should have head_cid", digest);
-
-            // Verify layer files exist
-            let layer_path = download_dir.join(&digest);
-            assert!(layer_path.exists(), "Layer file {} should exist", digest);
-
-            // Verify extracted directories exist
-            let extracted_path =
-                download_dir.join(format!("{}.{}", digest, EXTRACTED_LAYER_SUFFIX));
-            assert!(
-                extracted_path.exists(),
-                "Extracted directory {} should exist",
-                digest
-            );
-            assert!(
-                extracted_path.is_dir(),
-                "Extracted path should be a directory"
-            );
+        // Verify image exists in database
+        let image_exists = db::image_exists(&pool, &image_ref.to_string()).await?;
+        assert!(image_exists, "Image should exist in database");
+
+        // Verify layers directory exists and contains extracted layers
+        let layers_dir = monocore_home.join(LAYERS_SUBDIR);
+        assert!(layers_dir.exists(), "Layers directory should exist");
+
+        // Verify extracted layer directories exist
+        let mut entries = fs::read_dir(&layers_dir).await?;
+        let mut found_extracted_layers = false;
+        while let Some(entry) = entries.next_entry().await? {
+            if entry
+                .file_name()
+                .to_string_lossy()
+                .ends_with(EXTRACTED_LAYER_SUFFIX)
+            {
+                found_extracted_layers = true;
+                assert!(
+                    entry.path().is_dir(),
+                    "Extracted layer path should be a directory"
+                );
+            }
         }
+        assert!(
+            found_extracted_layers,
+            "Should have found extracted layer directories"
+        );
 
-        // Verify image has head_cid
-        let image_complete = db::image_complete(&pool, &image_ref.to_string()).await?;
-        assert!(image_complete, "Image should be marked as complete");
-
-        // Verify final merged monofs image contains nginx files
-        let store_path = monocore_home.join(BLOCKS_SUBDIR);
-        let store = FlatFsStore::new(store_path);
-
-        // Get the head CID from the database
-        let image_record = sqlx::query("SELECT head_cid FROM images WHERE reference = ?")
-            .bind(image_ref.to_string())
-            .fetch_one(&pool)
-            .await?;
-        let head_cid_str = image_record
-            .try_get::<Option<String>, _>("head_cid")?
-            .expect("head_cid should not be null");
-
-        helper::verify_nginx_files(&head_cid_str, store).await?;
+        // Verify nginx files exist in the extracted layers
+        helper::verify_nginx_files(&layers_dir).await?;
 
         Ok(())
     }
@@ -506,52 +363,70 @@ mod tests {
     mod helper {
         use super::*;
 
-        /// Helper function to verify that all expected nginx files exist in the merged monofs image
-        pub(super) async fn verify_nginx_files<S>(head_cid_str: &str, store: S) -> MonocoreResult<()>
-        where
-            S: IpldStore + Clone + Send + Sync + 'static,
-        {
-            let cid = head_cid_str.parse::<Cid>()?;
-            let final_dir = Dir::load(&cid, store.clone()).await?;
-
-            // Verify critical nginx paths and files exist
-            let etc_dir = final_dir
-                .get_dir("etc")
-                .await?
-                .expect("Merged image root should contain /etc directory");
-            let nginx_dir = etc_dir
-                .get_dir("nginx")
-                .await?
-                .expect("Merged image /etc should contain nginx directory");
-
-            // Check nginx.conf exists
-            let _nginx_conf = nginx_dir
-                .get_file("nginx.conf")
-                .await?
-                .expect("nginx.conf should exist in /etc/nginx");
-
-            // Check conf.d directory exists and contains default.conf
-            let conf_d = nginx_dir
-                .get_dir("conf.d")
-                .await?
-                .expect("conf.d directory should exist in /etc/nginx");
-            let _default_conf = conf_d
-                .get_file("default.conf")
-                .await?
-                .expect("default.conf should exist in /etc/nginx/conf.d");
-
-            // Verify nginx binary exists in /usr/sbin
-            let usr_sbin = final_dir
-                .get_dir("usr")
-                .await?
-                .expect("Merged image root should contain /usr directory")
-                .get_dir("sbin")
-                .await?
-                .expect("Merged image /usr should contain sbin directory");
-            let _nginx_binary = usr_sbin
-                .get_file("nginx")
-                .await?
-                .expect("nginx binary should exist in /usr/sbin");
+        /// Helper function to verify that all expected nginx files exist in the extracted layers
+        pub(super) async fn verify_nginx_files(layers_dir: impl AsRef<Path>) -> MonocoreResult<()> {
+            let mut found_nginx_conf = false;
+            let mut found_default_conf = false;
+            let mut found_nginx_binary = false;
+
+            // Check each extracted layer directory for nginx files
+            let mut entries = fs::read_dir(layers_dir).await?;
+            while let Some(entry) = entries.next_entry().await? {
+                if !entry
+                    .file_name()
+                    .to_string_lossy()
+                    .ends_with(EXTRACTED_LAYER_SUFFIX)
+                {
+                    continue;
+                }
+
+                let layer_path = entry.path();
+                tracing::info!("Checking layer: {}", layer_path.display());
+
+                // Check for nginx.conf
+                let nginx_conf = layer_path.join("etc").join("nginx").join("nginx.conf");
+                if nginx_conf.exists() {
+                    found_nginx_conf = true;
+                    tracing::info!("Found nginx.conf at {}", nginx_conf.display());
+                }
+
+                // Check for default.conf
+                let default_conf = layer_path
+                    .join("etc")
+                    .join("nginx")
+                    .join("conf.d")
+                    .join("default.conf");
+                if default_conf.exists() {
+                    found_default_conf = true;
+                    tracing::info!("Found default.conf at {}", default_conf.display());
+                }
+
+                // Check for nginx binary
+                let nginx_binary = layer_path.join("usr").join("sbin").join("nginx");
+                if nginx_binary.exists() {
+                    found_nginx_binary = true;
+                    tracing::info!("Found nginx binary at {}", nginx_binary.display());
+                }
+
+                // If we found all files, we can stop checking
+                if found_nginx_conf && found_default_conf && found_nginx_binary {
+                    break;
+                }
+            }
+
+            // Assert that we found all the expected files
+            assert!(
+                found_nginx_conf,
+                "nginx.conf should exist in one of the layers"
+            );
+            assert!(
+                found_default_conf,
+                "default.conf should exist in one of the layers"
+            );
+            assert!(
+                found_nginx_binary,
+                "nginx binary should exist in one of the layers"
+            );
 
             Ok(())
         }
diff --git a/monocore/lib/management/menv.rs b/monocore/lib/management/menv.rs
index 57908d3..1915b62 100644
--- a/monocore/lib/management/menv.rs
+++ b/monocore/lib/management/menv.rs
@@ -45,12 +45,12 @@ pub async fn init_menv(project_path: Option<PathBuf>) -> MonocoreResult<()> {
     // Initialize sandbox database
     let _ = db::init_db(&db_path, &SANDBOX_DB_MIGRATOR).await?;
-    tracing::info!("Initialized sandbox database at {}", db_path.display());
+    tracing::info!("initialized sandbox database at {}", db_path.display());
 
     // Create default config file if it doesn't exist
     create_default_config(&project_path).await?;
     tracing::info!(
-        "Created default config file at {}",
+        "created default config file at {}",
         project_path.join("monocore.yaml").display()
     );
 
diff --git a/monocore/lib/management/migrations/oci/20250128014900_create_images.up.sql b/monocore/lib/management/migrations/oci/20250128014900_create_images.up.sql
index 96e0c41..2ae06d2 100644
--- a/monocore/lib/management/migrations/oci/20250128014900_create_images.up.sql
+++ b/monocore/lib/management/migrations/oci/20250128014900_create_images.up.sql
@@ -5,7 +5,6 @@ CREATE TABLE IF NOT EXISTS images (
     id INTEGER PRIMARY KEY,
     reference TEXT NOT NULL UNIQUE,
     size_bytes INTEGER NOT NULL,
-    head_cid TEXT, -- the root cid of the merged layers in monofs
    last_used_at DATETIME,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    modified_at DATETIME DEFAULT CURRENT_TIMESTAMP
diff --git a/monocore/lib/management/migrations/oci/20250128014904_create_layers.up.sql b/monocore/lib/management/migrations/oci/20250128014904_create_layers.up.sql
index d8a0e33..0c043fd 100644
--- a/monocore/lib/management/migrations/oci/20250128014904_create_layers.up.sql
+++ b/monocore/lib/management/migrations/oci/20250128014904_create_layers.up.sql
@@ -7,7 +7,6 @@ CREATE TABLE IF NOT EXISTS layers (
     media_type TEXT NOT NULL,
     digest TEXT NOT NULL, -- the hash of the compressed layer
     diff_id TEXT NOT NULL, -- the hash of the uncompressed layer
-    head_cid TEXT, -- the root cid of the partial fs in monofs
    size_bytes INTEGER NOT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    modified_at DATETIME DEFAULT CURRENT_TIMESTAMP,
diff --git a/monocore/lib/monoimage/mod.rs b/monocore/lib/monoimage/mod.rs
deleted file mode 100644
index 3c3c8c8..0000000
--- a/monocore/lib/monoimage/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//! The `monoimage` module provides functionality for interacting with the Sandboxes Registry.
-
-mod registry;
-
-//--------------------------------------------------------------------------------------------------
-// Exports
-//--------------------------------------------------------------------------------------------------
-
-pub use registry::*;
diff --git a/monocore/lib/monoimage/registry.rs b/monocore/lib/monoimage/registry.rs
deleted file mode 100644
index c62918d..0000000
--- a/monocore/lib/monoimage/registry.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use std::path::PathBuf;
-
-use getset::Getters;
-use reqwest::Client;
-use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
-use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
-use sqlx::{Pool, Sqlite};
-
-use crate::{MonocoreError, MonocoreResult};
-
-//--------------------------------------------------------------------------------------------------
-// Types
-//--------------------------------------------------------------------------------------------------
-
-/// SandboxesRegistry is a client for interacting with the Sandboxes Registry.
-#[derive(Debug, Getters)]
-pub struct SandboxesRegistry {
-    /// The HTTP client used to make requests to the registry
-    #[getset(get = "pub with_prefix")] // TODO: Remove
-    client: ClientWithMiddleware,
-
-    /// The directory where image data is stored
-    #[getset(get = "pub with_prefix")]
-    store_dir: PathBuf,
-
-    /// The database connection pool
-    #[getset(get = "pub with_prefix")]
-    monoimage_db: Pool<Sqlite>,
-}
-
-impl SandboxesRegistry {
-    /// Creates a new Sandboxes Registry client with the specified store directory and database.
-    pub fn new(store_dir: PathBuf, monoimage_db: Pool<Sqlite>) -> Self {
-        let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3);
-        let client_builder = ClientBuilder::new(Client::new());
-        let client = client_builder
-            .with(RetryTransientMiddleware::new_with_policy(retry_policy))
-            .build();
-
-        Self {
-            client,
-            store_dir,
-            monoimage_db,
-        }
-    }
-
-    /// Pulls an image from the Sandboxes Registry.
-    pub async fn pull_image(&self, _repository: &str, _reference: &str) -> MonocoreResult<()> {
-        // For now, just return an error as specified
-        Err(MonocoreError::NotImplemented(
-            "Sandboxes Registry pull_image not yet implemented".to_string(),
-        ))
-    }
-}
diff --git a/monocore/lib/oci/implementations/docker.rs b/monocore/lib/oci/implementations/docker.rs
index e4d9b43..4e753a8 100644
--- a/monocore/lib/oci/implementations/docker.rs
+++ b/monocore/lib/oci/implementations/docker.rs
@@ -335,12 +335,12 @@ impl OciRegistryPull for DockerRegistry {
             .iter()
             .zip(config.rootfs().diff_ids())
             .map(|(layer_desc, diff_id)| async {
-                // Check if layer already exists and is complete in database
-                if management::layer_complete(&self.oci_db, &layer_desc.digest().to_string())
+                // Check if layer already exists in database
+                if management::layer_exists(&self.oci_db, &layer_desc.digest().to_string())
                     .await?
                 {
                     tracing::info!(
-                        "Layer {} already exists and is complete, skipping download",
+                        "layer {} already exists, skipping download",
                         layer_desc.digest()
                     );
                 } else {
diff --git a/monofs/bin/monofs.rs b/monofs/bin/monofs.rs
index bee7e4a..3b18d64 100644
--- a/monofs/bin/monofs.rs
+++ b/monofs/bin/monofs.rs
@@ -17,14 +17,14 @@ async fn main() -> anyhow::Result<()> {
     let args = MonofsArgs::parse();
 
     match args.subcommand {
         Some(MonofsSubcommand::Init { mount_dir }) => {
-            tracing::info!("Initializing monofs...");
+            tracing::info!("initializing monofs...");
             management::init_mfs(mount_dir).await?;
-            tracing::info!("Successfully initialized monofs");
+            tracing::info!("successfully initialized monofs");
         }
         Some(MonofsSubcommand::Detach { mount_dir, force }) => {
-            tracing::info!("Detaching monofs...");
+            tracing::info!("detaching monofs...");
             management::detach_mfs(mount_dir, force).await?;
-            tracing::info!("Successfully detached monofs");
+            tracing::info!("successfully detached monofs");
         }
         Some(_) => (), // TODO: implement other subcommands
         None => {
diff --git a/monofs/lib/management/mfs.rs b/monofs/lib/management/mfs.rs
index bc4ee42..8454cf8 100644
--- a/monofs/lib/management/mfs.rs
+++ b/monofs/lib/management/mfs.rs
@@ -43,7 +43,7 @@ pub async fn init_mfs(mount_dir: Option<PathBuf>) -> FsResult<u32> {
     // Ensure the mount directory is absolute
     let mount_dir = fs::canonicalize(&mount_dir).await?;
-    tracing::info!("Mount point available at {}", mount_dir.display());
+    tracing::info!("mount point available at {}", mount_dir.display());
 
     // Create the .mfs directory adjacent to the mount point
     let mfs_data_dir = PathBuf::from(format!("{}.{}", mount_dir.display(), MFS_DIR_SUFFIX));
@@ -52,39 +52,39 @@ pub async fn init_mfs(mount_dir: Option<PathBuf>) -> FsResult<u32> {
     // Find an available port
     let port = super::find_available_port(DEFAULT_HOST, DEFAULT_NFS_PORT).await?;
-    tracing::info!("Found available port: {}", port);
+    tracing::info!("found available port: {}", port);
 
     // Create required directories
     let log_dir = mfs_data_dir.join(LOG_SUBDIR);
     fs::create_dir_all(&log_dir).await?;
-    tracing::info!("Log directory available at {}", log_dir.display());
+    tracing::info!("log directory available at {}", log_dir.display());
 
     // Create the fs database file
     let fs_db_path = mfs_data_dir.join(FS_DB_FILENAME);
     if !fs_db_path.exists() {
         fs::File::create(&fs_db_path).await?;
-        tracing::info!("Created fs database at {}", fs_db_path.display());
+        tracing::info!("created fs database at {}", fs_db_path.display());
     }
 
     // Initialize the filesystem database schema
     db::init_db(&fs_db_path, &FS_DB_MIGRATOR).await?;
-    tracing::info!("Initialized fs database schema");
+    tracing::info!("initialized fs database schema");
 
     // Create the blocks directory
     let blocks_dir = mfs_data_dir.join(BLOCKS_SUBDIR);
     fs::create_dir_all(&blocks_dir).await?;
-    tracing::info!("Blocks directory available at {}", blocks_dir.display());
+    tracing::info!("blocks directory available at {}", blocks_dir.display());
 
     // Start the supervisor process
     let child_name = mount_dir
         .file_name()
         .map(|name| name.to_string_lossy().to_string())
-        .expect("Failed to get file name for mount point");
+        .expect("failed to get file name for mount point");
 
     let mfsrun_path =
         monoutils::path::resolve_binary_path(MFSRUN_BIN_PATH_ENV_VAR, DEFAULT_MFSRUN_BIN_PATH)?;
 
-    tracing::info!("Mounting the filesystem...");
+    tracing::info!("mounting the filesystem...");
     let status = Command::new(mfsrun_path)
         .arg("supervisor")
         .arg("--log-dir")
@@ -104,19 +104,19 @@ pub async fn init_mfs(mount_dir: Option<PathBuf>) -> FsResult<u32> {
         .spawn()?;
 
     tracing::info!(
-        "Started supervisor process with PID: {}",
+        "started supervisor process with PID: {}",
         status.id().unwrap_or(0)
     );
 
     // Mount the filesystem
     mount_fs(&mount_dir, DEFAULT_HOST, port).await?;
-    tracing::info!("Mounted filesystem at {}", mount_dir.display());
+    tracing::info!("mounted filesystem at {}", mount_dir.display());
 
     // Create symbolic link to mfs_data_dir in mount directory
     let link_path = mount_dir.join(MFS_LINK_FILENAME);
     if !link_path.exists() {
         fs::symlink(&mfs_data_dir, &link_path).await?;
-        tracing::info!("Created symbolic link at {}", link_path.display());
+        tracing::info!("created symbolic link at {}", link_path.display());
     }
 
     Ok(port)
@@ -147,7 +147,7 @@ pub async fn detach_mfs(mount_dir: Option<PathBuf>, force: bool) -> FsResult<()>
     // Find the MFS root directory
     let mfs_root = find::find_mfs_root(&start_path).await?;
-    tracing::info!("Found MFS root at {}", mfs_root.display());
+    tracing::info!("found MFS root at {}", mfs_root.display());
 
     // Get the filesystem database path
     let db_path = get_fs_db_path(&mfs_root).await?;
@@ -158,7 +158,7 @@ pub async fn detach_mfs(mount_dir: Option<PathBuf>, force: bool) -> FsResult<()>
     // Get and terminate the supervisor process
     match get_supervisor_pid(&db_path, &mfs_root).await {
         Ok(Some(supervisor_pid)) => {
-            tracing::info!("Found supervisor process with PID: {}", supervisor_pid);
+            tracing::info!("found supervisor process with PID: {}", supervisor_pid);
 
             // Check if process is still running
             let pid = Pid::from_raw(supervisor_pid);
@@ -167,20 +167,20 @@ pub async fn detach_mfs(mount_dir: Option<PathBuf>, force: bool) -> FsResult<()>
                     // Process exists, send SIGTERM
                     if let Err(e) = signal::kill(pid, Signal::SIGTERM) {
                         tracing::warn!(
-                            "Failed to send SIGTERM to supervisor process {}: {}",
+                            "failed to send SIGTERM to supervisor process {}: {}",
                             supervisor_pid,
                             e
                         );
                     } else {
-                        tracing::info!("Sent SIGTERM to supervisor process {}", supervisor_pid);
+                        tracing::info!("sent SIGTERM to supervisor process {}", supervisor_pid);
                    }
                }
                Err(nix::errno::Errno::ESRCH) => {
-                    tracing::info!("Supervisor process {} no longer exists", supervisor_pid);
+                    tracing::info!("supervisor process {} no longer exists", supervisor_pid);
                }
                Err(e) => {
                    tracing::warn!(
-                        "Failed to check if supervisor process {} exists: {}",
+                        "failed to check if supervisor process {} exists: {}",
                        supervisor_pid,
                        e
                    );
@@ -189,8 +189,8 @@ pub async fn detach_mfs(mount_dir: Option<PathBuf>, force: bool) -> FsResult<()>
         }
         Ok(None) => {
             tracing::warn!(
-                "No supervisor PID found in database for mount point {}. \
-                 The supervisor may have already exited.",
+                "no supervisor PID found in database for mount point {}. \
+                 the supervisor may have already exited.",
                 mfs_root.display()
             );
         }
@@ -252,7 +252,7 @@ async fn unmount_fs(mount_dir: impl AsRef<Path>, force: bool) -> FsResult<()> {
         )));
     }
 
-    tracing::info!("Unmounting filesystem at {}", mount_dir.display());
+    tracing::info!("unmounting filesystem at {}", mount_dir.display());
 
     // Construct the unmount command
     let mut cmd = Command::new("umount");
@@ -265,13 +265,13 @@ async fn unmount_fs(mount_dir: impl AsRef<Path>, force: bool) -> FsResult<()> {
     if !status.success() {
         return Err(FsError::UnmountFailed(format!(
-            "Unmount command exited with status: {}",
+            "unmount command exited with status: {}",
             status
         )));
     }
 
     tracing::info!(
-        "Successfully unmounted filesystem at {}",
+        "successfully unmounted filesystem at {}",
         mount_dir.display()
     );
     Ok(())
@@ -283,7 +283,7 @@ async fn mount_fs(mount_dir: impl AsRef<Path>, host: &str, port: u32) -> FsResul
     // Create mount point if it doesn't exist
     fs::create_dir_all(&mount_dir).await?;
-    tracing::info!("Mount point available at {}", mount_dir.display());
+    tracing::info!("mount point available at {}", mount_dir.display());
 
     // Check if mount point is empty
     let mut entries = fs::read_dir(&mount_dir).await?;
@@ -292,7 +292,7 @@ async fn mount_fs(mount_dir: impl AsRef<Path>, host: &str, port: u32) -> FsResul
             mount_dir.to_string_lossy().to_string(),
         ));
     }
-    tracing::info!("Mounting NFS share at {}", mount_dir.display());
+    tracing::info!("mounting NFS share at {}", mount_dir.display());
 
     // Wait for the port to be ready. If we don't do this, the mount command will retry every
     // 5+ seconds on macos.
@@ -320,16 +320,16 @@ async fn mount_fs(mount_dir: impl AsRef<Path>, host: &str, port: u32) -> FsResul
         .status()
         .await?;
 
-    tracing::info!("Mount command took {:?} to complete", start.elapsed());
+    tracing::info!("mount command took {:?} to complete", start.elapsed());
 
     if !status.success() {
         return Err(FsError::MountFailed(format!(
-            "Mount command exited with status: {}",
+            "mount command exited with status: {}",
             status
         )));
    }
 
-    tracing::info!("Successfully mounted NFS share at {}", mount_dir.display());
+    tracing::info!("successfully mounted NFS share at {}", mount_dir.display());
 
     Ok(())
 }
@@ -346,13 +346,13 @@ async fn wait_for_port(host: &str, port: u32) {
     loop {
         match TcpStream::connect(&addr).await {
             Ok(_) => {
-                tracing::info!("Port {} on {} is ready!", port, host);
+                tracing::info!("port {} on {} is ready!", port, host);
                 break;
             }
             Err(e) => {
                 let retry_delay = 50;
                 tracing::info!(
-                    "Port {} on {} is not ready yet (error: {}), retrying in {}ms...",
+                    "port {} on {} is not ready yet (error: {}), retrying in {}ms...",
                     port,
                     host,
                    e,