diff --git a/rs/execution_environment/tests/execution_test.rs b/rs/execution_environment/tests/execution_test.rs
index 21466424583..ddabb128835 100644
--- a/rs/execution_environment/tests/execution_test.rs
+++ b/rs/execution_environment/tests/execution_test.rs
@@ -9,8 +9,9 @@ use ic_config::{
 };
 use ic_management_canister_types_private::{
     CanisterIdRecord, CanisterSettingsArgs, CanisterSettingsArgsBuilder, CanisterStatusResultV2,
-    CreateCanisterArgs, DerivationPath, EcdsaKeyId, EmptyBlob, MasterPublicKeyId, Method, Payload,
-    SignWithECDSAArgs, TakeCanisterSnapshotArgs, UpdateSettingsArgs, IC_00,
+    CreateCanisterArgs, DerivationPath, EcdsaKeyId, EmptyBlob, LoadCanisterSnapshotArgs,
+    MasterPublicKeyId, Method, Payload, SignWithECDSAArgs, TakeCanisterSnapshotArgs,
+    UpdateSettingsArgs, IC_00,
 };
 use ic_registry_subnet_type::SubnetType;
 use ic_replicated_state::NumWasmPages;
@@ -789,6 +790,86 @@ fn take_canister_snapshot_request_fails_when_subnet_capacity_reached() {
     assert_eq!(error.code(), ErrorCode::SubnetOversubscribed);
 }
 
+#[test]
+fn load_canister_snapshot_request_fails_when_subnet_capacity_reached() {
+    let mut subnet_config = SubnetConfig::new(SubnetType::Application);
+    subnet_config.scheduler_config.scheduler_cores = 2;
+    let env = StateMachine::new_with_config(StateMachineConfig::new(
+        subnet_config,
+        HypervisorConfig {
+            subnet_memory_capacity: NumBytes::from(100 * MIB),
+            subnet_memory_reservation: NumBytes::from(0),
+            ..Default::default()
+        },
+    ));
+
+    let now = std::time::SystemTime::now();
+    env.set_time(now);
+    env.set_checkpoints_enabled(false);
+
+    let canister_id = create_universal_canister_with_cycles(
+        &env,
+        Some(CanisterSettingsArgsBuilder::new().build()),
+        INITIAL_CYCLES_BALANCE,
+    );
+    env.execute_ingress(
+        canister_id,
+        "update",
+        wasm()
+            // As there are 2 scheduler cores, the memory capacity is 100 / 2 = 50 MiB per core.
+            .memory_size_is_at_least(30 * MIB)
+            .reply_data(&[42])
+            .build(),
+    )
+    .expect("Error increasing the canister memory size");
+
+    let other_canister_id = create_universal_canister_with_cycles(
+        &env,
+        Some(CanisterSettingsArgsBuilder::new().build()),
+        INITIAL_CYCLES_BALANCE,
+    );
+    env.execute_ingress(
+        other_canister_id,
+        "update",
+        wasm()
+            // The memory capacity is (100 - 30) / 2 = 35 MiB per core.
+            .memory_size_is_at_least(25 * MIB)
+            .reply_data(&[42])
+            .build(),
+    )
+    .expect("Error increasing the canister memory size");
+
+    // This should take another 30 MiB on top of the 30 MiB of the canister state.
+    // The available memory at this point is 100 - 30 - 25 = 45 MiB.
+    let snapshot_id = env
+        .take_canister_snapshot(TakeCanisterSnapshotArgs::new(canister_id, None))
+        .unwrap()
+        .snapshot_id();
+
+    // Ensure that at least one round has passed between the attempts to take a snapshot.
+    env.tick();
+
+    // Uninstall the first canister to free up some memory. This should free up 30 MiB.
+    // The available memory at this point should be 100 - 30 - 25 - 30 + 30 = 45 MiB.
+    env.uninstall_code(canister_id).unwrap();
+
+    // Taking a snapshot of the second canister should take another 25 MiB,
+    // making the available memory 100 - 30 - 25 - 30 + 30 - 25 = 20 MiB.
+    env.take_canister_snapshot(TakeCanisterSnapshotArgs::new(other_canister_id, None))
+        .unwrap();
+
+    // Loading the snapshot back to the first canister should fail as there
+    // is not enough memory available.
+    let err = env
+        .load_canister_snapshot(LoadCanisterSnapshotArgs::new(
+            canister_id,
+            snapshot_id,
+            None,
+        ))
+        .unwrap_err();
+    assert_eq!(err.code(), ErrorCode::SubnetOversubscribed);
+}
+
 #[test]
 fn canister_snapshot_metrics_are_observed() {
     let mut subnet_config = SubnetConfig::new(SubnetType::Application);