// SPDX-FileCopyrightText: 2023 IBM Corporation
// SPDX-FileContributor: Wojciech Ozga <woz@zurich.ibm.com>, IBM Research - Zurich
// SPDX-License-Identifier: Apache-2.0

use crate::confidential_flow::ConfidentialFlow;
use crate::core::architecture::riscv::sbi::BaseExtension::*;
use crate::core::architecture::riscv::sbi::CovhExtension::*;
use crate::core::architecture::riscv::sbi::NaclExtension::*;
use crate::core::architecture::riscv::sbi::NaclSharedMemory;
use crate::core::architecture::riscv::sbi::SbiExtension::*;
use crate::core::architecture::TrapCause;
use crate::core::architecture::TrapCause::*;
use crate::core::control_data::{ConfidentialVmId, HardwareHart, HypervisorHart};
use crate::error::Error;
use crate::non_confidential_flow::handlers::cove_host_extension::{
DestroyConfidentialVm, GetSecurityMonitorInfo, PromoteToConfidentialVm, RunConfidentialHart,
};
use crate::non_confidential_flow::handlers::nested_acceleration_extension::{NaclProbeFeature, NaclSetupSharedMemory};
use crate::non_confidential_flow::handlers::supervisor_binary_interface::{InvalidCall, ProbeSbiExtension};
use crate::non_confidential_flow::{ApplyToHypervisorHart, DeclassifyToHypervisor};
use opensbi_sys::sbi_trap_regs;

extern "C" {
/// To ensure safety, specify all possible valid states that KVM expects to see and prove that the security monitor
/// never returns to KVM with any other state. For example, only a subset of exceptions/interrupts can be handled by KVM.
/// KVM kills the vcpu if it receives an unexpected exception because it does not know what to do with it.
fn exit_to_hypervisor_asm() -> !;
/// Currently, we rely on OpenSBI to handle some of the interrupts and exceptions. The function below is the entry point
/// to the OpenSBI trap handler.
fn sbi_trap_handler(regs: *mut sbi_trap_regs) -> *mut sbi_trap_regs;
}

/// Represents the non-confidential part of the finite state machine (FSM), implementing its router and exit nodes. It encapsulates the
/// HardwareHart instance, which is never exposed. It invokes handlers, providing them temporary read access to the hypervisor hart state.
pub struct NonConfidentialFlow<'a> {
hardware_hart: &'a mut HardwareHart,
}

impl<'a> NonConfidentialFlow<'a> {
const CTX_SWITCH_ERROR_MSG: &'static str = "Bug: invalid argument provided by the assembly context switch";

/// Creates an instance of the `NonConfidentialFlow`. A confidential hart must not be assigned to the hardware hart.
pub fn create(hardware_hart: &'a mut HardwareHart) -> Self {
assert!(hardware_hart.confidential_hart().is_dummy());
Self { hardware_hart }
}

/// Routes control flow execution based on the trap cause. This is an entry node (Assembly->Rust) of the non-confidential flow part of
/// the finite state machine (FSM).
///
/// # Safety
///
/// * A confidential hart must not be assigned to the hardware hart.
/// * This function must only be invoked by the assembly lightweight context switch.
/// * The pointer is not null and points to a memory region owned by the physical hart executing this code.
#[no_mangle]
unsafe extern "C" fn route_trap_from_hypervisor_or_vm(hart_ptr: *mut HardwareHart) -> ! {
// The below unsafe block is ok because the lightweight context switch (assembly) guarantees that it provides us with a valid pointer to the
// hardware hart's dump area in main memory. This area in main memory is exclusively owned by the physical hart executing this code.
// Specifically, every physical hart has its own area in main memory and its `mscratch` register stores the address. See the
// `initialization` procedure for more details.
let mut flow = unsafe { Self::create(hart_ptr.as_mut().expect(Self::CTX_SWITCH_ERROR_MSG)) };
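// Route the trap to its handler based on the trap cause; any trap that is not handled explicitly is delegated to OpenSBI.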
match TrapCause::from_hart_architectural_state(flow.hypervisor_hart().hypervisor_hart_state()) {
HsEcall(Base(ProbeExtension)) => ProbeSbiExtension::from_hypervisor_hart(flow.hypervisor_hart_mut()).handle(flow),
HsEcall(Covh(TsmGetInfo)) => GetSecurityMonitorInfo::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Covh(PromoteToTvm)) => PromoteToConfidentialVm::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Covh(TvmVcpuRun)) => RunConfidentialHart::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Covh(DestroyTvm)) => DestroyConfidentialVm::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Covh(_)) => InvalidCall::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Nacl(ProbeFeature)) => NaclProbeFeature::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Nacl(SetupSharedMemory)) => NaclSetupSharedMemory::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
HsEcall(Nacl(_)) => InvalidCall::from_hypervisor_hart(flow.hypervisor_hart()).handle(flow),
_ => flow.delegate_to_opensbi(),
}
}
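
/// Delegates handling of the current trap to the OpenSBI trap handler and then resumes execution of the hypervisor hart. This is an
/// exit node (Rust->Assembly) of the non-confidential part of the finite state machine (FSM).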
pub fn delegate_to_opensbi(self) -> ! {
// Safety: We play with fire here. We must statically make sure that OpenSBI's input structure is bitwise identical to ACE's hart state.
let trap_regs = self.hardware_hart.hypervisor_hart_mut().hypervisor_hart_state_mut() as *mut _ as *mut sbi_trap_regs;
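// Run OpenSBI's trap handler within an OpenSBI execution context; its result is ignored because control returns to the hypervisor below regardless.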
let _ = self.hardware_hart.opensbi_context(|| {
Ok(unsafe {
sbi_trap_handler(trap_regs);
})
});
unsafe { exit_to_hypervisor_asm() }
}

/// Tries to traverse to the confidential flow of the finite state machine (FSM). Returns an error if the identifier of the confidential
/// VM or hart is incorrect, or if the confidential hart cannot be scheduled for execution.
pub fn into_confidential_flow(
self, confidential_vm_id: ConfidentialVmId, confidential_hart_id: usize,
) -> Result<(usize, ConfidentialFlow<'a>), (NonConfidentialFlow<'a>, Error)> {
ConfidentialFlow::enter_from_non_confidential_flow(self.hardware_hart, confidential_vm_id, confidential_hart_id)
.map_err(|(hardware_hart, error)| (Self::create(hardware_hart), error))
}
pub fn declassify_to_hypervisor_hart(mut self, declassify: DeclassifyToHypervisor) -> Self {
match declassify {
DeclassifyToHypervisor::SbiRequest(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
DeclassifyToHypervisor::SbiResponse(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
DeclassifyToHypervisor::Interrupt(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
DeclassifyToHypervisor::MmioLoadRequest(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
DeclassifyToHypervisor::MmioStoreRequest(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
DeclassifyToHypervisor::EnabledInterrupts(v) => v.declassify_to_hypervisor_hart(self.hypervisor_hart_mut()),
}
self
}

/// Resumes execution of the hypervisor hart and declassifies information from a confidential VM to the hypervisor. This is an exit node
/// (Rust->Assembly) of the non-confidential part of the finite state machine (FSM), executed as a result of confidential VM
/// execution (there was a context switch between security domains).
pub fn declassify_and_exit_to_hypervisor(self, declassify: DeclassifyToHypervisor) -> ! {
self.declassify_to_hypervisor_hart(declassify);
unsafe { exit_to_hypervisor_asm() }
}

/// Resumes execution of the hypervisor hart and applies a state transformation. This is an exit node (Rust->Assembly) of the
/// non-confidential part of the finite state machine (FSM), executed as a result of processing a hypervisor request (there was no
/// context switch between security domains).
pub(super) fn apply_and_exit_to_hypervisor(mut self, transformation: ApplyToHypervisorHart) -> ! {
match transformation {
ApplyToHypervisorHart::SbiResponse(v) => v.apply_to_hypervisor_hart(self.hypervisor_hart_mut()),
ApplyToHypervisorHart::SetSharedMemory(v) => v.apply_to_hypervisor_hart(self.hypervisor_hart_mut()),
}
unsafe { exit_to_hypervisor_asm() }
}
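
/// Returns a read-only reference to the NACL shared memory of the hypervisor hart.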
pub fn shared_memory(&self) -> &NaclSharedMemory {
self.hypervisor_hart().shared_memory()
}
fn hypervisor_hart_mut(&mut self) -> &mut HypervisorHart {
self.hardware_hart.hypervisor_hart_mut()
}
fn hypervisor_hart(&self) -> &HypervisorHart {
self.hardware_hart.hypervisor_hart()
}
}