diff --git a/security-monitor/src/core/architecture/riscv/mmu/hgatp.rs b/security-monitor/src/core/architecture/riscv/mmu/hgatp.rs
index 67a7d2c..084a630 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/hgatp.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/hgatp.rs
@@ -1,38 +1,7 @@
 // SPDX-FileCopyrightText: 2023 IBM Corporation
 // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich
 // SPDX-License-Identifier: Apache-2.0
-
-pub struct Hgatp {
-    bits: usize,
-}
-
-impl Hgatp {
-    const HGATP64_MODE_SHIFT: usize = 60;
-    const HGATP64_VMID_SHIFT: usize = 44;
-    const PAGE_SHIFT: usize = 12;
-    const HGATP_PPN_MASK: usize = 0x0000FFFFFFFFFFF;
-
-    pub fn from(bits: usize) -> Self {
-        Self { bits }
-    }
-
-    pub fn bits(&self) -> usize {
-        self.bits
-    }
-
-    pub fn address(&self) -> usize {
-        (self.bits & Self::HGATP_PPN_MASK) << Self::PAGE_SHIFT
-    }
-
-    pub fn mode(&self) -> Option<HgatpMode> {
-        HgatpMode::from_code((self.bits >> Self::HGATP64_MODE_SHIFT) & 0b1111)
-    }
-
-    pub fn new(address: usize, mode: HgatpMode, vmid: usize) -> Self {
-        let ppn = (address >> Self::PAGE_SHIFT) & Self::HGATP_PPN_MASK;
-        Self { bits: (vmid << Self::HGATP64_VMID_SHIFT) | (mode.code() << Self::HGATP64_MODE_SHIFT) | ppn }
-    }
-}
+use super::specification::*;
 
 #[repr(usize)]
 #[derive(Clone, Copy, Debug)]
@@ -52,3 +21,36 @@ impl HgatpMode {
         }
     }
 }
+
+#[derive(PartialEq)]
+pub struct Hgatp(usize);
+
+impl Hgatp {
+    pub fn new(address: usize, mode: HgatpMode, vmid: usize) -> Self {
+        Self((vmid << HGATP64_VMID_SHIFT) | (mode.code() << HGATP64_MODE_SHIFT) | (address >> HGATP_PAGE_SHIFT) & HGATP_PPN_MASK)
+    }
+
+    pub fn disabled() -> Self {
+        Self::from(0)
+    }
+
+    pub fn from(bits: usize) -> Self {
+        Self(bits)
+    }
+
+    pub fn bits(&self) -> usize {
+        self.0
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.0 == 0
+    }
+
+    pub fn root_page_table_pointer(&self) -> *mut usize {
+        ((self.0 & HGATP_PPN_MASK) << HGATP_PAGE_SHIFT) as *mut usize
+    }
+
+    pub fn mode(&self) -> Option<HgatpMode> {
+        HgatpMode::from_code((self.0 >> HGATP64_MODE_SHIFT) & 0b1111)
+    }
+}
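For reference, this is the hgatp bit layout that `Hgatp::new`, `mode`, and `root_page_table_pointer` pack and unpack. A minimal standalone sketch: the constants mirror specification.rs, the Sv48x4 mode code (9) comes from the RISC-V privileged spec, and the address and VMID values are arbitrary examples.

```rust
const HGATP64_MODE_SHIFT: usize = 60;
const HGATP64_VMID_SHIFT: usize = 44;
const HGATP_PAGE_SHIFT: usize = 12;
const HGATP_PPN_MASK: usize = 0x0000FFFFFFFFFFF;

fn main() {
    let root_page_table_address: usize = 0x8020_0000;
    let mode_sv48x4: usize = 9; // hgatp.MODE encoding from the RISC-V privileged spec
    let vmid: usize = 7;

    // Pack: PPN into bits [43:0], VMID into [57:44], MODE into [63:60].
    let hgatp = (vmid << HGATP64_VMID_SHIFT)
        | (mode_sv48x4 << HGATP64_MODE_SHIFT)
        | ((root_page_table_address >> HGATP_PAGE_SHIFT) & HGATP_PPN_MASK);

    // Unpack, mirroring Hgatp::mode() and Hgatp::root_page_table_pointer().
    assert_eq!((hgatp >> HGATP64_MODE_SHIFT) & 0b1111, mode_sv48x4);
    assert_eq!((hgatp & HGATP_PPN_MASK) << HGATP_PAGE_SHIFT, root_page_table_address);
}
```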
diff --git a/security-monitor/src/core/architecture/riscv/mmu/mod.rs b/security-monitor/src/core/architecture/riscv/mmu/mod.rs
index 5a56d62..624e866 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/mod.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/mod.rs
@@ -18,17 +18,19 @@ mod page_table_entry;
 mod page_table_level;
 mod paging_system;
 mod shared_page;
+mod specification;
 
 pub fn copy_mmu_configuration_from_non_confidential_memory(hgatp: &Hgatp) -> Result<PageTable, Error> {
     let paging_mode = hgatp.mode().ok_or_else(|| Error::UnsupportedPagingMode())?;
     let paging_system = PagingSystem::from(&paging_mode).ok_or_else(|| Error::UnsupportedPagingMode())?;
-    let root_page_address = NonConfidentialMemoryAddress::new(hgatp.address() as *mut usize)?;
+    let root_page_address = NonConfidentialMemoryAddress::new(hgatp.root_page_table_pointer())?;
     Ok(PageTable::copy_from_non_confidential_memory(root_page_address, paging_system, paging_system.levels())?)
 }
 
-pub fn enable_address_translation_and_protection(hgatp: usize) {
+pub fn enable_address_translation_and_protection(hgatp: &Hgatp) {
+    use crate::core::architecture::CSR;
     // Enable MMU for HS, VS, VU, U modes. It is safe to invoke below code because we have access to this register (run in the M-mode) and
-    // hgatp is the content of the HGATP register calculated by the security monitor when recreating page tables of a confidential virtual
-    // machine that will get executed.
-    CSR.hgatp.write(hgatp);
+    // hgatp is the content of the HGATP register calculated by the security monitor when recreating page tables of a confidential
+    // virtual machine that will get executed.
+    CSR.hgatp.write(hgatp.bits());
 }
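The CSR abstraction behind `CSR.hgatp.write(...)` is not part of this patch, so as a rough, hypothetical illustration of what the write amounts to on RV64 (the function name and the explicit fence are assumptions; the patch issues its fence separately via `tlb::clear_hart_tlbs()`):

```rust
#[cfg(target_arch = "riscv64")]
unsafe fn write_hgatp(value: usize) {
    // Install the new G-stage root page table, VMID, and paging mode.
    core::arch::asm!("csrw hgatp, {}", in(reg) value);
    // Stale G-stage translations may still be cached and must be invalidated.
    core::arch::asm!("hfence.gvma");
}
```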
diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_table.rs b/security-monitor/src/core/architecture/riscv/mmu/page_table.rs
index b11b4df..621f3ee 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/page_table.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/page_table.rs
@@ -1,9 +1,7 @@
 // SPDX-FileCopyrightText: 2023 IBM Corporation
 // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich
 // SPDX-License-Identifier: Apache-2.0
-use crate::core::architecture::mmu::page_table_entry::{
-    PageTableAddress, PageTableBits, PageTableConfiguration, PageTableEntry, PageTablePermission,
-};
+use crate::core::architecture::mmu::page_table_entry::{LogicalPageTableEntry, PageTableEntry};
 use crate::core::architecture::mmu::page_table_level::PageTableLevel;
 use crate::core::architecture::mmu::paging_system::PagingSystem;
 use crate::core::architecture::mmu::HgatpMode;
@@ -35,7 +33,7 @@ pub struct PageTable {
     /// the MMU.
     serialized_representation: Page<Allocated>,
     /// Logical representation stores a strongly typed page table configuration used by the security monitor.
-    logical_representation: Vec<PageTableEntry>,
+    logical_representation: Vec<LogicalPageTableEntry>,
 }
 
 impl PageTable {
@@ -68,29 +66,26 @@
             .offsets()
             .map(|index| {
                 // Below unwrap is ok because we iterate over valid offsets in the page, so `index` is valid.
-                let entry_raw = serialized_representation.read(index).unwrap();
-                let page_table_entry = if !PageTableBits::is_valid(entry_raw) {
-                    PageTableEntry::NotValid
-                } else if PageTableBits::is_leaf(entry_raw) {
-                    let address = NonConfidentialMemoryAddress::new(PageTableAddress::decode(entry_raw))?;
-                    let page_size = paging_system.page_size(level);
-                    let page = PageAllocator::acquire_page(page_size)?
-                        .copy_from_non_confidential_memory(address)
-                        .map_err(|_| Error::AddressNotInNonConfidentialMemory())?;
-                    let configuration = PageTableConfiguration::decode(entry_raw);
-                    let permission = PageTablePermission::decode(entry_raw);
-                    PageTableEntry::PageWithConfidentialVmData(Box::new(page), configuration, permission)
-                } else {
-                    let lower_level = level.lower().ok_or(Error::PageTableCorrupted())?;
-                    let address = NonConfidentialMemoryAddress::new(PageTableAddress::decode(entry_raw))?;
-                    let page_table = Self::copy_from_non_confidential_memory(address, paging_system, lower_level)?;
-                    let configuration = PageTableConfiguration::decode(entry_raw);
-                    PageTableEntry::PointerToNextPageTable(Box::new(page_table), configuration)
+                let serialized_entry = serialized_representation.read(index).unwrap();
+                let logical_page_table_entry = match PageTableEntry::deserialize(serialized_entry) {
+                    PageTableEntry::NotMapped => LogicalPageTableEntry::NotMapped,
+                    PageTableEntry::PointerToNextPageTable(pointer) => {
+                        let address = NonConfidentialMemoryAddress::new(pointer)?;
+                        let lower_level = level.lower().ok_or(Error::PageTableCorrupted())?;
+                        let page_table = Self::copy_from_non_confidential_memory(address, paging_system, lower_level)?;
+                        LogicalPageTableEntry::PointerToNextPageTable(Box::new(page_table))
+                    }
+                    PageTableEntry::PointerToDataPage(pointer) => {
+                        let address = NonConfidentialMemoryAddress::new(pointer)?;
+                        let page_size = paging_system.data_page_size(level);
+                        let page = PageAllocator::acquire_page(page_size)?.copy_from_non_confidential_memory(address)?;
+                        LogicalPageTableEntry::PageWithConfidentialVmData(Box::new(page))
+                    }
                 };
-                serialized_representation.write(index, page_table_entry.encode()).unwrap();
-                Ok(page_table_entry)
+                serialized_representation.write(index, logical_page_table_entry.serialize()).unwrap();
+                Ok(logical_page_table_entry)
             })
-            .collect::<Result<Vec<PageTableEntry>, Error>>()?;
+            .collect::<Result<Vec<LogicalPageTableEntry>, Error>>()?;
 
         Ok(Self { level, paging_system, serialized_representation, logical_representation })
     }
@@ -99,7 +94,7 @@ pub fn empty(paging_system: PagingSystem, level: PageTableLevel) -> Result<Self, Error> {
         let serialized_representation = PageAllocator::acquire_page(paging_system.memory_page_size(level))?.zeroize();
         let number_of_entries = serialized_representation.size().in_bytes() / paging_system.entry_size();
-        let logical_representation = (0..number_of_entries).map(|_| PageTableEntry::NotValid).collect();
+        let logical_representation = (0..number_of_entries).map(|_| LogicalPageTableEntry::NotMapped).collect();
         Ok(Self { level, paging_system, serialized_representation, logical_representation })
     }
@@ -114,27 +109,25 @@
     /// The caller of this function must ensure that they synchronize changes to the page table configuration, e.g., by clearing address
     /// translation caches.
     pub fn map_shared_page(&mut self, shared_page: SharedPage) -> Result<(), Error> {
-        let page_size_at_current_level = self.paging_system.page_size(self.level);
+        let page_size_at_current_level = self.paging_system.data_page_size(self.level);
         ensure!(page_size_at_current_level >= shared_page.page_size(), Error::InvalidParameter())?;
         let virtual_page_number = self.paging_system.vpn(&shared_page.confidential_vm_address, self.level);
         if page_size_at_current_level > shared_page.page_size() {
             // We are at the intermediary page table. We will recursively go to the next page table, creating it in case it does not exist.
             match self.logical_representation.get_mut(virtual_page_number).ok_or_else(|| Error::PageTableConfiguration())? {
-                PageTableEntry::PointerToNextPageTable(next_page_table, _) => next_page_table.map_shared_page(shared_page)?,
-                PageTableEntry::NotValid => {
+                LogicalPageTableEntry::PointerToNextPageTable(next_page_table) => next_page_table.map_shared_page(shared_page)?,
+                LogicalPageTableEntry::NotMapped => {
                     let mut next_page_table = PageTable::empty(self.paging_system, self.level.lower().ok_or(Error::PageTableCorrupted())?)?;
                     next_page_table.map_shared_page(shared_page)?;
-                    let entry = PageTableEntry::PointerToNextPageTable(Box::new(next_page_table), PageTableConfiguration::empty());
-                    self.set_entry(virtual_page_number, entry);
+                    self.set_entry(virtual_page_number, LogicalPageTableEntry::PointerToNextPageTable(Box::new(next_page_table)));
                 }
                 _ => return Err(Error::PageTableConfiguration()),
             }
         } else {
             // We are at the correct page table level at which we must create the page table entry for the shared page. We will overwrite
             // whatever was there before. We end the recursion here.
-            let entry = PageTableEntry::PageSharedWithHypervisor(shared_page);
-            self.set_entry(virtual_page_number, entry);
+            self.set_entry(virtual_page_number, LogicalPageTableEntry::PageSharedWithHypervisor(shared_page));
         }
         Ok(())
     }
@@ -151,11 +144,11 @@
     pub fn unmap_shared_page(&mut self, address: &ConfidentialVmPhysicalAddress) -> Result<PageSize, Error> {
         let virtual_page_number = self.paging_system.vpn(address, self.level);
         match self.logical_representation.get_mut(virtual_page_number).ok_or_else(|| Error::PageTableConfiguration())? {
-            PageTableEntry::PointerToNextPageTable(next_page_table, _) => next_page_table.unmap_shared_page(address),
-            PageTableEntry::PageSharedWithHypervisor(shared_page) => {
+            LogicalPageTableEntry::PointerToNextPageTable(next_page_table) => next_page_table.unmap_shared_page(address),
+            LogicalPageTableEntry::PageSharedWithHypervisor(shared_page) => {
                 ensure!(&shared_page.confidential_vm_address == address, Error::PageTableConfiguration())?;
-                self.set_entry(virtual_page_number, PageTableEntry::NotValid);
-                Ok(self.paging_system.page_size(self.level))
+                self.set_entry(virtual_page_number, LogicalPageTableEntry::NotMapped);
+                Ok(self.paging_system.data_page_size(self.level))
             }
             _ => Err(Error::PageTableConfiguration()),
         }
     }
@@ -168,8 +161,8 @@
     pub fn translate(&self, address: &ConfidentialVmPhysicalAddress) -> Result<ConfidentialMemoryAddress, Error> {
         let virtual_page_number = self.paging_system.vpn(address, self.level);
         match self.logical_representation.get(virtual_page_number).ok_or_else(|| Error::PageTableConfiguration())? {
-            PageTableEntry::PointerToNextPageTable(next_page_table, _) => next_page_table.translate(address),
-            PageTableEntry::PageWithConfidentialVmData(page, _configuration, _permission) => {
+            LogicalPageTableEntry::PointerToNextPageTable(next_page_table) => next_page_table.translate(address),
+            LogicalPageTableEntry::PageWithConfidentialVmData(page) => {
                 let page_offset = self.paging_system.page_offset(address, self.level);
                 // Below unsafe is ok because page_offset recorded in the page table entry is lower than the page size. Thus, the
                 // resulting address will still be in confidential memory because the page is in confidential memory by definition.
@@ -183,12 +176,12 @@
     pub fn measure(&self, digest: &mut MeasurementDigest, address: usize) -> Result<(), Error> {
         use sha2::Digest;
         self.logical_representation.iter().enumerate().try_for_each(|(i, entry)| {
-            let guest_physical_address = address + i * self.paging_system.page_size(self.level).in_bytes();
+            let guest_physical_address = address + i * self.paging_system.data_page_size(self.level).in_bytes();
             match entry {
-                PageTableEntry::PointerToNextPageTable(next_page_table, _) => next_page_table.measure(digest, guest_physical_address),
-                PageTableEntry::PageWithConfidentialVmData(page, _, permission) => Ok(page.measure(digest, guest_physical_address)),
-                PageTableEntry::PageSharedWithHypervisor(_) => Err(Error::PageTableConfiguration()),
-                PageTableEntry::NotValid => Ok(()),
+                LogicalPageTableEntry::PointerToNextPageTable(next_page_table) => next_page_table.measure(digest, guest_physical_address),
+                LogicalPageTableEntry::PageWithConfidentialVmData(page) => Ok(page.measure(digest, guest_physical_address)),
+                LogicalPageTableEntry::PageSharedWithHypervisor(_) => Err(Error::PageTableConfiguration()),
+                LogicalPageTableEntry::NotMapped => Ok(()),
             }
         })
     }
@@ -203,10 +196,10 @@
     }
 
     /// Set a new page table entry at the given index, replacing whatever was there before.
-    fn set_entry(&mut self, virtual_page_number: usize, entry: PageTableEntry) {
-        self.serialized_representation.write(self.paging_system.entry_size() * virtual_page_number, entry.encode()).unwrap();
+    fn set_entry(&mut self, virtual_page_number: usize, entry: LogicalPageTableEntry) {
+        self.serialized_representation.write(self.paging_system.entry_size() * virtual_page_number, entry.serialize()).unwrap();
         let entry_to_remove = core::mem::replace(&mut self.logical_representation[virtual_page_number], entry);
-        if let PageTableEntry::PageWithConfidentialVmData(page, _, _) = entry_to_remove {
+        if let LogicalPageTableEntry::PageWithConfidentialVmData(page) = entry_to_remove {
             PageAllocator::release_pages(alloc::vec![page.deallocate()]);
         }
     }
@@ -216,8 +209,8 @@
         let mut pages = Vec::with_capacity(self.logical_representation.len() + 1);
         pages.push(self.serialized_representation.deallocate());
         self.logical_representation.drain(..).for_each(|entry| match entry {
-            PageTableEntry::PointerToNextPageTable(next_page_table, _) => next_page_table.deallocate(),
-            PageTableEntry::PageWithConfidentialVmData(page, _configuration, _permission) => pages.push(page.deallocate()),
+            LogicalPageTableEntry::PointerToNextPageTable(next_page_table) => next_page_table.deallocate(),
+            LogicalPageTableEntry::PageWithConfidentialVmData(page) => pages.push(page.deallocate()),
             _ => {}
         });
         PageAllocator::release_pages(pages);
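The split between the two representations is the core of this refactoring: the serialized page stays byte-identical with what `serialize()` produces (that is what the MMU walks), while the logical tree is what the monitor recurses over in `map_shared_page`, `unmap_shared_page`, and `translate`. A toy model of that recursive walk, with illustrative names only (none of these types exist in the codebase):

```rust
enum LogicalEntry {
    NotMapped,
    NextTable(Box<Table>),
    DataPage(u64), // base address of a mapped data page
}

struct Table {
    entries: Vec<LogicalEntry>,
}

impl Table {
    // Walks the logical tree the way PageTable::translate() does: recurse
    // through pointer entries, stop at a data page or an unmapped slot.
    fn translate(&self, vpns: &[usize], page_offset: u64) -> Option<u64> {
        match self.entries.get(*vpns.first()?)? {
            LogicalEntry::NextTable(next) => next.translate(&vpns[1..], page_offset),
            LogicalEntry::DataPage(base) => Some(base + page_offset),
            LogicalEntry::NotMapped => None,
        }
    }
}

fn main() {
    let leaf = Table { entries: vec![LogicalEntry::DataPage(0x8000_0000), LogicalEntry::NotMapped] };
    let root = Table { entries: vec![LogicalEntry::NextTable(Box::new(leaf))] };
    assert_eq!(root.translate(&[0, 0], 0x42), Some(0x8000_0042));
    assert_eq!(root.translate(&[0, 1], 0x42), None);
}
```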
diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs b/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs
index d00544c..5238adf 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/page_table_entry.rs
@@ -1,156 +1,68 @@
 // SPDX-FileCopyrightText: 2023 IBM Corporation
 // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich
 // SPDX-License-Identifier: Apache-2.0
+use super::specification::*;
 use crate::core::architecture::mmu::page_table::PageTable;
 use crate::core::architecture::SharedPage;
+use crate::core::memory_layout::NonConfidentialMemoryAddress;
 use crate::core::page_allocator::{Allocated, Page};
+use crate::error::Error;
 use alloc::boxed::Box;
 
-pub(super) enum PageTableEntry {
-    PointerToNextPageTable(Box<PageTable>, PageTableConfiguration),
-    PageWithConfidentialVmData(Box<Page<Allocated>>, PageTableConfiguration, PageTablePermission),
+/// Logical page table entry contains variants specific to the security monitor architecture. These new variants distinguish among certain
+/// types (e.g., shared page, confidential data page) that are not covered by the general RISC-V specification.
+pub(super) enum LogicalPageTableEntry {
+    PointerToNextPageTable(Box<PageTable>),
+    PageWithConfidentialVmData(Box<Page<Allocated>>),
     PageSharedWithHypervisor(SharedPage),
-    NotValid,
+    NotMapped,
 }
 
-impl PageTableEntry {
-    pub fn encode(&self) -> usize {
+impl LogicalPageTableEntry {
+    pub fn serialize(&self) -> usize {
         match self {
-            PageTableEntry::PointerToNextPageTable(page_table, configuration) => {
-                PageTableBits::Valid.mask() | PageTableAddress::encode(page_table.address()) | configuration.encode()
+            Self::PointerToNextPageTable(page_table) => {
+                page_table.address() >> ADDRESS_SHIFT
+                    | PAGE_TABLE_ENTRY_EMPTY_CONF
+                    | PAGE_TABLE_ENTRY_NO_PERMISSIONS
+                    | PAGE_TABLE_ENTRY_VALID_MASK
             }
-            PageTableEntry::PageWithConfidentialVmData(page, configuration, permissions) => {
-                PageTableBits::Valid.mask() | PageTableAddress::encode(page.start_address()) | configuration.encode() | permissions.encode()
+            Self::PageWithConfidentialVmData(page) => {
+                page.start_address() >> ADDRESS_SHIFT
+                    | PAGE_TABLE_ENTRY_UAD_CONF_MASK
+                    | PAGE_TABLE_ENTRY_RWX_PERMISSIONS
+                    | PAGE_TABLE_ENTRY_VALID_MASK
             }
-            PageTableEntry::PageSharedWithHypervisor(shared_page) => {
-                PageTableBits::Valid.mask()
-                    | PageTableAddress::encode(shared_page.hypervisor_address.usize())
-                    | PageTableConfiguration::shared_page_configuration().encode()
-                    | PageTablePermission::read_write_permissions().encode()
+            Self::PageSharedWithHypervisor(shared_page) => {
+                shared_page.hypervisor_address.usize() >> ADDRESS_SHIFT
+                    | PAGE_TABLE_ENTRY_UAD_CONF_MASK
+                    | PAGE_TABLE_ENTRY_RW_PERMISSIONS
+                    | PAGE_TABLE_ENTRY_VALID_MASK
             }
-            PageTableEntry::NotValid => 0,
+            Self::NotMapped => PAGE_TABLE_ENTRY_NOT_MAPPED,
         }
     }
 }
 
-#[derive(Copy, Clone)]
-pub(super) enum PageTableBits {
-    Valid = 0,
-    Read = 1,
-    Write = 2,
-    Execute = 3,
-    User = 4,
-    Global = 5,
-    Accessed = 6,
-    Dirty = 7,
-}
-
-impl PageTableBits {
-    pub const fn mask(&self) -> usize {
-        1 << (*self as usize)
-    }
-
-    pub const fn is_set(&self, raw_entry: usize) -> bool {
-        raw_entry & self.mask() != 0
-    }
-
-    pub const fn is_valid(raw_entry: usize) -> bool {
-        Self::Valid.is_set(raw_entry)
-    }
-
-    pub const fn is_leaf(raw_entry: usize) -> bool {
-        Self::Read.is_set(raw_entry) || Self::Write.is_set(raw_entry) || Self::Execute.is_set(raw_entry)
-    }
-}
-
-pub(super) struct PageTableAddress();
-
-impl PageTableAddress {
-    const CONFIGURATION_BIT_MASK: usize = 0x3ff; // first 10 bits
-    const ADDRESS_SHIFT: usize = 2;
-
-    pub const fn decode(raw_entry: usize) -> *mut usize {
-        ((raw_entry & !Self::CONFIGURATION_BIT_MASK) << Self::ADDRESS_SHIFT) as *mut usize
-    }
-
-    pub fn encode(address: usize) -> usize {
-        address >> Self::ADDRESS_SHIFT
-    }
-}
-
-pub(super) struct PageTablePermission {
-    can_read: bool,
-    can_write: bool,
-    can_execute: bool,
-}
-
-impl PageTablePermission {
-    pub fn read_write_permissions() -> Self {
-        Self { can_read: true, can_write: true, can_execute: false }
-    }
-
-    pub fn decode(raw_entry: usize) -> Self {
-        Self {
-            can_read: PageTableBits::Read.is_set(raw_entry),
-            can_write: PageTableBits::Write.is_set(raw_entry),
-            can_execute: PageTableBits::Execute.is_set(raw_entry),
-        }
-    }
-
-    pub fn encode(&self) -> usize {
-        let mut encoded_value = 0;
-        if self.can_read {
-            encoded_value = encoded_value | PageTableBits::Read.mask();
-        }
-        if self.can_write {
-            encoded_value = encoded_value | PageTableBits::Write.mask();
-        }
-        if self.can_execute {
-            encoded_value = encoded_value | PageTableBits::Execute.mask();
-        }
-        encoded_value
-    }
-}
-
-pub(super) struct PageTableConfiguration {
-    is_accessible_to_user: bool,
-    was_accessed: bool,
-    is_global_mapping: bool,
-    is_dirty: bool,
+/// Page table entry corresponds to entries defined by the RISC-V spec.
+pub(super) enum PageTableEntry {
+    NotMapped,
+    PointerToNextPageTable(*mut usize),
+    PointerToDataPage(*mut usize),
 }
 
-impl PageTableConfiguration {
-    pub fn empty() -> Self {
-        Self { is_accessible_to_user: false, was_accessed: false, is_global_mapping: false, is_dirty: false }
-    }
-
-    pub fn shared_page_configuration() -> Self {
-        Self { is_accessible_to_user: true, was_accessed: true, is_global_mapping: false, is_dirty: true }
-    }
-
-    pub fn decode(raw_entry: usize) -> Self {
-        Self {
-            is_accessible_to_user: PageTableBits::User.is_set(raw_entry),
-            was_accessed: PageTableBits::Accessed.is_set(raw_entry),
-            is_global_mapping: PageTableBits::Global.is_set(raw_entry),
+impl PageTableEntry {
+    pub fn deserialize(serialized_entry: usize) -> Self {
+        match serialized_entry & PAGE_TABLE_ENTRY_TYPE_MASK {
+            PAGE_TABLE_ENTRY_NOT_MAPPED => Self::NotMapped,
+            PAGE_TABLE_ENTRY_POINTER => Self::PointerToNextPageTable(Self::decode_pointer(serialized_entry)),
+            _ => Self::PointerToDataPage(Self::decode_pointer(serialized_entry)),
         }
     }
 
-    pub fn encode(&self) -> usize {
-        let mut encoded_value = 0;
-        if self.is_accessible_to_user {
-            encoded_value = encoded_value | PageTableBits::User.mask();
-        }
-        if self.was_accessed {
-            encoded_value = encoded_value | PageTableBits::Accessed.mask();
-        }
-        if self.is_global_mapping {
-            encoded_value = encoded_value | PageTableBits::Global.mask();
-        }
-        if self.is_dirty {
-            encoded_value = encoded_value | PageTableBits::Dirty.mask();
+    /// Decodes a raw pointer from the page table entry. It is up to the user to decide how to deal with this pointer and check if it is
+    /// valid and is in confidential or non-confidential memory.
+    pub fn decode_pointer(raw_entry: usize) -> *mut usize {
+        ((raw_entry & !CONFIGURATION_BIT_MASK) << ADDRESS_SHIFT) as *mut usize
+    }
 }
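To make the new constants concrete, here is a minimal round-trip of a pointer-type entry as `serialize` and `deserialize` agree on it; the constants are inlined from specification.rs and the address is an arbitrary, page-aligned example.

```rust
const VALID: usize = 1 << 0;
const READ: usize = 1 << 1;
const WRITE: usize = 1 << 2;
const EXECUTE: usize = 1 << 3;
const TYPE_MASK: usize = VALID | READ | WRITE | EXECUTE;
const CONFIGURATION_BIT_MASK: usize = 0x3ff; // flag bits [9:0]
const ADDRESS_SHIFT: usize = 2;

fn main() {
    let next_table_address: usize = 0x8020_0000;

    // Serialize: the physical address becomes the PPN field (shifted right
    // by 2); V=1 with R=W=X=0 marks a pointer to the next-level page table.
    let pte = (next_table_address >> ADDRESS_SHIFT) | VALID;

    // Deserialize: classify via the type mask, then recover the pointer.
    assert_eq!(pte & TYPE_MASK, VALID); // a pointer, neither leaf nor unmapped
    assert_eq!((pte & !CONFIGURATION_BIT_MASK) << ADDRESS_SHIFT, next_table_address);
}
```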
diff --git a/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs b/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs
index 1b1282d..4451481 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/paging_system.rs
@@ -75,7 +75,7 @@ impl PagingSystem {
         vpn_to_rewrite | page_offset
     }
 
-    pub fn page_size(&self, level: PageTableLevel) -> PageSize {
+    pub fn data_page_size(&self, level: PageTableLevel) -> PageSize {
         match level {
             PageTableLevel::Level5 => PageSize::Size128TiB,
             PageTableLevel::Level4 => PageSize::Size512GiB,
diff --git a/security-monitor/src/core/architecture/riscv/mmu/shared_page.rs b/security-monitor/src/core/architecture/riscv/mmu/shared_page.rs
index 8029670..8983bf3 100644
--- a/security-monitor/src/core/architecture/riscv/mmu/shared_page.rs
+++ b/security-monitor/src/core/architecture/riscv/mmu/shared_page.rs
@@ -16,13 +16,8 @@ pub struct SharedPage {
     pub confidential_vm_address: ConfidentialVmPhysicalAddress,
 }
 
-/// It is safe to implement Send+Sync on the SharedPage type because it encapsulates the raw pointer
-/// to non-confidential memory which is never dereferenced inside the security monitor. Its address is
-/// used only to map a page located in the non-confidential memory to the address space of a confidential VM.
-unsafe impl Send for SharedPage {}
-unsafe impl Sync for SharedPage {}
-
 impl SharedPage {
+    // The CoVE spec defines that the size of a shared page is always 4 KiB.
     pub const SIZE: PageSize = PageSize::Size4KiB;
 
     pub fn new(
@@ -37,3 +32,9 @@
         Self::SIZE
     }
 }
+
+/// It is safe to implement Send+Sync on the SharedPage type because it encapsulates the raw pointer
+/// to non-confidential memory which is never dereferenced inside the security monitor. Its address is
+/// used only to map a page located in the non-confidential memory to the address space of a confidential VM.
+unsafe impl Send for SharedPage {}
+unsafe impl Sync for SharedPage {}
diff --git a/security-monitor/src/core/architecture/riscv/mmu/specification.rs b/security-monitor/src/core/architecture/riscv/mmu/specification.rs
new file mode 100644
index 0000000..8ee11bc
--- /dev/null
+++ b/security-monitor/src/core/architecture/riscv/mmu/specification.rs
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: 2023 IBM Corporation
+// SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich
+// SPDX-License-Identifier: Apache-2.0
+
+// TODO: these constants should be generated automatically from the RISC-V formal spec
+pub const PAGE_TABLE_ENTRY_VALID_BIT: usize = 0;
+pub const PAGE_TABLE_ENTRY_VALID_MASK: usize = 1 << PAGE_TABLE_ENTRY_VALID_BIT;
+pub const PAGE_TABLE_ENTRY_READ_BIT: usize = 1;
+pub const PAGE_TABLE_ENTRY_READ_MASK: usize = 1 << PAGE_TABLE_ENTRY_READ_BIT;
+pub const PAGE_TABLE_ENTRY_WRITE_BIT: usize = 2;
+pub const PAGE_TABLE_ENTRY_WRITE_MASK: usize = 1 << PAGE_TABLE_ENTRY_WRITE_BIT;
+pub const PAGE_TABLE_ENTRY_EXECUTE_BIT: usize = 3;
+pub const PAGE_TABLE_ENTRY_EXECUTE_MASK: usize = 1 << PAGE_TABLE_ENTRY_EXECUTE_BIT;
+pub const PAGE_TABLE_ENTRY_USER_BIT: usize = 4;
+pub const PAGE_TABLE_ENTRY_USER_MASK: usize = 1 << PAGE_TABLE_ENTRY_USER_BIT;
+pub const PAGE_TABLE_ENTRY_GLOBAL_BIT: usize = 5;
+pub const PAGE_TABLE_ENTRY_GLOBAL_MASK: usize = 1 << PAGE_TABLE_ENTRY_GLOBAL_BIT;
+pub const PAGE_TABLE_ENTRY_ACCESSED_BIT: usize = 6;
+pub const PAGE_TABLE_ENTRY_ACCESSED_MASK: usize = 1 << PAGE_TABLE_ENTRY_ACCESSED_BIT;
+pub const PAGE_TABLE_ENTRY_DIRTY_BIT: usize = 7;
+pub const PAGE_TABLE_ENTRY_DIRTY_MASK: usize = 1 << PAGE_TABLE_ENTRY_DIRTY_BIT;
+
+pub const PAGE_TABLE_ENTRY_EMPTY_CONF: usize = 0;
+pub const PAGE_TABLE_ENTRY_UAD_CONF_MASK: usize = PAGE_TABLE_ENTRY_USER_MASK | PAGE_TABLE_ENTRY_ACCESSED_MASK | PAGE_TABLE_ENTRY_DIRTY_MASK;
+
+pub const PAGE_TABLE_ENTRY_NO_PERMISSIONS: usize = 0;
+pub const PAGE_TABLE_ENTRY_RW_PERMISSIONS: usize = PAGE_TABLE_ENTRY_READ_MASK | PAGE_TABLE_ENTRY_WRITE_MASK;
+pub const PAGE_TABLE_ENTRY_RWX_PERMISSIONS: usize =
+    PAGE_TABLE_ENTRY_READ_MASK | PAGE_TABLE_ENTRY_WRITE_MASK | PAGE_TABLE_ENTRY_EXECUTE_MASK;
+
+pub const PAGE_TABLE_ENTRY_TYPE_MASK: usize =
+    PAGE_TABLE_ENTRY_VALID_MASK | PAGE_TABLE_ENTRY_READ_MASK | PAGE_TABLE_ENTRY_WRITE_MASK | PAGE_TABLE_ENTRY_EXECUTE_MASK;
+pub const PAGE_TABLE_ENTRY_NOT_MAPPED: usize = 0;
+pub const PAGE_TABLE_ENTRY_POINTER: usize = PAGE_TABLE_ENTRY_VALID_MASK;
+
+pub const CONFIGURATION_BIT_MASK: usize = 0x3ff; // first 10 bits
+pub const ADDRESS_SHIFT: usize = 2;
+
+pub const HGATP64_MODE_SHIFT: usize = 60;
+pub const HGATP64_VMID_SHIFT: usize = 44;
+pub const HGATP_PAGE_SHIFT: usize = 12;
+pub const HGATP_PPN_MASK: usize = 0x0000FFFFFFFFFFF;
diff --git a/security-monitor/src/core/control_data/hypervisor_hart.rs b/security-monitor/src/core/control_data/hypervisor_hart.rs
index 82e0af2..3153fe1 100644
--- a/security-monitor/src/core/control_data/hypervisor_hart.rs
+++ b/security-monitor/src/core/control_data/hypervisor_hart.rs
@@ -72,7 +72,9 @@ impl HypervisorHart {
     }
 
     pub unsafe fn enable_hypervisor_memory_protector(&self) {
-        self.hypervisor_memory_protector.enable(self.csrs().hgatp.read_from_main_memory())
+        use crate::core::architecture::Hgatp;
+        let hgatp = Hgatp::from(self.csrs().hgatp.read_from_main_memory());
+        self.hypervisor_memory_protector.enable(&hgatp);
     }
 
     pub fn address(&self) -> usize {
diff --git a/security-monitor/src/core/memory_protector/confidential_vm_memory_protector.rs b/security-monitor/src/core/memory_protector/confidential_vm_memory_protector.rs
index 2c8e9aa..09cf551 100644
--- a/security-monitor/src/core/memory_protector/confidential_vm_memory_protector.rs
+++ b/security-monitor/src/core/memory_protector/confidential_vm_memory_protector.rs
@@ -14,7 +14,7 @@ pub struct ConfidentialVmMemoryProtector {
     // Stores the page table configuration of the confidential VM.
     root_page_table: PageTable,
     // Stores the value of the hypervisor G-stage address translation and protection (hgatp) register.
-    hgatp: usize,
+    hgatp: Hgatp,
 }
 
 impl ConfidentialVmMemoryProtector {
@@ -27,13 +27,12 @@
     /// * the configuration of the memory isolation component (MMU) is invalid.
     pub fn from_vm_state(hgatp: &Hgatp) -> Result<Self, Error> {
         let root_page_table = mmu::copy_mmu_configuration_from_non_confidential_memory(hgatp)?;
-        Ok(Self { root_page_table, hgatp: 0 })
+        Ok(Self { root_page_table, hgatp: Hgatp::disabled() })
     }
 
     pub fn set_confidential_vm_id(&mut self, id: ConfidentialVmId) {
-        assert!(self.hgatp == 0);
-        let hgatp = Hgatp::new(self.root_page_table.address(), self.root_page_table.hgatp_mode(), id.usize());
-        self.hgatp = hgatp.bits();
+        assert!(self.hgatp.is_empty());
+        self.hgatp = Hgatp::new(self.root_page_table.address(), self.root_page_table.hgatp_mode(), id.usize());
     }
 
     /// Modifies the configuration of the underlying hardware memory isolation component (e.g., MMU) in a way that a
@@ -83,9 +82,9 @@
     /// flow` and that the hgatp argument contains the correct id and the root page table address of the confidential VM
     /// that will be executed next.
     pub unsafe fn enable(&self) {
-        assert!(self.hgatp != 0);
+        assert!(!self.hgatp.is_empty());
         pmp::open_access_to_confidential_memory();
-        mmu::enable_address_translation_and_protection(self.hgatp);
+        mmu::enable_address_translation_and_protection(&self.hgatp);
         tlb::clear_hart_tlbs();
     }
diff --git a/security-monitor/src/core/memory_protector/hypervisor_memory_protector.rs b/security-monitor/src/core/memory_protector/hypervisor_memory_protector.rs
index 4fe6503..bf8de5e 100644
--- a/security-monitor/src/core/memory_protector/hypervisor_memory_protector.rs
+++ b/security-monitor/src/core/memory_protector/hypervisor_memory_protector.rs
@@ -2,6 +2,7 @@
 // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich
 // SPDX-License-Identifier: Apache-2.0
 use crate::core::architecture::riscv::{iopmp, mmu, pmp, tlb};
+use crate::core::architecture::Hgatp;
 use crate::core::memory_layout::MemoryLayout;
 use crate::error::Error;
 
@@ -42,7 +43,7 @@
     ///
     /// Caller must guarantee that the security monitor will transition in the finite state machine to the
     /// `non-confidential flow` and eventually to the hypervisor code.
-    pub unsafe fn enable(&self, hgatp: usize) {
+    pub unsafe fn enable(&self, hgatp: &Hgatp) {
         pmp::close_access_to_confidential_memory();
         mmu::enable_address_translation_and_protection(hgatp);
         tlb::clear_hart_tlbs();
diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs
index df5df15..d46d277 100644
--- a/security-monitor/src/core/page_allocator/page.rs
+++ b/security-monitor/src/core/page_allocator/page.rs
@@ -25,13 +25,6 @@ pub struct Page<S: PageState> {
     _marker: PhantomData<S>,
 }
 
-// We declare Send+Sync on the `Page` because it stores internally a raw pointer, which is
-// not safe to pass in a multi-threaded program. But in the case of the `Page` it is safe
-// because the `Page` owns the memory associated with the pointer and never exposes the raw pointer
-// to the outside.
-unsafe impl<S> Send for Page<S> where S: PageState {}
-unsafe impl<S> Sync for Page<S> where S: PageState {}
-
 impl Page<UnAllocated> {
     /// Creates a page token at the given address in the confidential memory.
     ///
@@ -52,7 +45,9 @@
     /// content of a page located in the non-confidential memory.
     pub fn copy_from_non_confidential_memory(mut self, mut address: NonConfidentialMemoryAddress) -> Result<Page<Allocated>, Error> {
         self.offsets().into_iter().try_for_each(|offset_in_bytes| {
-            let non_confidential_address = MemoryLayout::read().non_confidential_address_at_offset(&mut address, offset_in_bytes)?;
+            let non_confidential_address = MemoryLayout::read()
+                .non_confidential_address_at_offset(&mut address, offset_in_bytes)
+                .map_err(|_| Error::AddressNotInNonConfidentialMemory())?;
             // TODO: describe why below unsafe block is safe in this invocation.
             let data_to_copy = unsafe { non_confidential_address.read() };
             self.write(offset_in_bytes, data_to_copy)?;
@@ -171,11 +166,14 @@ impl Page<Allocated> {
         Ok(())
     }
 
+    /// Extends the digest with the guest physical address and the content of the page.
     pub fn measure(&self, digest: &mut MeasurementDigest, guest_physical_address: usize) {
         use sha2::Digest;
-        let slice = unsafe { core::slice::from_raw_parts(self.address().to_ptr(), self.size().in_bytes()) };
         let mut hasher = DigestType::new_with_prefix(digest.clone());
         hasher.update(guest_physical_address.to_le_bytes());
+        // Below unsafe is ok because the page has been initialized and it owns the entire memory region.
+        // We are creating a slice of bytes, so the number of elements in the slice is the same as the size of the page.
+        let slice: &[u8] = unsafe { core::slice::from_raw_parts(self.address().to_ptr(), self.size().in_bytes()) };
         hasher.update(&slice);
         hasher.finalize_into(digest);
     }
@@ -195,3 +193,10 @@
         self.offsets().for_each(|offset_in_bytes| self.write(offset_in_bytes, 0).unwrap());
     }
 }
+
+// We declare Send+Sync on the `Page` because it stores internally a raw pointer, which is
+// not safe to pass in a multi-threaded program. But in the case of the `Page` it is safe
+// because the `Page` owns the memory associated with the pointer and never exposes the raw pointer
+// to the outside.
+unsafe impl<S> Send for Page<S> where S: PageState {}
+unsafe impl<S> Sync for Page<S> where S: PageState {}
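The measurement step in `Page::measure` chains the running digest with the guest physical address (little endian) and then the raw page bytes. A standalone sketch of that scheme using the sha2 crate; `DigestType` is abstract in this patch, so Sha384 and the 48-byte digest below are assumptions.

```rust
use sha2::{Digest, Sha384};

fn extend_measurement(digest: &mut [u8; 48], guest_physical_address: u64, page: &[u8]) {
    // Chain from the previous digest value, as new_with_prefix does above.
    let mut hasher = Sha384::new_with_prefix(digest.as_slice());
    hasher.update(guest_physical_address.to_le_bytes());
    hasher.update(page);
    digest.copy_from_slice(hasher.finalize().as_slice());
}

fn main() {
    let mut digest = [0u8; 48];
    let page = [0u8; 4096];
    extend_measurement(&mut digest, 0x8020_0000, &page);
    println!("digest prefix = {:02x?}", &digest[..8]);
}
```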
diff --git a/security-monitor/src/non_confidential_flow/handlers/cove_hypervisor_extension/promote_to_confidential_vm.rs b/security-monitor/src/non_confidential_flow/handlers/cove_hypervisor_extension/promote_to_confidential_vm.rs
index 906171f..9a3f070 100644
--- a/security-monitor/src/non_confidential_flow/handlers/cove_hypervisor_extension/promote_to_confidential_vm.rs
+++ b/security-monitor/src/non_confidential_flow/handlers/cove_hypervisor_extension/promote_to_confidential_vm.rs
@@ -75,14 +75,16 @@
         let htimedelta = 0;
 
         // We create a fixed number of harts (all but the boot hart are in the reset state).
-        let confidential_harts = (0..number_of_confidential_harts)
+        let confidential_harts: Vec<_> = (0..number_of_confidential_harts)
             .map(|confidential_hart_id| match confidential_hart_id {
                 Self::BOOT_HART_ID => ConfidentialHart::from_vm_hart(confidential_hart_id, self.program_counter, htimedelta, shared_memory),
                 _ => ConfidentialHart::from_vm_hart_reset(confidential_hart_id, htimedelta, shared_memory),
             })
             .collect();
 
-        let measurements = self.measure(&memory_protector, &confidential_harts)?;
+        let measured_pages_digest = memory_protector.measure()?;
+        let confidential_hart_digest = confidential_harts[Self::BOOT_HART_ID].measure();
+        let measurements = StaticMeasurements::new(measured_pages_digest, confidential_hart_digest);
         debug!("VM measurements: {:?}", measurements);
         self.authenticate_and_authorize_vm(&memory_protector, &measurements)?;
 
@@ -95,15 +97,6 @@
         })
     }
 
-    /// Measures content of the initial confidential VM's state
-    fn measure(
-        &self, memory_protector: &ConfidentialVmMemoryProtector, confidential_harts: &Vec<ConfidentialHart>,
-    ) -> Result<StaticMeasurements, Error> {
-        let measured_pages_digest = memory_protector.measure()?;
-        let configuration_digest = confidential_harts[Self::BOOT_HART_ID].measure();
-        Ok(StaticMeasurements::new(measured_pages_digest, configuration_digest))
-    }
-
     fn process_device_tree(&self, memory_protector: &ConfidentialVmMemoryProtector) -> Result<usize, Error> {
         let address_in_confidential_memory = memory_protector.translate_address(&self.fdt_address)?;
         // Make sure that the address is 8-bytes aligned. Once we ensure this, we can safely read 8 bytes because they must be within
@@ -140,6 +133,7 @@
         &self, memory_protector: &ConfidentialVmMemoryProtector, _measurements: &StaticMeasurements,
     ) -> Result<(), Error> {
         if let Some(blob_address) = self.auth_blob_address {
+            debug!("Performing local attestation");
            let address_in_confidential_memory = memory_protector.translate_address(&blob_address)?;
             // Make sure that the address is 8-bytes aligned. Once we ensure this, we can safely read 8 bytes because they must be within
             // the page boundary. These 8 bytes should contain the `magic` (first 4 bytes) and `size` (next 4 bytes).