Skip to content

Commit

Permalink
user_driver: specify tags when allocating dma buffers
Browse files Browse the repository at this point in the history
  • Loading branch information
chris-oo committed Feb 26, 2025
1 parent d45ac87 commit 0d69fd1
Show file tree
Hide file tree
Showing 16 changed files with 50 additions and 27 deletions.
2 changes: 1 addition & 1 deletion openhcl/hcl/src/ioctl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1599,7 +1599,7 @@ impl HclVp {
.map_err(|e| Error::MmapVp(e, Some(Vtl::Vtl0)))?,
vtl1_apic_page: private_dma_client
.ok_or(Error::MissingPrivateMemory)?
.allocate_dma_buffer(HV_PAGE_SIZE as usize)
.allocate_dma_buffer(HV_PAGE_SIZE as usize, format!("vp-{vp}-vtl1-apic"))
.map_err(Error::AllocVp)?,
},
};
Expand Down
4 changes: 2 additions & 2 deletions openhcl/lower_vtl_permissions_guard/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,8 @@ impl<T: DmaClient> LowerVtlMemorySpawner<T> {
}

impl<T: DmaClient> DmaClient for LowerVtlMemorySpawner<T> {
fn allocate_dma_buffer(&self, len: usize) -> Result<MemoryBlock> {
let mem = self.spawner.allocate_dma_buffer(len)?;
fn allocate_dma_buffer(&self, len: usize, tag: String) -> Result<MemoryBlock> {
let mem = self.spawner.allocate_dma_buffer(len, tag)?;
let vtl_guard =
PagesAccessibleToLowerVtl::new_from_pages(self.vtl_protect.clone(), mem.pfns())
.context("failed to lower VTL permissions on memory block")?;
Expand Down
18 changes: 12 additions & 6 deletions openhcl/openhcl_dma_manager/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -407,16 +407,21 @@ impl DmaClientBacking {
fn allocate_dma_buffer(
&self,
total_size: usize,
tag: String,
) -> anyhow::Result<user_driver::memory::MemoryBlock> {
match self {
DmaClientBacking::SharedPool(allocator) => allocator.allocate_dma_buffer(total_size),
DmaClientBacking::PrivatePool(allocator) => allocator.allocate_dma_buffer(total_size),
DmaClientBacking::LockedMemory(spawner) => spawner.allocate_dma_buffer(total_size),
DmaClientBacking::SharedPool(allocator) => {
allocator.allocate_dma_buffer(total_size, tag)
}
DmaClientBacking::PrivatePool(allocator) => {
allocator.allocate_dma_buffer(total_size, tag)
}
DmaClientBacking::LockedMemory(spawner) => spawner.allocate_dma_buffer(total_size, tag),
DmaClientBacking::PrivatePoolLowerVtl(spawner) => {
spawner.allocate_dma_buffer(total_size)
spawner.allocate_dma_buffer(total_size, tag)
}
DmaClientBacking::LockedMemoryLowerVtl(spawner) => {
spawner.allocate_dma_buffer(total_size)
spawner.allocate_dma_buffer(total_size, tag)
}
}
}
Expand Down Expand Up @@ -452,8 +457,9 @@ impl DmaClient for OpenhclDmaClient {
fn allocate_dma_buffer(
&self,
total_size: usize,
tag: String,
) -> anyhow::Result<user_driver::memory::MemoryBlock> {
self.backing.allocate_dma_buffer(total_size)
self.backing.allocate_dma_buffer(total_size, tag)
}

fn attach_dma_buffer(
Expand Down
5 changes: 4 additions & 1 deletion openhcl/virt_mshv_vtl/src/processor/snp/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,10 @@ impl BackingPrivate for SnpBacked {
.shared_dma_client
.as_ref()
.ok_or(Error::MissingSharedMemory)?
.allocate_dma_buffer((shared_pages_required_per_cpu() * HV_PAGE_SIZE) as usize)
.allocate_dma_buffer(
(shared_pages_required_per_cpu() * HV_PAGE_SIZE) as usize,
format!("direct overlay vp {}", params.vp_info.base.vp_index.index()),
)
.map_err(Error::AllocateSharedVisOverlay)?;

let overlays: Vec<_> = pfns_handle.pfns().to_vec();
Expand Down
10 changes: 8 additions & 2 deletions openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -683,7 +683,10 @@ impl BackingPrivate for TdxBacked {
.shared_dma_client
.as_ref()
.ok_or(crate::Error::MissingSharedMemory)?
.allocate_dma_buffer((shared_pages_required_per_cpu() * HV_PAGE_SIZE) as usize)
.allocate_dma_buffer(
(shared_pages_required_per_cpu() * HV_PAGE_SIZE) as usize,
format!("direct overlay vp {}", params.vp_info.base.vp_index.index()),
)
.map_err(super::Error::AllocateSharedVisOverlay)?;
let overlays: Vec<_> = pfns_handle.pfns().to_vec();

Expand All @@ -692,7 +695,10 @@ impl BackingPrivate for TdxBacked {
.private_dma_client
.as_ref()
.ok_or(crate::Error::MissingPrivateMemory)?
.allocate_dma_buffer(HV_PAGE_SIZE as usize)
.allocate_dma_buffer(
HV_PAGE_SIZE as usize,
format!("tdx-flush-page vp {}", params.vp_info.base.vp_index.index()),
)
.map_err(crate::Error::AllocateTlbFlushPage)?;

let untrusted_synic = params
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1835,7 +1835,7 @@ async fn request_igvm_attest(
let allocator = gpa_allocator.ok_or(FatalError::GpaAllocatorUnavailable)?;
let dma_size = request.response_buffer_len;
let mem = allocator
.allocate_dma_buffer(dma_size)
.allocate_dma_buffer(dma_size, "attest request".into())
.map_err(FatalError::GpaMemoryAllocationError)?;

// Host expects the vTOM bit to be stripped
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/net/mana_driver/src/gdma_driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,7 @@ impl<T: DeviceBacking> GdmaDriver<T> {
let dma_client = device.dma_client();

let dma_buffer = dma_client
.allocate_dma_buffer(NUM_PAGES * PAGE_SIZE)
.allocate_dma_buffer(NUM_PAGES * PAGE_SIZE, "gdma-pages".into())
.context("failed to allocate DMA buffer")?;

let pages = dma_buffer.pfns();
Expand Down
4 changes: 2 additions & 2 deletions vm/devices/net/mana_driver/src/mana.rs
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,7 @@ impl<T: DeviceBacking> Vport<T> {
let mut gdma = self.inner.gdma.lock().await;
let dma_client = gdma.device().dma_client();
let mem = dma_client
.allocate_dma_buffer(size as usize)
.allocate_dma_buffer(size as usize, "mana-event-queue".into())
.context("Failed to allocate DMA buffer")?;

let gdma_region = gdma
Expand Down Expand Up @@ -373,7 +373,7 @@ impl<T: DeviceBacking> Vport<T> {
let dma_client = gdma.device().dma_client();

let mem = dma_client
.allocate_dma_buffer((wq_size + cq_size) as usize)
.allocate_dma_buffer((wq_size + cq_size) as usize, "mana-work-queue".into())
.context("failed to allocate DMA buffer")?;

let wq_mem = mem.subblock(0, wq_size as usize);
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/net/mana_driver/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ async fn test_gdma(driver: DefaultDriver) {
let buffer = Arc::new(
gdma.device()
.dma_client()
.allocate_dma_buffer(0x5000)
.allocate_dma_buffer(0x5000, "test-gdma".into())
.unwrap(),
);
let mut arena = ResourceArena::new();
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/net/net_mana/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1223,7 +1223,7 @@ struct OutOfMemory;
impl ContiguousBufferManager {
pub fn new(dma_client: Arc<dyn DmaClient>, page_limit: u32) -> anyhow::Result<Self> {
let len = PAGE_SIZE32 * page_limit;
let mem = dma_client.allocate_dma_buffer(len as usize)?;
let mem = dma_client.allocate_dma_buffer(len as usize, "mana-contiguous-buffer".into())?;
Ok(Self {
len,
head: 0,
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/storage/disk_nvme/nvme_driver/src/queue_pair.rs
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ impl QueuePair {
QueuePair::SQ_SIZE + QueuePair::CQ_SIZE + QueuePair::PER_QUEUE_PAGES * PAGE_SIZE;
let dma_client = device.dma_client();
let mem = dma_client
.allocate_dma_buffer(total_size)
.allocate_dma_buffer(total_size, "nvme-queues".into())
.context("failed to allocate memory for queues")?;

assert!(sq_entries <= Self::MAX_SQ_ENTRIES);
Expand Down
2 changes: 1 addition & 1 deletion vm/devices/user_driver/src/emulated.rs
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,7 @@ pub struct EmulatedDmaAllocator {
}

impl DmaClient for EmulatedDmaAllocator {
fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<MemoryBlock> {
fn allocate_dma_buffer(&self, len: usize, _tag: String) -> anyhow::Result<MemoryBlock> {
let memory = MemoryBlock::new(self.shared_mem.alloc(len).context("out of memory")?);
memory.as_slice().atomic_fill(0);
Ok(memory)
Expand Down
6 changes: 3 additions & 3 deletions vm/devices/user_driver/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,10 +63,10 @@ pub trait DeviceRegisterIo: Send + Sync {
/// Device interfaces for DMA.
pub trait DmaClient: Send + Sync + Inspect {
/// Allocate a new DMA buffer. This buffer must be zero initialized.
///
/// TODO: string tag for allocation?
fn allocate_dma_buffer(&self, total_size: usize) -> anyhow::Result<MemoryBlock>;
fn allocate_dma_buffer(&self, total_size: usize, tag: String) -> anyhow::Result<MemoryBlock>;

/// Attach to a previously allocated memory block.
///
/// TODO: add tag and require tag to match allocated
fn attach_dma_buffer(&self, len: usize, base_pfn: u64) -> anyhow::Result<MemoryBlock>;
}
6 changes: 5 additions & 1 deletion vm/devices/user_driver/src/lockmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,11 @@ unsafe impl MappedDmaTarget for LockedMemory {
pub struct LockedMemorySpawner;

impl crate::DmaClient for LockedMemorySpawner {
fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<crate::memory::MemoryBlock> {
/// Allocate a DMA buffer of `len` bytes backed by a fresh `LockedMemory`
/// region.
///
/// The allocation `_tag` required by the `DmaClient` trait is accepted but
/// ignored here: this backing does not track allocations by tag.
fn allocate_dma_buffer(
    &self,
    len: usize,
    _tag: String,
) -> anyhow::Result<crate::memory::MemoryBlock> {
    let locked = LockedMemory::new(len)?;
    Ok(crate::memory::MemoryBlock::new(locked))
}

Expand Down
2 changes: 1 addition & 1 deletion vm/devices/vmbus/vmbus_relay_intercept_device/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ impl<T: SimpleVmbusClientDeviceAsync> SimpleVmbusClientDeviceTask<T> {

let mem = self
.dma_alloc
.allocate_dma_buffer(page_count * PAGE_SIZE)
.allocate_dma_buffer(page_count * PAGE_SIZE, "vmbus-relay-intercept-rings".into())
.context("allocating memory for vmbus rings")?;
state.vtl_pages = Some(mem.clone());
let buf: Vec<_> = [mem.len() as u64]
Expand Down
8 changes: 6 additions & 2 deletions vm/page_pool_alloc/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -817,7 +817,11 @@ impl Drop for PagePoolAllocator {
}

impl user_driver::DmaClient for PagePoolAllocator {
fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<user_driver::memory::MemoryBlock> {
fn allocate_dma_buffer(
&self,
len: usize,
tag: String,
) -> anyhow::Result<user_driver::memory::MemoryBlock> {
if len as u64 % PAGE_SIZE != 0 {
anyhow::bail!("not a page-size multiple");
}
Expand All @@ -826,7 +830,7 @@ impl user_driver::DmaClient for PagePoolAllocator {
.context("allocation of size 0 not supported")?;

let alloc = self
.alloc(size_pages, "vfio dma".into())
.alloc(size_pages, tag)
.context("failed to allocate shared mem")?;

// The VfioDmaBuffer trait requires that newly allocated buffers are
Expand Down

0 comments on commit 0d69fd1

Please sign in to comment.