user_driver: specify tags when allocating dma buffers #912

Open · wants to merge 2 commits into main
Changes from 1 commit
2 changes: 1 addition & 1 deletion openhcl/hcl/src/ioctl.rs
@@ -1599,7 +1599,7 @@ impl HclVp {
             .map_err(|e| Error::MmapVp(e, Some(Vtl::Vtl0)))?,
             vtl1_apic_page: private_dma_client
                 .ok_or(Error::MissingPrivateMemory)?
-                .allocate_dma_buffer(HV_PAGE_SIZE as usize)
+                .allocate_dma_buffer(HV_PAGE_SIZE as usize, format!("vp-{vp}-vtl1-apic"))
                 .map_err(Error::AllocVp)?,
         },
     };
4 changes: 2 additions & 2 deletions openhcl/lower_vtl_permissions_guard/src/lib.rs
@@ -95,8 +95,8 @@ impl<T: DmaClient> LowerVtlMemorySpawner<T> {
 }
 
 impl<T: DmaClient> DmaClient for LowerVtlMemorySpawner<T> {
-    fn allocate_dma_buffer(&self, len: usize) -> Result<MemoryBlock> {
-        let mem = self.spawner.allocate_dma_buffer(len)?;
+    fn allocate_dma_buffer(&self, len: usize, tag: String) -> Result<MemoryBlock> {
+        let mem = self.spawner.allocate_dma_buffer(len, tag)?;
         let vtl_guard =
             PagesAccessibleToLowerVtl::new_from_pages(self.vtl_protect.clone(), mem.pfns())
                 .context("failed to lower VTL permissions on memory block")?;
18 changes: 12 additions & 6 deletions openhcl/openhcl_dma_manager/src/lib.rs
@@ -407,16 +407,21 @@ impl DmaClientBacking {
     fn allocate_dma_buffer(
         &self,
         total_size: usize,
+        tag: String,
     ) -> anyhow::Result<user_driver::memory::MemoryBlock> {
         match self {
-            DmaClientBacking::SharedPool(allocator) => allocator.allocate_dma_buffer(total_size),
-            DmaClientBacking::PrivatePool(allocator) => allocator.allocate_dma_buffer(total_size),
-            DmaClientBacking::LockedMemory(spawner) => spawner.allocate_dma_buffer(total_size),
+            DmaClientBacking::SharedPool(allocator) => {
+                allocator.allocate_dma_buffer(total_size, tag)
+            }
+            DmaClientBacking::PrivatePool(allocator) => {
+                allocator.allocate_dma_buffer(total_size, tag)
+            }
+            DmaClientBacking::LockedMemory(spawner) => spawner.allocate_dma_buffer(total_size, tag),
             DmaClientBacking::PrivatePoolLowerVtl(spawner) => {
-                spawner.allocate_dma_buffer(total_size)
+                spawner.allocate_dma_buffer(total_size, tag)
             }
             DmaClientBacking::LockedMemoryLowerVtl(spawner) => {
-                spawner.allocate_dma_buffer(total_size)
+                spawner.allocate_dma_buffer(total_size, tag)
            }
        }
    }
@@ -452,8 +457,9 @@ impl DmaClient for OpenhclDmaClient {
     fn allocate_dma_buffer(
         &self,
         total_size: usize,
+        tag: String,
     ) -> anyhow::Result<user_driver::memory::MemoryBlock> {
-        self.backing.allocate_dma_buffer(total_size)
+        self.backing.allocate_dma_buffer(total_size, tag)
     }
 
     fn attach_dma_buffer(
5 changes: 4 additions & 1 deletion openhcl/virt_mshv_vtl/src/lib.rs
@@ -360,7 +360,10 @@ impl UhCvmVpState {
             .shared_dma_client
             .as_ref()
             .ok_or(Error::MissingSharedMemory)?
-            .allocate_dma_buffer(overlay_pages_required * HV_PAGE_SIZE as usize)
+            .allocate_dma_buffer(
+                overlay_pages_required * HV_PAGE_SIZE as usize,
+                format!("vp-{}-direct-overlay", vp_info.base.vp_index),
+            )
             .map_err(Error::AllocateSharedVisOverlay)?;
 
         let apic_base = virt::vp::Apic::at_reset(&inner.caps, vp_info).apic_base;
5 changes: 4 additions & 1 deletion openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
@@ -674,7 +674,10 @@ impl BackingPrivate for TdxBacked {
             .private_dma_client
             .as_ref()
             .ok_or(crate::Error::MissingPrivateMemory)?
-            .allocate_dma_buffer(HV_PAGE_SIZE as usize)
+            .allocate_dma_buffer(
+                HV_PAGE_SIZE as usize,
+                format!("tdx-flush-page vp {}", params.vp_info.base.vp_index.index()),
+            )
             .map_err(crate::Error::AllocateTlbFlushPage)?;
 
         let untrusted_synic = params

Contributor: This one doesn't really follow the format of all the others, but I guess it doesn't matter?
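For comparison, a tag matching the vp-{n}-{purpose} convention used at the other call sites would be something like the following (a sketch of the reviewer's point, not what this commit does):

    format!("vp-{}-tdx-flush-page", params.vp_info.base.vp_index.index())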
@@ -1835,7 +1835,7 @@ async fn request_igvm_attest(
     let allocator = gpa_allocator.ok_or(FatalError::GpaAllocatorUnavailable)?;
     let dma_size = request.response_buffer_len;
     let mem = allocator
-        .allocate_dma_buffer(dma_size)
+        .allocate_dma_buffer(dma_size, "attest request".into())
         .map_err(FatalError::GpaMemoryAllocationError)?;
 
     // Host expects the vTOM bit to be stripped

Contributor: This one too.
2 changes: 1 addition & 1 deletion vm/devices/net/mana_driver/src/gdma_driver.rs
@@ -283,7 +283,7 @@ impl<T: DeviceBacking> GdmaDriver<T> {
         let dma_client = device.dma_client();
 
         let dma_buffer = dma_client
-            .allocate_dma_buffer(NUM_PAGES * PAGE_SIZE)
+            .allocate_dma_buffer(NUM_PAGES * PAGE_SIZE, "gdma-pages".into())
             .context("failed to allocate DMA buffer")?;
 
         let pages = dma_buffer.pfns();
4 changes: 2 additions & 2 deletions vm/devices/net/mana_driver/src/mana.rs
@@ -330,7 +330,7 @@ impl<T: DeviceBacking> Vport<T> {
         let mut gdma = self.inner.gdma.lock().await;
         let dma_client = gdma.device().dma_client();
         let mem = dma_client
-            .allocate_dma_buffer(size as usize)
+            .allocate_dma_buffer(size as usize, "mana-event-queue".into())
             .context("Failed to allocate DMA buffer")?;
 
         let gdma_region = gdma
@@ -373,7 +373,7 @@ impl<T: DeviceBacking> Vport<T> {
         let dma_client = gdma.device().dma_client();
 
         let mem = dma_client
-            .allocate_dma_buffer((wq_size + cq_size) as usize)
+            .allocate_dma_buffer((wq_size + cq_size) as usize, "mana-work-queue".into())
             .context("failed to allocate DMA buffer")?;
 
         let wq_mem = mem.subblock(0, wq_size as usize);
2 changes: 1 addition & 1 deletion vm/devices/net/mana_driver/src/tests.rs
@@ -62,7 +62,7 @@ async fn test_gdma(driver: DefaultDriver) {
     let buffer = Arc::new(
         gdma.device()
             .dma_client()
-            .allocate_dma_buffer(0x5000)
+            .allocate_dma_buffer(0x5000, "test-gdma".into())
             .unwrap(),
     );
     let mut arena = ResourceArena::new();
2 changes: 1 addition & 1 deletion vm/devices/net/net_mana/src/lib.rs
@@ -1223,7 +1223,7 @@ struct OutOfMemory;
 impl ContiguousBufferManager {
     pub fn new(dma_client: Arc<dyn DmaClient>, page_limit: u32) -> anyhow::Result<Self> {
         let len = PAGE_SIZE32 * page_limit;
-        let mem = dma_client.allocate_dma_buffer(len as usize)?;
+        let mem = dma_client.allocate_dma_buffer(len as usize, "mana-contiguous-buffer".into())?;
         Ok(Self {
             len,
             head: 0,
@@ -185,7 +185,7 @@ impl QueuePair {
             QueuePair::SQ_SIZE + QueuePair::CQ_SIZE + QueuePair::PER_QUEUE_PAGES * PAGE_SIZE;
         let dma_client = device.dma_client();
         let mem = dma_client
-            .allocate_dma_buffer(total_size)
+            .allocate_dma_buffer(total_size, "nvme-queues".into())
             .context("failed to allocate memory for queues")?;
 
         assert!(sq_entries <= Self::MAX_SQ_ENTRIES);

Contributor: We should include qid here at least, and possibly pci_id if we can retrieve one.

Member (author): pci_id is tied to the client (this is allocated off a specific device's client), but sure - what's the source of qid you want here?

Contributor: qid is passed as a parameter to this function. I think it would help to know for which queue number (qid) we allocated this particular block.
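A tag along the lines the reviewers discuss might look like the following sketch, where qid is the queue-id parameter that, per the thread, is already passed to this constructor:

    .allocate_dma_buffer(total_size, format!("nvme-queues-qid-{qid}"))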
2 changes: 1 addition & 1 deletion vm/devices/user_driver/src/emulated.rs
@@ -273,7 +273,7 @@ pub struct EmulatedDmaAllocator {
 }
 
 impl DmaClient for EmulatedDmaAllocator {
-    fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<MemoryBlock> {
+    fn allocate_dma_buffer(&self, len: usize, _tag: String) -> anyhow::Result<MemoryBlock> {
         let memory = MemoryBlock::new(self.shared_mem.alloc(len).context("out of memory")?);
         memory.as_slice().atomic_fill(0);
         Ok(memory)

Contributor: Could still be useful here right?

Contributor: Yes, add it to .context below.
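A minimal sketch of the suggestion above (not part of this commit; it assumes anyhow's with_context, with anyhow::Context already in scope for this module):

    fn allocate_dma_buffer(&self, len: usize, tag: String) -> anyhow::Result<MemoryBlock> {
        // Keep the tag and fold it into the error message rather than discarding it.
        let memory = MemoryBlock::new(
            self.shared_mem
                .alloc(len)
                .with_context(|| format!("out of memory allocating dma buffer {tag}"))?,
        );
        memory.as_slice().atomic_fill(0);
        Ok(memory)
    }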
6 changes: 3 additions & 3 deletions vm/devices/user_driver/src/lib.rs
@@ -63,10 +63,10 @@ pub trait DeviceRegisterIo: Send + Sync {
 /// Device interfaces for DMA.
 pub trait DmaClient: Send + Sync + Inspect {
     /// Allocate a new DMA buffer. This buffer must be zero initialized.
-    ///
-    /// TODO: string tag for allocation?
-    fn allocate_dma_buffer(&self, total_size: usize) -> anyhow::Result<MemoryBlock>;
+    fn allocate_dma_buffer(&self, total_size: usize, tag: String) -> anyhow::Result<MemoryBlock>;
 
     /// Attach to a previously allocated memory block.
     ///
     /// TODO: add tag and require tag to match allocated
     fn attach_dma_buffer(&self, len: usize, base_pfn: u64) -> anyhow::Result<MemoryBlock>;
 }
6 changes: 5 additions & 1 deletion vm/devices/user_driver/src/lockmem.rs
@@ -127,7 +127,11 @@ unsafe impl MappedDmaTarget for LockedMemory {
 pub struct LockedMemorySpawner;
 
 impl crate::DmaClient for LockedMemorySpawner {
-    fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<crate::memory::MemoryBlock> {
+    fn allocate_dma_buffer(
+        &self,
+        len: usize,
+        _tag: String,
+    ) -> anyhow::Result<crate::memory::MemoryBlock> {
         Ok(crate::memory::MemoryBlock::new(LockedMemory::new(len)?))
     }
 

Contributor: Could be useful here too right?
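As in emulated.rs, the tag could feed the error path here; a minimal sketch, assuming anyhow::Context is in scope and LockedMemory::new's error converts to anyhow::Error:

    fn allocate_dma_buffer(&self, len: usize, tag: String) -> anyhow::Result<crate::memory::MemoryBlock> {
        // Name the failed allocation so locked-memory exhaustion is attributable.
        let mem = LockedMemory::new(len)
            .with_context(|| format!("failed to lock memory for {tag}"))?;
        Ok(crate::memory::MemoryBlock::new(mem))
    }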
2 changes: 1 addition & 1 deletion vm/devices/vmbus/vmbus_relay_intercept_device/src/lib.rs
@@ -447,7 +447,7 @@ impl<T: SimpleVmbusClientDeviceAsync> SimpleVmbusClientDeviceTask<T> {

         let mem = self
             .dma_alloc
-            .allocate_dma_buffer(page_count * PAGE_SIZE)
+            .allocate_dma_buffer(page_count * PAGE_SIZE, "vmbus-relay-intercept-rings".into())
             .context("allocating memory for vmbus rings")?;
         state.vtl_pages = Some(mem.clone());
         let buf: Vec<_> = [mem.len() as u64]
8 changes: 6 additions & 2 deletions vm/page_pool_alloc/src/lib.rs
@@ -817,7 +817,11 @@ impl Drop for PagePoolAllocator {
 }
 
 impl user_driver::DmaClient for PagePoolAllocator {
-    fn allocate_dma_buffer(&self, len: usize) -> anyhow::Result<user_driver::memory::MemoryBlock> {
+    fn allocate_dma_buffer(
+        &self,
+        len: usize,
+        tag: String,
+    ) -> anyhow::Result<user_driver::memory::MemoryBlock> {
         if len as u64 % PAGE_SIZE != 0 {
             anyhow::bail!("not a page-size multiple");
         }
@@ -826,7 +830,7 @@ impl user_driver::DmaClient for PagePoolAllocator {
.context("allocation of size 0 not supported")?;

let alloc = self
.alloc(size_pages, "vfio dma".into())
.alloc(size_pages, tag)
.context("failed to allocate shared mem")?;

// The VfioDmaBuffer trait requires that newly allocated buffers are